4897f0449db24bb17eb59d9b6a49aa9fddc50a8f
[linux-2.6.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
58
59 #include <linux/module.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/ioport.h>
63 #include <linux/pci.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/kernel.h>
66 #include <linux/netdevice.h>
67 #include <linux/etherdevice.h>
68 #include <linux/mdio.h>
69 #include <linux/skbuff.h>
70 #include <linux/init.h>
71 #include <linux/delay.h>
72 #include <linux/stddef.h>
73 #include <linux/ioctl.h>
74 #include <linux/timex.h>
75 #include <linux/ethtool.h>
76 #include <linux/workqueue.h>
77 #include <linux/if_vlan.h>
78 #include <linux/ip.h>
79 #include <linux/tcp.h>
80 #include <linux/uaccess.h>
81 #include <linux/io.h>
82 #include <net/tcp.h>
83
84 #include <asm/system.h>
85 #include <asm/div64.h>
86 #include <asm/irq.h>
87
88 /* local include */
89 #include "s2io.h"
90 #include "s2io-regs.h"
91
#define DRV_VERSION "2.0.26.25"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-Rx-descriptor-mode tables, indexed by the NIC's rxd_mode (used as
 * rxd_count[nic->rxd_mode] below): rxd_size[] is the size of one Rx
 * descriptor and rxd_count[] the number of RxDs per Rx block for each
 * of the two supported ring modes (1-buffer and 3-buffer).
 * NOTE(review): exact mode->index mapping assumed from usage — confirm
 * against RXD_MODE_1/RXD_MODE_3 definitions in s2io.h.
 */
static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};
100
101 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 {
103         int ret;
104
105         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
106                (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107
108         return ret;
109 }
110
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 for an Xframe-I device whose subsystem id falls in
 * either faulty range, 0 otherwise.  Arguments and the whole expansion
 * are parenthesized so the macro composes safely inside larger
 * expressions (e.g. under '!' or '&&') and with expression arguments.
 * Note: @subid is evaluated more than once — do not pass expressions
 * with side effects.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)              \
	(((dev_type) == XFRAME_I_DEVICE) ?                              \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||              \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* True when neither a remote nor a local RMAC fault bit is set in the
 * adapter-status register value @val64.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
123
124 static inline int is_s2io_card_up(const struct s2io_nic *sp)
125 {
126         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
127 }
128
/* Ethtool related variables and Macros. */
/* Names of the self-tests reported via 'ethtool -t'.
 * NOTE(review): order presumably matches the order in which the test
 * results are filled in by the ethtool self-test handler (not visible
 * in this chunk) — keep entries in sync with it.
 */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
137
/* Hardware statistics names exported via 'ethtool -S' for all devices
 * (Xframe-I baseline set).
 * NOTE(review): order presumably mirrors the layout of the hardware
 * statistics block read by the stats handler (not visible here) — keep
 * both in sync when adding entries.
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
234
/* Additional statistics names available only on Xframe-II hardware;
 * appended after ethtool_xena_stats_keys (see XFRAME_II_STAT_LEN).
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
253
/* Software (driver-maintained) statistics names, appended after the
 * hardware keys in the 'ethtool -S' output.  The first entry is a
 * visual section separator, not a real counter.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
328
/* Entry counts of the three stat-name tables above. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

/* Total 'ethtool -S' entries per device type: Xframe-I exports the base
 * hardware keys plus driver keys; Xframe-II additionally exports the
 * enhanced hardware keys.
 */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the flattened string tables handed to ethtool. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
341
/*
 * Initialize @timer, attach handler @handle with argument @arg, and
 * schedule it to fire @exp jiffies from now.  Wrapped in do { } while (0)
 * so the multi-statement expansion is safe inside un-braced if/else
 * bodies (the original bare-statement form was not, and its trailing
 * line-continuation swallowed the following source line).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)        \
	do {                                            \
		init_timer(&timer);                     \
		timer.function = handle;                \
		timer.data = (unsigned long)arg;        \
		mod_timer(&timer, (jiffies + (exp)));   \
	} while (0)
348 /* copy mac addr to def_mac_addr array */
349 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350 {
351         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
352         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
353         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
354         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
355         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
356         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
357 }
358
/* Add the vlan */
/* Install the vlan group pointer.  All fifo tx_locks are taken (in
 * ascending fifo order) so that no transmit path can run while vlgrp
 * is being swapped; they are then released in strict reverse order.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];	/* one saved irq state per fifo */
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	/* Acquire every fifo's tx_lock, lowest fifo first. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	nic->vlgrp = grp;

	/* Release in reverse order of acquisition. */
	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
383
384 /* Unregister the vlan */
385 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
386 {
387         int i;
388         struct s2io_nic *nic = netdev_priv(dev);
389         unsigned long flags[MAX_TX_FIFOS];
390         struct mac_info *mac_control = &nic->mac_control;
391         struct config_param *config = &nic->config;
392
393         for (i = 0; i < config->tx_fifo_num; i++) {
394                 struct fifo_info *fifo = &mac_control->fifos[i];
395
396                 spin_lock_irqsave(&fifo->tx_lock, flags[i]);
397         }
398
399         if (nic->vlgrp)
400                 vlan_group_set_device(nic->vlgrp, vid, NULL);
401
402         for (i = config->tx_fifo_num - 1; i >= 0; i--) {
403                 struct fifo_info *fifo = &mac_control->fifos[i];
404
405                 spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
406         }
407 }
408
409 /*
410  * Constants to be programmed into the Xena's registers, to configure
411  * the XAUI.
412  */
413
/* Terminator for the DTX configuration command streams below. */
#define END_SIGN	0x0
/* XAUI DTX configuration command stream for Xframe-II (Herc) hardware:
 * alternating set-address / write-data register values, terminated by
 * END_SIGN.  Values are hardware-specific magic; do not reorder.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
435
/* XAUI DTX configuration command stream for Xframe-I (Xena) hardware;
 * same set-address / write-data layout as herc_act_dtx_cfg above,
 * END_SIGN terminated.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
451
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 * Register write sequence (END_SIGN terminated) replayed by the
 * workaround code; values are hardware-specific magic.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
473
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* See the file-header comment for a description of most of these. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-fifo Tx descriptor counts: fifo 0 defaults larger than fifos 1-7. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring receive-frame-length steering values (0 = unset). */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
521
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminating all-zero entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI AER/EEH recovery callbacks (defined later in this file). */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

/* PCI driver glue: probe/remove entry points defined later in this file. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};
553
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of whole pages needed to hold @len items at @per_each items
 * per page (ceiling division).  Arguments are fully parenthesized so
 * expression arguments expand correctly (the original form miscomputed
 * e.g. TXD_MEM_PAGE_CNT(8, 2 + 2) because '/ per_each' expanded to
 * '/ 2 + 2').
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
556
557 /* netqueue manipulation helper functions */
558 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
559 {
560         if (!sp->config.multiq) {
561                 int i;
562
563                 for (i = 0; i < sp->config.tx_fifo_num; i++)
564                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
565         }
566         netif_tx_stop_all_queues(sp->dev);
567 }
568
569 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
570 {
571         if (!sp->config.multiq)
572                 sp->mac_control.fifos[fifo_no].queue_state =
573                         FIFO_QUEUE_STOP;
574
575         netif_tx_stop_all_queues(sp->dev);
576 }
577
578 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
579 {
580         if (!sp->config.multiq) {
581                 int i;
582
583                 for (i = 0; i < sp->config.tx_fifo_num; i++)
584                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
585         }
586         netif_tx_start_all_queues(sp->dev);
587 }
588
589 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
590 {
591         if (!sp->config.multiq)
592                 sp->mac_control.fifos[fifo_no].queue_state =
593                         FIFO_QUEUE_START;
594
595         netif_tx_start_all_queues(sp->dev);
596 }
597
598 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
599 {
600         if (!sp->config.multiq) {
601                 int i;
602
603                 for (i = 0; i < sp->config.tx_fifo_num; i++)
604                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
605         }
606         netif_tx_wake_all_queues(sp->dev);
607 }
608
609 static inline void s2io_wake_tx_queue(
610         struct fifo_info *fifo, int cnt, u8 multiq)
611 {
612
613         if (multiq) {
614                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
615                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
616         } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
617                 if (netif_queue_stopped(fifo->dev)) {
618                         fifo->queue_state = FIFO_QUEUE_START;
619                         netif_wake_queue(fifo->dev);
620                 }
621         }
622 }
623
624 /**
625  * init_shared_mem - Allocation and Initialization of Memory
626  * @nic: Device private variable.
627  * Description: The function allocates all the memory areas shared
628  * between the NIC and the driver. This includes Tx descriptors,
629  * Rx descriptors and the statistics block.
630  */
631
632 static int init_shared_mem(struct s2io_nic *nic)
633 {
634         u32 size;
635         void *tmp_v_addr, *tmp_v_addr_next;
636         dma_addr_t tmp_p_addr, tmp_p_addr_next;
637         struct RxD_block *pre_rxd_blk = NULL;
638         int i, j, blk_cnt;
639         int lst_size, lst_per_page;
640         struct net_device *dev = nic->dev;
641         unsigned long tmp;
642         struct buffAdd *ba;
643
644         struct mac_info *mac_control;
645         struct config_param *config;
646         unsigned long long mem_allocated = 0;
647
648         mac_control = &nic->mac_control;
649         config = &nic->config;
650
651         /* Allocation and initialization of TXDLs in FIFOs */
652         size = 0;
653         for (i = 0; i < config->tx_fifo_num; i++) {
654                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
655
656                 size += tx_cfg->fifo_len;
657         }
658         if (size > MAX_AVAILABLE_TXDS) {
659                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
660                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n",
661                           size);
662                 return -EINVAL;
663         }
664
665         size = 0;
666         for (i = 0; i < config->tx_fifo_num; i++) {
667                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
668
669                 size = tx_cfg->fifo_len;
670                 /*
671                  * Legal values are from 2 to 8192
672                  */
673                 if (size < 2) {
674                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
675                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
676                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
677                                   "are 2 to 8192\n");
678                         return -EINVAL;
679                 }
680         }
681
682         lst_size = (sizeof(struct TxD) * config->max_txds);
683         lst_per_page = PAGE_SIZE / lst_size;
684
685         for (i = 0; i < config->tx_fifo_num; i++) {
686                 struct fifo_info *fifo = &mac_control->fifos[i];
687                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
688                 int fifo_len = tx_cfg->fifo_len;
689                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
690
691                 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
692                 if (!fifo->list_info) {
693                         DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
694                         return -ENOMEM;
695                 }
696                 mem_allocated += list_holder_size;
697         }
698         for (i = 0; i < config->tx_fifo_num; i++) {
699                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
700                                                 lst_per_page);
701                 struct fifo_info *fifo = &mac_control->fifos[i];
702                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
703
704                 fifo->tx_curr_put_info.offset = 0;
705                 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
706                 fifo->tx_curr_get_info.offset = 0;
707                 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
708                 fifo->fifo_no = i;
709                 fifo->nic = nic;
710                 fifo->max_txds = MAX_SKB_FRAGS + 2;
711                 fifo->dev = dev;
712
713                 for (j = 0; j < page_num; j++) {
714                         int k = 0;
715                         dma_addr_t tmp_p;
716                         void *tmp_v;
717                         tmp_v = pci_alloc_consistent(nic->pdev,
718                                                      PAGE_SIZE, &tmp_p);
719                         if (!tmp_v) {
720                                 DBG_PRINT(INFO_DBG, "pci_alloc_consistent ");
721                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
722                                 return -ENOMEM;
723                         }
724                         /* If we got a zero DMA address(can happen on
725                          * certain platforms like PPC), reallocate.
726                          * Store virtual address of page we don't want,
727                          * to be freed later.
728                          */
729                         if (!tmp_p) {
730                                 mac_control->zerodma_virt_addr = tmp_v;
731                                 DBG_PRINT(INIT_DBG,
732                                           "%s: Zero DMA address for TxDL. ",
733                                           dev->name);
734                                 DBG_PRINT(INIT_DBG,
735                                           "Virtual address %p\n", tmp_v);
736                                 tmp_v = pci_alloc_consistent(nic->pdev,
737                                                              PAGE_SIZE, &tmp_p);
738                                 if (!tmp_v) {
739                                         DBG_PRINT(INFO_DBG,
740                                                   "pci_alloc_consistent ");
741                                         DBG_PRINT(INFO_DBG,
742                                                   "failed for TxDL\n");
743                                         return -ENOMEM;
744                                 }
745                                 mem_allocated += PAGE_SIZE;
746                         }
747                         while (k < lst_per_page) {
748                                 int l = (j * lst_per_page) + k;
749                                 if (l == tx_cfg->fifo_len)
750                                         break;
751                                 fifo->list_info[l].list_virt_addr =
752                                         tmp_v + (k * lst_size);
753                                 fifo->list_info[l].list_phy_addr =
754                                         tmp_p + (k * lst_size);
755                                 k++;
756                         }
757                 }
758         }
759
760         for (i = 0; i < config->tx_fifo_num; i++) {
761                 struct fifo_info *fifo = &mac_control->fifos[i];
762                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
763
764                 size = tx_cfg->fifo_len;
765                 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
766                 if (!fifo->ufo_in_band_v)
767                         return -ENOMEM;
768                 mem_allocated += (size * sizeof(u64));
769         }
770
771         /* Allocation and initialization of RXDs in Rings */
772         size = 0;
773         for (i = 0; i < config->rx_ring_num; i++) {
774                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
775                 struct ring_info *ring = &mac_control->rings[i];
776
777                 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
778                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
779                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
780                         DBG_PRINT(ERR_DBG, "RxDs per Block");
781                         return FAILURE;
782                 }
783                 size += rx_cfg->num_rxd;
784                 ring->block_count = rx_cfg->num_rxd /
785                         (rxd_count[nic->rxd_mode] + 1);
786                 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
787         }
788         if (nic->rxd_mode == RXD_MODE_1)
789                 size = (size * (sizeof(struct RxD1)));
790         else
791                 size = (size * (sizeof(struct RxD3)));
792
793         for (i = 0; i < config->rx_ring_num; i++) {
794                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
795                 struct ring_info *ring = &mac_control->rings[i];
796
797                 ring->rx_curr_get_info.block_index = 0;
798                 ring->rx_curr_get_info.offset = 0;
799                 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
800                 ring->rx_curr_put_info.block_index = 0;
801                 ring->rx_curr_put_info.offset = 0;
802                 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
803                 ring->nic = nic;
804                 ring->ring_no = i;
805                 ring->lro = lro_enable;
806
807                 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
808                 /*  Allocating all the Rx blocks */
809                 for (j = 0; j < blk_cnt; j++) {
810                         struct rx_block_info *rx_blocks;
811                         int l;
812
813                         rx_blocks = &ring->rx_blocks[j];
814                         size = SIZE_OF_BLOCK;   /* size is always page size */
815                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
816                                                           &tmp_p_addr);
817                         if (tmp_v_addr == NULL) {
818                                 /*
819                                  * In case of failure, free_shared_mem()
820                                  * is called, which should free any
821                                  * memory that was alloced till the
822                                  * failure happened.
823                                  */
824                                 rx_blocks->block_virt_addr = tmp_v_addr;
825                                 return -ENOMEM;
826                         }
827                         mem_allocated += size;
828                         memset(tmp_v_addr, 0, size);
829
830                         size = sizeof(struct rxd_info) *
831                                 rxd_count[nic->rxd_mode];
832                         rx_blocks->block_virt_addr = tmp_v_addr;
833                         rx_blocks->block_dma_addr = tmp_p_addr;
834                         rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
835                         if (!rx_blocks->rxds)
836                                 return -ENOMEM;
837                         mem_allocated += size;
838                         for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
839                                 rx_blocks->rxds[l].virt_addr =
840                                         rx_blocks->block_virt_addr +
841                                         (rxd_size[nic->rxd_mode] * l);
842                                 rx_blocks->rxds[l].dma_addr =
843                                         rx_blocks->block_dma_addr +
844                                         (rxd_size[nic->rxd_mode] * l);
845                         }
846                 }
847                 /* Interlinking all Rx Blocks */
848                 for (j = 0; j < blk_cnt; j++) {
849                         int next = (j + 1) % blk_cnt;
850                         tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
851                         tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
852                         tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
853                         tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
854
855                         pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
856                         pre_rxd_blk->reserved_2_pNext_RxD_block =
857                                 (unsigned long)tmp_v_addr_next;
858                         pre_rxd_blk->pNext_RxD_Blk_physical =
859                                 (u64)tmp_p_addr_next;
860                 }
861         }
862         if (nic->rxd_mode == RXD_MODE_3B) {
863                 /*
864                  * Allocation of Storages for buffer addresses in 2BUFF mode
865                  * and the buffers as well.
866                  */
867                 for (i = 0; i < config->rx_ring_num; i++) {
868                         struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
869                         struct ring_info *ring = &mac_control->rings[i];
870
871                         blk_cnt = rx_cfg->num_rxd /
872                                 (rxd_count[nic->rxd_mode] + 1);
873                         size = sizeof(struct buffAdd *) * blk_cnt;
874                         ring->ba = kmalloc(size, GFP_KERNEL);
875                         if (!ring->ba)
876                                 return -ENOMEM;
877                         mem_allocated += size;
878                         for (j = 0; j < blk_cnt; j++) {
879                                 int k = 0;
880
881                                 size = sizeof(struct buffAdd) *
882                                         (rxd_count[nic->rxd_mode] + 1);
883                                 ring->ba[j] = kmalloc(size, GFP_KERNEL);
884                                 if (!ring->ba[j])
885                                         return -ENOMEM;
886                                 mem_allocated += size;
887                                 while (k != rxd_count[nic->rxd_mode]) {
888                                         ba = &ring->ba[j][k];
889                                         size = BUF0_LEN + ALIGN_SIZE;
890                                         ba->ba_0_org = kmalloc(size, GFP_KERNEL);
891                                         if (!ba->ba_0_org)
892                                                 return -ENOMEM;
893                                         mem_allocated += size;
894                                         tmp = (unsigned long)ba->ba_0_org;
895                                         tmp += ALIGN_SIZE;
896                                         tmp &= ~((unsigned long)ALIGN_SIZE);
897                                         ba->ba_0 = (void *)tmp;
898
899                                         size = BUF1_LEN + ALIGN_SIZE;
900                                         ba->ba_1_org = kmalloc(size, GFP_KERNEL);
901                                         if (!ba->ba_1_org)
902                                                 return -ENOMEM;
903                                         mem_allocated += size;
904                                         tmp = (unsigned long)ba->ba_1_org;
905                                         tmp += ALIGN_SIZE;
906                                         tmp &= ~((unsigned long)ALIGN_SIZE);
907                                         ba->ba_1 = (void *)tmp;
908                                         k++;
909                                 }
910                         }
911                 }
912         }
913
914         /* Allocation and initialization of Statistics block */
915         size = sizeof(struct stat_block);
916         mac_control->stats_mem =
917                 pci_alloc_consistent(nic->pdev, size,
918                                      &mac_control->stats_mem_phy);
919
920         if (!mac_control->stats_mem) {
921                 /*
922                  * In case of failure, free_shared_mem() is called, which
923                  * should free any memory that was alloced till the
924                  * failure happened.
925                  */
926                 return -ENOMEM;
927         }
928         mem_allocated += size;
929         mac_control->stats_mem_sz = size;
930
931         tmp_v_addr = mac_control->stats_mem;
932         mac_control->stats_info = (struct stat_block *)tmp_v_addr;
933         memset(tmp_v_addr, 0, size);
934         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
935                   (unsigned long long)tmp_p_addr);
936         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
937         return SUCCESS;
938 }
939
/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* TxDL geometry must mirror the layout used by init_shared_mem() */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free per-fifo TxDL DMA pages and the list_info bookkeeping array */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/*
			 * NOTE(review): early return assumes that if
			 * list_info was never allocated, nothing after it in
			 * init_shared_mem() was allocated either — verify
			 * against init_shared_mem()'s error paths.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL virt addr marks the first unallocated page */
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= PAGE_SIZE;
		}
		kfree(fifo->list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			nic->config.tx_cfg[i].fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Free the Rx block DMA pages and the per-block rxds arrays */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			/* NULL virt addr marks the first unallocated block */
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				sizeof(struct rxd_info) * rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* free the unaligned originals, not
					 * the aligned ba_0/ba_1 pointers
					 */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
						mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(ring->ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					 (rxd_count[nic->rxd_mode] + 1));
			}
			kfree(ring->ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
				(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Free the per-fifo UFO in-band buffers */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (tx_cfg->fifo_len * sizeof(u64));
			kfree(fifo->ufo_in_band_v);
		}
	}

	/*
	 * Statistics block goes last: the sw_stat counters updated above
	 * live inside this very block (stats_info points into stats_mem).
	 */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1083
1084 /**
1085  * s2io_verify_pci_mode -
1086  */
1087
1088 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1089 {
1090         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1091         register u64 val64 = 0;
1092         int     mode;
1093
1094         val64 = readq(&bar0->pci_mode);
1095         mode = (u8)GET_PCI_MODE(val64);
1096
1097         if (val64 & PCI_MODE_UNKNOWN_MODE)
1098                 return -1;      /* Unknown PCI mode */
1099         return mode;
1100 }
1101
1102 #define NEC_VENID   0x1033
1103 #define NEC_DEVID   0x0125
1104 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1105 {
1106         struct pci_dev *tdev = NULL;
1107         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1108                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1109                         if (tdev->bus == s2io_pdev->bus->parent) {
1110                                 pci_dev_put(tdev);
1111                                 return 1;
1112                         }
1113                 }
1114         }
1115         return 0;
1116 }
1117
1118 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1119 /**
1120  * s2io_print_pci_mode -
1121  */
1122 static int s2io_print_pci_mode(struct s2io_nic *nic)
1123 {
1124         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1125         register u64 val64 = 0;
1126         int     mode;
1127         struct config_param *config = &nic->config;
1128
1129         val64 = readq(&bar0->pci_mode);
1130         mode = (u8)GET_PCI_MODE(val64);
1131
1132         if (val64 & PCI_MODE_UNKNOWN_MODE)
1133                 return -1;      /* Unknown PCI mode */
1134
1135         config->bus_speed = bus_speed[mode];
1136
1137         if (s2io_on_nec_bridge(nic->pdev)) {
1138                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1139                           nic->dev->name);
1140                 return mode;
1141         }
1142
1143         DBG_PRINT(ERR_DBG, "%s: Device is on %d bit ",
1144                   nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64);
1145
1146         switch (mode) {
1147         case PCI_MODE_PCI_33:
1148                 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1149                 break;
1150         case PCI_MODE_PCI_66:
1151                 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1152                 break;
1153         case PCI_MODE_PCIX_M1_66:
1154                 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1155                 break;
1156         case PCI_MODE_PCIX_M1_100:
1157                 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1158                 break;
1159         case PCI_MODE_PCIX_M1_133:
1160                 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1161                 break;
1162         case PCI_MODE_PCIX_M2_66:
1163                 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1164                 break;
1165         case PCI_MODE_PCIX_M2_100:
1166                 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1167                 break;
1168         case PCI_MODE_PCIX_M2_133:
1169                 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1170                 break;
1171         default:
1172                 return -1;      /* Unsupported bus speed */
1173         }
1174
1175         return mode;
1176 }
1177
1178 /**
1179  *  init_tti - Initialization transmit traffic interrupt scheme
1180  *  @nic: device private variable
1181  *  @link: link status (UP/DOWN) used to enable/disable continuous
1182  *  transmit interrupts
1183  *  Description: The function configures transmit traffic interrupts
1184  *  Return Value:  SUCCESS on success and
1185  *  '-1' on failure
1186  */
1187
1188 static int init_tti(struct s2io_nic *nic, int link)
1189 {
1190         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1191         register u64 val64 = 0;
1192         int i;
1193         struct config_param *config;
1194
1195         config = &nic->config;
1196
1197         for (i = 0; i < config->tx_fifo_num; i++) {
1198                 /*
1199                  * TTI Initialization. Default Tx timer gets us about
1200                  * 250 interrupts per sec. Continuous interrupts are enabled
1201                  * by default.
1202                  */
1203                 if (nic->device_type == XFRAME_II_DEVICE) {
1204                         int count = (nic->config.bus_speed * 125)/2;
1205                         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1206                 } else
1207                         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1208
1209                 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1210                         TTI_DATA1_MEM_TX_URNG_B(0x10) |
1211                         TTI_DATA1_MEM_TX_URNG_C(0x30) |
1212                         TTI_DATA1_MEM_TX_TIMER_AC_EN;
1213                 if (i == 0)
1214                         if (use_continuous_tx_intrs && (link == LINK_UP))
1215                                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1216                 writeq(val64, &bar0->tti_data1_mem);
1217
1218                 if (nic->config.intr_type == MSI_X) {
1219                         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1220                                 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1221                                 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1222                                 TTI_DATA2_MEM_TX_UFC_D(0x300);
1223                 } else {
1224                         if ((nic->config.tx_steering_type ==
1225                              TX_DEFAULT_STEERING) &&
1226                             (config->tx_fifo_num > 1) &&
1227                             (i >= nic->udp_fifo_idx) &&
1228                             (i < (nic->udp_fifo_idx +
1229                                   nic->total_udp_fifos)))
1230                                 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1231                                         TTI_DATA2_MEM_TX_UFC_B(0x80) |
1232                                         TTI_DATA2_MEM_TX_UFC_C(0x100) |
1233                                         TTI_DATA2_MEM_TX_UFC_D(0x120);
1234                         else
1235                                 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1236                                         TTI_DATA2_MEM_TX_UFC_B(0x20) |
1237                                         TTI_DATA2_MEM_TX_UFC_C(0x40) |
1238                                         TTI_DATA2_MEM_TX_UFC_D(0x80);
1239                 }
1240
1241                 writeq(val64, &bar0->tti_data2_mem);
1242
1243                 val64 = TTI_CMD_MEM_WE |
1244                         TTI_CMD_MEM_STROBE_NEW_CMD |
1245                         TTI_CMD_MEM_OFFSET(i);
1246                 writeq(val64, &bar0->tti_command_mem);
1247
1248                 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1249                                           TTI_CMD_MEM_STROBE_NEW_CMD,
1250                                           S2IO_BIT_RESET) != SUCCESS)
1251                         return FAILURE;
1252         }
1253
1254         return SUCCESS;
1255 }
1256
1257 /**
1258  *  init_nic - Initialization of hardware
1259  *  @nic: device private variable
1260  *  Description: The function sequentially configures every block
1261  *  of the H/W from their reset values.
1262  *  Return Value:  SUCCESS on success and
1263  *  '-1' on failure (endian settings incorrect).
1264  */
1265
1266 static int init_nic(struct s2io_nic *nic)
1267 {
1268         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1269         struct net_device *dev = nic->dev;
1270         register u64 val64 = 0;
1271         void __iomem *add;
1272         u32 time;
1273         int i, j;
1274         struct mac_info *mac_control;
1275         struct config_param *config;
1276         int dtx_cnt = 0;
1277         unsigned long long mem_share;
1278         int mem_size;
1279
1280         mac_control = &nic->mac_control;
1281         config = &nic->config;
1282
1283         /* to set the swapper controle on the card */
1284         if (s2io_set_swapper(nic)) {
1285                 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1286                 return -EIO;
1287         }
1288
1289         /*
1290          * Herc requires EOI to be removed from reset before XGXS, so..
1291          */
1292         if (nic->device_type & XFRAME_II_DEVICE) {
1293                 val64 = 0xA500000000ULL;
1294                 writeq(val64, &bar0->sw_reset);
1295                 msleep(500);
1296                 val64 = readq(&bar0->sw_reset);
1297         }
1298
1299         /* Remove XGXS from reset state */
1300         val64 = 0;
1301         writeq(val64, &bar0->sw_reset);
1302         msleep(500);
1303         val64 = readq(&bar0->sw_reset);
1304
1305         /* Ensure that it's safe to access registers by checking
1306          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1307          */
1308         if (nic->device_type == XFRAME_II_DEVICE) {
1309                 for (i = 0; i < 50; i++) {
1310                         val64 = readq(&bar0->adapter_status);
1311                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1312                                 break;
1313                         msleep(10);
1314                 }
1315                 if (i == 50)
1316                         return -ENODEV;
1317         }
1318
1319         /*  Enable Receiving broadcasts */
1320         add = &bar0->mac_cfg;
1321         val64 = readq(&bar0->mac_cfg);
1322         val64 |= MAC_RMAC_BCAST_ENABLE;
1323         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1324         writel((u32)val64, add);
1325         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1326         writel((u32) (val64 >> 32), (add + 4));
1327
1328         /* Read registers in all blocks */
1329         val64 = readq(&bar0->mac_int_mask);
1330         val64 = readq(&bar0->mc_int_mask);
1331         val64 = readq(&bar0->xgxs_int_mask);
1332
1333         /*  Set MTU */
1334         val64 = dev->mtu;
1335         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1336
1337         if (nic->device_type & XFRAME_II_DEVICE) {
1338                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1339                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1340                                           &bar0->dtx_control, UF);
1341                         if (dtx_cnt & 0x1)
1342                                 msleep(1); /* Necessary!! */
1343                         dtx_cnt++;
1344                 }
1345         } else {
1346                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1347                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1348                                           &bar0->dtx_control, UF);
1349                         val64 = readq(&bar0->dtx_control);
1350                         dtx_cnt++;
1351                 }
1352         }
1353
1354         /*  Tx DMA Initialization */
1355         val64 = 0;
1356         writeq(val64, &bar0->tx_fifo_partition_0);
1357         writeq(val64, &bar0->tx_fifo_partition_1);
1358         writeq(val64, &bar0->tx_fifo_partition_2);
1359         writeq(val64, &bar0->tx_fifo_partition_3);
1360
1361         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1362                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1363
1364                 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1365                         vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1366
1367                 if (i == (config->tx_fifo_num - 1)) {
1368                         if (i % 2 == 0)
1369                                 i++;
1370                 }
1371
1372                 switch (i) {
1373                 case 1:
1374                         writeq(val64, &bar0->tx_fifo_partition_0);
1375                         val64 = 0;
1376                         j = 0;
1377                         break;
1378                 case 3:
1379                         writeq(val64, &bar0->tx_fifo_partition_1);
1380                         val64 = 0;
1381                         j = 0;
1382                         break;
1383                 case 5:
1384                         writeq(val64, &bar0->tx_fifo_partition_2);
1385                         val64 = 0;
1386                         j = 0;
1387                         break;
1388                 case 7:
1389                         writeq(val64, &bar0->tx_fifo_partition_3);
1390                         val64 = 0;
1391                         j = 0;
1392                         break;
1393                 default:
1394                         j++;
1395                         break;
1396                 }
1397         }
1398
1399         /*
1400          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1401          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1402          */
1403         if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1404                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1405
1406         val64 = readq(&bar0->tx_fifo_partition_0);
1407         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1408                   &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1409
1410         /*
1411          * Initialization of Tx_PA_CONFIG register to ignore packet
1412          * integrity checking.
1413          */
1414         val64 = readq(&bar0->tx_pa_cfg);
1415         val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1416                 TX_PA_CFG_IGNORE_SNAP_OUI |
1417                 TX_PA_CFG_IGNORE_LLC_CTRL |
1418                 TX_PA_CFG_IGNORE_L2_ERR;
1419         writeq(val64, &bar0->tx_pa_cfg);
1420
1421         /* Rx DMA intialization. */
1422         val64 = 0;
1423         for (i = 0; i < config->rx_ring_num; i++) {
1424                 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1425
1426                 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1427         }
1428         writeq(val64, &bar0->rx_queue_priority);
1429
1430         /*
1431          * Allocating equal share of memory to all the
1432          * configured Rings.
1433          */
1434         val64 = 0;
1435         if (nic->device_type & XFRAME_II_DEVICE)
1436                 mem_size = 32;
1437         else
1438                 mem_size = 64;
1439
1440         for (i = 0; i < config->rx_ring_num; i++) {
1441                 switch (i) {
1442                 case 0:
1443                         mem_share = (mem_size / config->rx_ring_num +
1444                                      mem_size % config->rx_ring_num);
1445                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1446                         continue;
1447                 case 1:
1448                         mem_share = (mem_size / config->rx_ring_num);
1449                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1450                         continue;
1451                 case 2:
1452                         mem_share = (mem_size / config->rx_ring_num);
1453                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1454                         continue;
1455                 case 3:
1456                         mem_share = (mem_size / config->rx_ring_num);
1457                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1458                         continue;
1459                 case 4:
1460                         mem_share = (mem_size / config->rx_ring_num);
1461                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1462                         continue;
1463                 case 5:
1464                         mem_share = (mem_size / config->rx_ring_num);
1465                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1466                         continue;
1467                 case 6:
1468                         mem_share = (mem_size / config->rx_ring_num);
1469                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1470                         continue;
1471                 case 7:
1472                         mem_share = (mem_size / config->rx_ring_num);
1473                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1474                         continue;
1475                 }
1476         }
1477         writeq(val64, &bar0->rx_queue_cfg);
1478
1479         /*
1480          * Filling Tx round robin registers
1481          * as per the number of FIFOs for equal scheduling priority
1482          */
1483         switch (config->tx_fifo_num) {
1484         case 1:
1485                 val64 = 0x0;
1486                 writeq(val64, &bar0->tx_w_round_robin_0);
1487                 writeq(val64, &bar0->tx_w_round_robin_1);
1488                 writeq(val64, &bar0->tx_w_round_robin_2);
1489                 writeq(val64, &bar0->tx_w_round_robin_3);
1490                 writeq(val64, &bar0->tx_w_round_robin_4);
1491                 break;
1492         case 2:
1493                 val64 = 0x0001000100010001ULL;
1494                 writeq(val64, &bar0->tx_w_round_robin_0);
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 writeq(val64, &bar0->tx_w_round_robin_2);
1497                 writeq(val64, &bar0->tx_w_round_robin_3);
1498                 val64 = 0x0001000100000000ULL;
1499                 writeq(val64, &bar0->tx_w_round_robin_4);
1500                 break;
1501         case 3:
1502                 val64 = 0x0001020001020001ULL;
1503                 writeq(val64, &bar0->tx_w_round_robin_0);
1504                 val64 = 0x0200010200010200ULL;
1505                 writeq(val64, &bar0->tx_w_round_robin_1);
1506                 val64 = 0x0102000102000102ULL;
1507                 writeq(val64, &bar0->tx_w_round_robin_2);
1508                 val64 = 0x0001020001020001ULL;
1509                 writeq(val64, &bar0->tx_w_round_robin_3);
1510                 val64 = 0x0200010200000000ULL;
1511                 writeq(val64, &bar0->tx_w_round_robin_4);
1512                 break;
1513         case 4:
1514                 val64 = 0x0001020300010203ULL;
1515                 writeq(val64, &bar0->tx_w_round_robin_0);
1516                 writeq(val64, &bar0->tx_w_round_robin_1);
1517                 writeq(val64, &bar0->tx_w_round_robin_2);
1518                 writeq(val64, &bar0->tx_w_round_robin_3);
1519                 val64 = 0x0001020300000000ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_4);
1521                 break;
1522         case 5:
1523                 val64 = 0x0001020304000102ULL;
1524                 writeq(val64, &bar0->tx_w_round_robin_0);
1525                 val64 = 0x0304000102030400ULL;
1526                 writeq(val64, &bar0->tx_w_round_robin_1);
1527                 val64 = 0x0102030400010203ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_2);
1529                 val64 = 0x0400010203040001ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_3);
1531                 val64 = 0x0203040000000000ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_4);
1533                 break;
1534         case 6:
1535                 val64 = 0x0001020304050001ULL;
1536                 writeq(val64, &bar0->tx_w_round_robin_0);
1537                 val64 = 0x0203040500010203ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_1);
1539                 val64 = 0x0405000102030405ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_2);
1541                 val64 = 0x0001020304050001ULL;
1542                 writeq(val64, &bar0->tx_w_round_robin_3);
1543                 val64 = 0x0203040500000000ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_4);
1545                 break;
1546         case 7:
1547                 val64 = 0x0001020304050600ULL;
1548                 writeq(val64, &bar0->tx_w_round_robin_0);
1549                 val64 = 0x0102030405060001ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_1);
1551                 val64 = 0x0203040506000102ULL;
1552                 writeq(val64, &bar0->tx_w_round_robin_2);
1553                 val64 = 0x0304050600010203ULL;
1554                 writeq(val64, &bar0->tx_w_round_robin_3);
1555                 val64 = 0x0405060000000000ULL;
1556                 writeq(val64, &bar0->tx_w_round_robin_4);
1557                 break;
1558         case 8:
1559                 val64 = 0x0001020304050607ULL;
1560                 writeq(val64, &bar0->tx_w_round_robin_0);
1561                 writeq(val64, &bar0->tx_w_round_robin_1);
1562                 writeq(val64, &bar0->tx_w_round_robin_2);
1563                 writeq(val64, &bar0->tx_w_round_robin_3);
1564                 val64 = 0x0001020300000000ULL;
1565                 writeq(val64, &bar0->tx_w_round_robin_4);
1566                 break;
1567         }
1568
1569         /* Enable all configured Tx FIFO partitions */
1570         val64 = readq(&bar0->tx_fifo_partition_0);
1571         val64 |= (TX_FIFO_PARTITION_EN);
1572         writeq(val64, &bar0->tx_fifo_partition_0);
1573
1574         /* Filling the Rx round robin registers as per the
1575          * number of Rings and steering based on QoS with
1576          * equal priority.
1577          */
1578         switch (config->rx_ring_num) {
1579         case 1:
1580                 val64 = 0x0;
1581                 writeq(val64, &bar0->rx_w_round_robin_0);
1582                 writeq(val64, &bar0->rx_w_round_robin_1);
1583                 writeq(val64, &bar0->rx_w_round_robin_2);
1584                 writeq(val64, &bar0->rx_w_round_robin_3);
1585                 writeq(val64, &bar0->rx_w_round_robin_4);
1586
1587                 val64 = 0x8080808080808080ULL;
1588                 writeq(val64, &bar0->rts_qos_steering);
1589                 break;
1590         case 2:
1591                 val64 = 0x0001000100010001ULL;
1592                 writeq(val64, &bar0->rx_w_round_robin_0);
1593                 writeq(val64, &bar0->rx_w_round_robin_1);
1594                 writeq(val64, &bar0->rx_w_round_robin_2);
1595                 writeq(val64, &bar0->rx_w_round_robin_3);
1596                 val64 = 0x0001000100000000ULL;
1597                 writeq(val64, &bar0->rx_w_round_robin_4);
1598
1599                 val64 = 0x8080808040404040ULL;
1600                 writeq(val64, &bar0->rts_qos_steering);
1601                 break;
1602         case 3:
1603                 val64 = 0x0001020001020001ULL;
1604                 writeq(val64, &bar0->rx_w_round_robin_0);
1605                 val64 = 0x0200010200010200ULL;
1606                 writeq(val64, &bar0->rx_w_round_robin_1);
1607                 val64 = 0x0102000102000102ULL;
1608                 writeq(val64, &bar0->rx_w_round_robin_2);
1609                 val64 = 0x0001020001020001ULL;
1610                 writeq(val64, &bar0->rx_w_round_robin_3);
1611                 val64 = 0x0200010200000000ULL;
1612                 writeq(val64, &bar0->rx_w_round_robin_4);
1613
1614                 val64 = 0x8080804040402020ULL;
1615                 writeq(val64, &bar0->rts_qos_steering);
1616                 break;
1617         case 4:
1618                 val64 = 0x0001020300010203ULL;
1619                 writeq(val64, &bar0->rx_w_round_robin_0);
1620                 writeq(val64, &bar0->rx_w_round_robin_1);
1621                 writeq(val64, &bar0->rx_w_round_robin_2);
1622                 writeq(val64, &bar0->rx_w_round_robin_3);
1623                 val64 = 0x0001020300000000ULL;
1624                 writeq(val64, &bar0->rx_w_round_robin_4);
1625
1626                 val64 = 0x8080404020201010ULL;
1627                 writeq(val64, &bar0->rts_qos_steering);
1628                 break;
1629         case 5:
1630                 val64 = 0x0001020304000102ULL;
1631                 writeq(val64, &bar0->rx_w_round_robin_0);
1632                 val64 = 0x0304000102030400ULL;
1633                 writeq(val64, &bar0->rx_w_round_robin_1);
1634                 val64 = 0x0102030400010203ULL;
1635                 writeq(val64, &bar0->rx_w_round_robin_2);
1636                 val64 = 0x0400010203040001ULL;
1637                 writeq(val64, &bar0->rx_w_round_robin_3);
1638                 val64 = 0x0203040000000000ULL;
1639                 writeq(val64, &bar0->rx_w_round_robin_4);
1640
1641                 val64 = 0x8080404020201008ULL;
1642                 writeq(val64, &bar0->rts_qos_steering);
1643                 break;
1644         case 6:
1645                 val64 = 0x0001020304050001ULL;
1646                 writeq(val64, &bar0->rx_w_round_robin_0);
1647                 val64 = 0x0203040500010203ULL;
1648                 writeq(val64, &bar0->rx_w_round_robin_1);
1649                 val64 = 0x0405000102030405ULL;
1650                 writeq(val64, &bar0->rx_w_round_robin_2);
1651                 val64 = 0x0001020304050001ULL;
1652                 writeq(val64, &bar0->rx_w_round_robin_3);
1653                 val64 = 0x0203040500000000ULL;
1654                 writeq(val64, &bar0->rx_w_round_robin_4);
1655
1656                 val64 = 0x8080404020100804ULL;
1657                 writeq(val64, &bar0->rts_qos_steering);
1658                 break;
1659         case 7:
1660                 val64 = 0x0001020304050600ULL;
1661                 writeq(val64, &bar0->rx_w_round_robin_0);
1662                 val64 = 0x0102030405060001ULL;
1663                 writeq(val64, &bar0->rx_w_round_robin_1);
1664                 val64 = 0x0203040506000102ULL;
1665                 writeq(val64, &bar0->rx_w_round_robin_2);
1666                 val64 = 0x0304050600010203ULL;
1667                 writeq(val64, &bar0->rx_w_round_robin_3);
1668                 val64 = 0x0405060000000000ULL;
1669                 writeq(val64, &bar0->rx_w_round_robin_4);
1670
1671                 val64 = 0x8080402010080402ULL;
1672                 writeq(val64, &bar0->rts_qos_steering);
1673                 break;
1674         case 8:
1675                 val64 = 0x0001020304050607ULL;
1676                 writeq(val64, &bar0->rx_w_round_robin_0);
1677                 writeq(val64, &bar0->rx_w_round_robin_1);
1678                 writeq(val64, &bar0->rx_w_round_robin_2);
1679                 writeq(val64, &bar0->rx_w_round_robin_3);
1680                 val64 = 0x0001020300000000ULL;
1681                 writeq(val64, &bar0->rx_w_round_robin_4);
1682
1683                 val64 = 0x8040201008040201ULL;
1684                 writeq(val64, &bar0->rts_qos_steering);
1685                 break;
1686         }
1687
1688         /* UDP Fix */
1689         val64 = 0;
1690         for (i = 0; i < 8; i++)
1691                 writeq(val64, &bar0->rts_frm_len_n[i]);
1692
1693         /* Set the default rts frame length for the rings configured */
1694         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1695         for (i = 0 ; i < config->rx_ring_num ; i++)
1696                 writeq(val64, &bar0->rts_frm_len_n[i]);
1697
1698         /* Set the frame length for the configured rings
1699          * desired by the user
1700          */
1701         for (i = 0; i < config->rx_ring_num; i++) {
1702                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1703                  * specified frame length steering.
1704                  * If the user provides the frame length then program
1705                  * the rts_frm_len register for those values or else
1706                  * leave it as it is.
1707                  */
1708                 if (rts_frm_len[i] != 0) {
1709                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1710                                &bar0->rts_frm_len_n[i]);
1711                 }
1712         }
1713
1714         /* Disable differentiated services steering logic */
1715         for (i = 0; i < 64; i++) {
1716                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1717                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1718                                   dev->name);
1719                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1720                         return -ENODEV;
1721                 }
1722         }
1723
1724         /* Program statistics memory */
1725         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1726
1727         if (nic->device_type == XFRAME_II_DEVICE) {
1728                 val64 = STAT_BC(0x320);
1729                 writeq(val64, &bar0->stat_byte_cnt);
1730         }
1731
1732         /*
1733          * Initializing the sampling rate for the device to calculate the
1734          * bandwidth utilization.
1735          */
1736         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1737                 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1738         writeq(val64, &bar0->mac_link_util);
1739
1740         /*
1741          * Initializing the Transmit and Receive Traffic Interrupt
1742          * Scheme.
1743          */
1744
1745         /* Initialize TTI */
1746         if (SUCCESS != init_tti(nic, nic->last_link_state))
1747                 return -ENODEV;
1748
1749         /* RTI Initialization */
1750         if (nic->device_type == XFRAME_II_DEVICE) {
1751                 /*
1752                  * Programmed to generate Apprx 500 Intrs per
1753                  * second
1754                  */
1755                 int count = (nic->config.bus_speed * 125)/4;
1756                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1757         } else
1758                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1759         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1760                 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1761                 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1762                 RTI_DATA1_MEM_RX_TIMER_AC_EN;
1763
1764         writeq(val64, &bar0->rti_data1_mem);
1765
1766         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1767                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1768         if (nic->config.intr_type == MSI_X)
1769                 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1770                           RTI_DATA2_MEM_RX_UFC_D(0x40));
1771         else
1772                 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1773                           RTI_DATA2_MEM_RX_UFC_D(0x80));
1774         writeq(val64, &bar0->rti_data2_mem);
1775
1776         for (i = 0; i < config->rx_ring_num; i++) {
1777                 val64 = RTI_CMD_MEM_WE |
1778                         RTI_CMD_MEM_STROBE_NEW_CMD |
1779                         RTI_CMD_MEM_OFFSET(i);
1780                 writeq(val64, &bar0->rti_command_mem);
1781
1782                 /*
1783                  * Once the operation completes, the Strobe bit of the
1784                  * command register will be reset. We poll for this
1785                  * particular condition. We wait for a maximum of 500ms
1786                  * for the operation to complete, if it's not complete
1787                  * by then we return error.
1788                  */
1789                 time = 0;
1790                 while (true) {
1791                         val64 = readq(&bar0->rti_command_mem);
1792                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1793                                 break;
1794
1795                         if (time > 10) {
1796                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1797                                           dev->name);
1798                                 return -ENODEV;
1799                         }
1800                         time++;
1801                         msleep(50);
1802                 }
1803         }
1804
1805         /*
1806          * Initializing proper values as Pause threshold into all
1807          * the 8 Queues on Rx side.
1808          */
1809         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1810         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1811
1812         /* Disable RMAC PAD STRIPPING */
1813         add = &bar0->mac_cfg;
1814         val64 = readq(&bar0->mac_cfg);
1815         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1816         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1817         writel((u32) (val64), add);
1818         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1819         writel((u32) (val64 >> 32), (add + 4));
1820         val64 = readq(&bar0->mac_cfg);
1821
1822         /* Enable FCS stripping by adapter */
1823         add = &bar0->mac_cfg;
1824         val64 = readq(&bar0->mac_cfg);
1825         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1826         if (nic->device_type == XFRAME_II_DEVICE)
1827                 writeq(val64, &bar0->mac_cfg);
1828         else {
1829                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1830                 writel((u32) (val64), add);
1831                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1832                 writel((u32) (val64 >> 32), (add + 4));
1833         }
1834
1835         /*
1836          * Set the time value to be inserted in the pause frame
1837          * generated by xena.
1838          */
1839         val64 = readq(&bar0->rmac_pause_cfg);
1840         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1841         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1842         writeq(val64, &bar0->rmac_pause_cfg);
1843
1844         /*
1845          * Set the Threshold Limit for Generating the pause frame
1846          * If the amount of data in any Queue exceeds ratio of
1847          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1848          * pause frame is generated
1849          */
1850         val64 = 0;
1851         for (i = 0; i < 4; i++) {
1852                 val64 |= (((u64)0xFF00 |
1853                            nic->mac_control.mc_pause_threshold_q0q3)
1854                           << (i * 2 * 8));
1855         }
1856         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1857
1858         val64 = 0;
1859         for (i = 0; i < 4; i++) {
1860                 val64 |= (((u64)0xFF00 |
1861                            nic->mac_control.mc_pause_threshold_q4q7)
1862                           << (i * 2 * 8));
1863         }
1864         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1865
1866         /*
1867          * TxDMA will stop Read request if the number of read split has
1868          * exceeded the limit pointed by shared_splits
1869          */
1870         val64 = readq(&bar0->pic_control);
1871         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1872         writeq(val64, &bar0->pic_control);
1873
1874         if (nic->config.bus_speed == 266) {
1875                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1876                 writeq(0x0, &bar0->read_retry_delay);
1877                 writeq(0x0, &bar0->write_retry_delay);
1878         }
1879
1880         /*
1881          * Programming the Herc to split every write transaction
1882          * that does not start on an ADB to reduce disconnects.
1883          */
1884         if (nic->device_type == XFRAME_II_DEVICE) {
1885                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1886                         MISC_LINK_STABILITY_PRD(3);
1887                 writeq(val64, &bar0->misc_control);
1888                 val64 = readq(&bar0->pic_control2);
1889                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1890                 writeq(val64, &bar0->pic_control2);
1891         }
1892         if (strstr(nic->product_name, "CX4")) {
1893                 val64 = TMAC_AVG_IPG(0x17);
1894                 writeq(val64, &bar0->tmac_avg_ipg);
1895         }
1896
1897         return SUCCESS;
1898 }
1899 #define LINK_UP_DOWN_INTERRUPT          1
1900 #define MAC_RMAC_ERR_TIMER              2
1901
1902 static int s2io_link_fault_indication(struct s2io_nic *nic)
1903 {
1904         if (nic->device_type == XFRAME_II_DEVICE)
1905                 return LINK_UP_DOWN_INTERRUPT;
1906         else
1907                 return MAC_RMAC_ERR_TIMER;
1908 }
1909
1910 /**
1911  *  do_s2io_write_bits -  update alarm bits in alarm register
1912  *  @value: alarm bits
1913  *  @flag: interrupt status
1914  *  @addr: address value
1915  *  Description: update alarm bits in alarm register
1916  *  Return Value:
1917  *  NONE.
1918  */
1919 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1920 {
1921         u64 temp64;
1922
1923         temp64 = readq(addr);
1924
1925         if (flag == ENABLE_INTRS)
1926                 temp64 &= ~((u64)value);
1927         else
1928                 temp64 |= ((u64)value);
1929         writeq(temp64, addr);
1930 }
1931
/**
 *  en_dis_err_alarms - Enable or disable error alarm interrupts
 *  @nic: device private variable
 *  @mask: bitmask selecting which interrupt blocks to touch
 *         (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR,
 *         RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, passed through to
 *         do_s2io_write_bits() for every per-block error mask register
 *  Description: For each block selected in @mask, updates that block's
 *  error-mask registers and accumulates the corresponding top-level
 *  bit into nic->general_int_mask. All interrupts are masked in the
 *  general interrupt mask register before the per-block masks are
 *  touched.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while individual masks change */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	/* Tx DMA block: TDA/PFC/PCC/TTI/LSO/TPA/SM error sources */
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	/* Tx MAC block */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS block */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA block: RC/PRC/RPA/RDA/RTI error sources */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	/* Rx MAC block; link-state change only when the RMAC error timer
	 * scheme is in use (see s2io_link_fault_indication())
	 */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS block */
	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller block */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
2058
2059 /**
2060  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2061  *  @nic: device private variable,
2062  *  @mask: A mask indicating which Intr block must be modified and,
2063  *  @flag: A flag indicating whether to enable or disable the Intrs.
2064  *  Description: This function will either disable or enable the interrupts
2065  *  depending on the flag argument. The mask argument can be used to
2066  *  enable/disable any Intr block.
2067  *  Return Value: NONE.
2068  */
2069
2070 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2071 {
2072         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2073         register u64 temp64 = 0, intr_mask = 0;
2074
2075         intr_mask = nic->general_int_mask;
2076
2077         /*  Top level interrupt classification */
2078         /*  PIC Interrupts */
2079         if (mask & TX_PIC_INTR) {
2080                 /*  Enable PIC Intrs in the general intr mask register */
2081                 intr_mask |= TXPIC_INT_M;
2082                 if (flag == ENABLE_INTRS) {
2083                         /*
2084                          * If Hercules adapter enable GPIO otherwise
2085                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2086                          * interrupts for now.
2087                          * TODO
2088                          */
2089                         if (s2io_link_fault_indication(nic) ==
2090                             LINK_UP_DOWN_INTERRUPT) {
2091                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2092                                                    &bar0->pic_int_mask);
2093                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2094                                                    &bar0->gpio_int_mask);
2095                         } else
2096                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2097                 } else if (flag == DISABLE_INTRS) {
2098                         /*
2099                          * Disable PIC Intrs in the general
2100                          * intr mask register
2101                          */
2102                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2103                 }
2104         }
2105
2106         /*  Tx traffic interrupts */
2107         if (mask & TX_TRAFFIC_INTR) {
2108                 intr_mask |= TXTRAFFIC_INT_M;
2109                 if (flag == ENABLE_INTRS) {
2110                         /*
2111                          * Enable all the Tx side interrupts
2112                          * writing 0 Enables all 64 TX interrupt levels
2113                          */
2114                         writeq(0x0, &bar0->tx_traffic_mask);
2115                 } else if (flag == DISABLE_INTRS) {
2116                         /*
2117                          * Disable Tx Traffic Intrs in the general intr mask
2118                          * register.
2119                          */
2120                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2121                 }
2122         }
2123
2124         /*  Rx traffic interrupts */
2125         if (mask & RX_TRAFFIC_INTR) {
2126                 intr_mask |= RXTRAFFIC_INT_M;
2127                 if (flag == ENABLE_INTRS) {
2128                         /* writing 0 Enables all 8 RX interrupt levels */
2129                         writeq(0x0, &bar0->rx_traffic_mask);
2130                 } else if (flag == DISABLE_INTRS) {
2131                         /*
2132                          * Disable Rx Traffic Intrs in the general intr mask
2133                          * register.
2134                          */
2135                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2136                 }
2137         }
2138
2139         temp64 = readq(&bar0->general_int_mask);
2140         if (flag == ENABLE_INTRS)
2141                 temp64 &= ~((u64)intr_mask);
2142         else
2143                 temp64 = DISABLE_ALL_INTRS;
2144         writeq(temp64, &bar0->general_int_mask);
2145
2146         nic->general_int_mask = readq(&bar0->general_int_mask);
2147 }
2148
2149 /**
2150  *  verify_pcc_quiescent- Checks for PCC quiescent state
2151  *  Return: 1 If PCC is quiescence
2152  *          0 If PCC is not quiescence
2153  */
2154 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2155 {
2156         int ret = 0, herc;
2157         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2158         u64 val64 = readq(&bar0->adapter_status);
2159
2160         herc = (sp->device_type == XFRAME_II_DEVICE);
2161
2162         if (flag == false) {
2163                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2164                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2165                                 ret = 1;
2166                 } else {
2167                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2168                                 ret = 1;
2169                 }
2170         } else {
2171                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2172                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2173                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2174                                 ret = 1;
2175                 } else {
2176                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2177                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2178                                 ret = 1;
2179                 }
2180         }
2181
2182         return ret;
2183 }
2184 /**
2185  *  verify_xena_quiescence - Checks whether the H/W is ready
2186  *  Description: Returns whether the H/W is ready to go or not. Depending
2187  *  on whether adapter enable bit was written or not the comparison
2188  *  differs and the calling function passes the input argument flag to
2189  *  indicate this.
2190  *  Return: 1 If xena is quiescence
2191  *          0 If Xena is not quiescence
2192  */
2193
2194 static int verify_xena_quiescence(struct s2io_nic *sp)
2195 {
2196         int  mode;
2197         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2198         u64 val64 = readq(&bar0->adapter_status);
2199         mode = s2io_verify_pci_mode(sp);
2200
2201         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2202                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2203                 return 0;
2204         }
2205         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2206                 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2207                 return 0;
2208         }
2209         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2210                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2211                 return 0;
2212         }
2213         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2214                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2215                 return 0;
2216         }
2217         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2218                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2219                 return 0;
2220         }
2221         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2222                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2223                 return 0;
2224         }
2225         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2226                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2227                 return 0;
2228         }
2229         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2230                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2231                 return 0;
2232         }
2233
2234         /*
2235          * In PCI 33 mode, the P_PLL is not used, and therefore,
2236          * the the P_PLL_LOCK bit in the adapter_status register will
2237          * not be asserted.
2238          */
2239         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2240             sp->device_type == XFRAME_II_DEVICE &&
2241             mode != PCI_MODE_PCI_33) {
2242                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2243                 return 0;
2244         }
2245         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2246               ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2247                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2248                 return 0;
2249         }
2250         return 1;
2251 }
2252
2253 /**
2254  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2255  * @sp: Pointer to device specifc structure
2256  * Description :
2257  * New procedure to clear mac address reading  problems on Alpha platforms
2258  *
2259  */
2260
2261 static void fix_mac_address(struct s2io_nic *sp)
2262 {
2263         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2264         u64 val64;
2265         int i = 0;
2266
2267         while (fix_mac[i] != END_SIGN) {
2268                 writeq(fix_mac[i++], &bar0->gpio_control);
2269                 udelay(10);
2270                 val64 = readq(&bar0->gpio_control);
2271         }
2272 }
2273
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the Receive Protocol Controller at the first Rx
		 * descriptor block of this ring. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear the old backoff interval and program a fixed 0x1000 */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter: leave VLAN tags in place when stripping is off */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the comment above says "enabling" but the code
	 * clears ADAPTER_ECC_EN; presumably the bit is active-low
	 * (set = ECC disabled) — confirm against the Xframe register
	 * specification before changing. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* magic LED programming value written at raw offset 0x2700;
		 * meaning is device-specific (workaround SXE-002) */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to
 * @txdlp: first TxD of the descriptor list to tear down
 * @get_off: descriptor offset in the fifo
 *           (NOTE(review): currently unused in this function body)
 *
 * Unmaps every DMA buffer referenced by the descriptor list, zeroes the
 * descriptors, and returns the skb that was attached to the list (NULL if
 * the list carried no skb).
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* If the first descriptor holds the UFO in-band buffer, unmap it
	 * and step to the descriptor that carries the skb. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control stores the skb pointer for this descriptor list */
	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part of the skb (headlen only) */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb->len - skb->data_len, PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		/* One descriptor per page fragment follows the linear one */
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	/* Clear the whole list so it can be reused */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
2434
2435 /**
2436  *  free_tx_buffers - Free all queued Tx buffers
2437  *  @nic : device private variable.
2438  *  Description:
2439  *  Free all queued Tx buffers.
2440  *  Return Value: void
2441  */
2442
2443 static void free_tx_buffers(struct s2io_nic *nic)
2444 {
2445         struct net_device *dev = nic->dev;
2446         struct sk_buff *skb;
2447         struct TxD *txdp;
2448         int i, j;
2449         struct mac_info *mac_control;
2450         struct config_param *config;
2451         int cnt = 0;
2452
2453         mac_control = &nic->mac_control;
2454         config = &nic->config;
2455
2456         for (i = 0; i < config->tx_fifo_num; i++) {
2457                 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2458                 struct fifo_info *fifo = &mac_control->fifos[i];
2459                 unsigned long flags;
2460
2461                 spin_lock_irqsave(&fifo->tx_lock, flags);
2462                 for (j = 0; j < tx_cfg->fifo_len; j++) {
2463                         txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
2464                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2465                         if (skb) {
2466                                 nic->mac_control.stats_info->sw_stat.mem_freed
2467                                         += skb->truesize;
2468                                 dev_kfree_skb(skb);
2469                                 cnt++;
2470                         }
2471                 }
2472                 DBG_PRINT(INTR_DBG,
2473                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2474                           dev->name, cnt, i);
2475                 fifo->tx_curr_get_info.offset = 0;
2476                 fifo->tx_curr_put_info.offset = 0;
2477                 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2478         }
2479 }
2480
2481 /**
2482  *   stop_nic -  To stop the nic
2483  *   @nic ; device private variable.
2484  *   Description:
2485  *   This function does exactly the opposite of what the start_nic()
2486  *   function does. This function is called to stop the device.
2487  *   Return Value:
2488  *   void.
2489  */
2490
2491 static void stop_nic(struct s2io_nic *nic)
2492 {
2493         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2494         register u64 val64 = 0;
2495         u16 interruptible;
2496         struct mac_info *mac_control;
2497         struct config_param *config;
2498
2499         mac_control = &nic->mac_control;
2500         config = &nic->config;
2501
2502         /*  Disable all interrupts */
2503         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2504         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2505         interruptible |= TX_PIC_INTR;
2506         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2507
2508         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2509         val64 = readq(&bar0->adapter_control);
2510         val64 &= ~(ADAPTER_CNTL_EN);
2511         writeq(val64, &bar0->adapter_control);
2512 }
2513
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	/* First descriptor of the current batch; its ownership bit is
	 * handed to the adapter last (see the wmb() at 'end'). */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	/* NOTE(review): rxd_index is computed each iteration but never
	 * read in this function body — possibly dead code; confirm. */
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors that still need a buffer */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put pointer caught up with get pointer on a still-owned
		 * descriptor: the ring is full, stop refilling. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put", ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of a block reached: wrap to the next block */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter (and, in 3B mode,
		 * already carrying a buffer) — nothing more to fill. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any already-filled batch to the adapter
			 * before reporting the allocation failure. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				/* buffer already mapped — just sync it back
				 * to the device */
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* undo the Buffer2 mapping
						 * before bailing out */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Descriptors inside a batch get ownership immediately;
		 * batch leaders are deferred until after the wmb(). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			/* Start of a new batch: release the previous batch
			 * leader, remember this descriptor as the new one. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2750
2751 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2752 {
2753         struct net_device *dev = sp->dev;
2754         int j;
2755         struct sk_buff *skb;
2756         struct RxD_t *rxdp;
2757         struct mac_info *mac_control;
2758         struct buffAdd *ba;
2759         struct RxD1 *rxdp1;
2760         struct RxD3 *rxdp3;
2761
2762         mac_control = &sp->mac_control;
2763         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2764                 rxdp = mac_control->rings[ring_no].
2765                         rx_blocks[blk].rxds[j].virt_addr;
2766                 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2767                 if (!skb)
2768                         continue;
2769                 if (sp->rxd_mode == RXD_MODE_1) {
2770                         rxdp1 = (struct RxD1 *)rxdp;
2771                         pci_unmap_single(sp->pdev,
2772                                          (dma_addr_t)rxdp1->Buffer0_ptr,
2773                                          dev->mtu +
2774                                          HEADER_ETHERNET_II_802_3_SIZE +
2775                                          HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2776                                          PCI_DMA_FROMDEVICE);
2777                         memset(rxdp, 0, sizeof(struct RxD1));
2778                 } else if (sp->rxd_mode == RXD_MODE_3B) {
2779                         rxdp3 = (struct RxD3 *)rxdp;
2780                         ba = &mac_control->rings[ring_no].ba[blk][j];
2781                         pci_unmap_single(sp->pdev,
2782                                          (dma_addr_t)rxdp3->Buffer0_ptr,
2783                                          BUF0_LEN,
2784                                          PCI_DMA_FROMDEVICE);
2785                         pci_unmap_single(sp->pdev,
2786                                          (dma_addr_t)rxdp3->Buffer1_ptr,
2787                                          BUF1_LEN,
2788                                          PCI_DMA_FROMDEVICE);
2789                         pci_unmap_single(sp->pdev,
2790                                          (dma_addr_t)rxdp3->Buffer2_ptr,
2791                                          dev->mtu + 4,
2792                                          PCI_DMA_FROMDEVICE);
2793                         memset(rxdp, 0, sizeof(struct RxD3));
2794                 }
2795                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2796                 dev_kfree_skb(skb);
2797                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2798         }
2799 }
2800
2801 /**
2802  *  free_rx_buffers - Frees all Rx buffers
2803  *  @sp: device private variable.
2804  *  Description:
2805  *  This function will free all Rx buffers allocated by host.
2806  *  Return Value:
2807  *  NONE.
2808  */
2809
2810 static void free_rx_buffers(struct s2io_nic *sp)
2811 {
2812         struct net_device *dev = sp->dev;
2813         int i, blk = 0, buf_cnt = 0;
2814         struct mac_info *mac_control;
2815         struct config_param *config;
2816
2817         mac_control = &sp->mac_control;
2818         config = &sp->config;
2819
2820         for (i = 0; i < config->rx_ring_num; i++) {
2821                 struct ring_info *ring = &mac_control->rings[i];
2822
2823                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2824                         free_rxd_blk(sp, i, blk);
2825
2826                 ring->rx_curr_put_info.block_index = 0;
2827                 ring->rx_curr_get_info.block_index = 0;
2828                 ring->rx_curr_put_info.offset = 0;
2829                 ring->rx_curr_get_info.offset = 0;
2830                 ring->rx_bufs_left = 0;
2831                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2832                           dev->name, buf_cnt, i);
2833         }
2834 }
2835
2836 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2837 {
2838         if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2839                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2840                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2841         }
2842         return 0;
2843 }
2844
2845 /**
2846  * s2io_poll - Rx interrupt handler for NAPI support
2847  * @napi : pointer to the napi structure.
2848  * @budget : The number of packets that were budgeted to be processed
2849  * during  one pass through the 'Poll" function.
2850  * Description:
2851  * Comes into picture only if NAPI support has been incorporated. It does
2852  * the same thing that rx_intr_handler does, but not in a interrupt context
2853  * also It will process only a given number of packets.
2854  * Return value:
2855  * 0 on success and 1 if there are No Rx packets to be processed.
2856  */
2857
2858 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2859 {
2860         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2861         struct net_device *dev = ring->dev;
2862         struct config_param *config;
2863         struct mac_info *mac_control;
2864         int pkts_processed = 0;
2865         u8 __iomem *addr = NULL;
2866         u8 val8 = 0;
2867         struct s2io_nic *nic = netdev_priv(dev);
2868         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2869         int budget_org = budget;
2870
2871         config = &nic->config;
2872         mac_control = &nic->mac_control;
2873
2874         if (unlikely(!is_s2io_card_up(nic)))
2875                 return 0;
2876
2877         pkts_processed = rx_intr_handler(ring, budget);
2878         s2io_chk_rx_buffers(nic, ring);
2879
2880         if (pkts_processed < budget_org) {
2881                 napi_complete(napi);
2882                 /*Re Enable MSI-Rx Vector*/
2883                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2884                 addr += 7 - ring->ring_no;
2885                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2886                 writeb(val8, addr);
2887                 val8 = readb(addr);
2888         }
2889         return pkts_processed;
2890 }
2891
2892 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2893 {
2894         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2895         struct config_param *config;
2896         struct mac_info *mac_control;
2897         int pkts_processed = 0;
2898         int ring_pkts_processed, i;
2899         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2900         int budget_org = budget;
2901
2902         config = &nic->config;
2903         mac_control = &nic->mac_control;
2904
2905         if (unlikely(!is_s2io_card_up(nic)))
2906                 return 0;
2907
2908         for (i = 0; i < config->rx_ring_num; i++) {
2909                 struct ring_info *ring = &mac_control->rings[i];
2910                 ring_pkts_processed = rx_intr_handler(ring, budget);
2911                 s2io_chk_rx_buffers(nic, ring);
2912                 pkts_processed += ring_pkts_processed;
2913                 budget -= ring_pkts_processed;
2914                 if (budget <= 0)
2915                         break;
2916         }
2917         if (pkts_processed < budget_org) {
2918                 napi_complete(napi);
2919                 /* Re enable the Rx interrupts for the ring */
2920                 writeq(0, &bar0->rx_traffic_mask);
2921                 readl(&bar0->rx_traffic_mask);
2922         }
2923         return pkts_processed;
2924 }
2925
2926 #ifdef CONFIG_NET_POLL_CONTROLLER
2927 /**
2928  * s2io_netpoll - netpoll event handler entry point
2929  * @dev : pointer to the device structure.
2930  * Description:
2931  *      This function will be called by upper layer to check for events on the
2932  * interface in situations where interrupts are disabled. It is used for
2933  * specific in-kernel networking tasks, such as remote consoles and kernel
2934  * debugging over the network (example netdump in RedHat).
2935  */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Device unreachable (e.g. PCI error recovery in progress) */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Keep the real interrupt handler out while we poll by hand */
	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Write all-ones to the Tx/Rx traffic interrupt registers —
	 * NOTE(review): presumably write-1-to-clear acknowledgement of any
	 * pending traffic interrupts; confirm against the Xframe register spec.
	 */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* budget 0: non-NAPI path processes without a packet cap */
		rx_intr_handler(ring, 0);
	}

	/* Repost fresh Rx buffers for everything consumed above */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
2982 #endif
2983
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	/* Snapshot the get/put pointers: "get" is where processing resumes,
	 * "put" is where the driver last posted fresh Rx buffers.
	 */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Process descriptors the NIC has marked as carrying received data */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		/* Host_Control carries the skb pointer stashed at fill time */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		/* Unmap (or sync) the DMA buffers before handing the skb up;
		 * the buffer layout depends on the ring's RxD mode.
		 */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping to the next descriptor
		 * block (and back to block 0) at the end of each block.
		 */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Under NAPI, stop once the budget is exhausted */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		/* Optional module-param cap on packets per invocation */
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				/* Flush the aggregated frame up the stack */
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
3091
3092 /**
3093  *  tx_intr_handler - Transmit interrupt handler
3094  *  @nic : device private variable
3095  *  Description:
3096  *  If an interrupt was raised to indicate DMA complete of the
3097  *  Tx packet, this function is called. It identifies the last TxD
3098  *  whose buffer was freed and frees all skbs whose data have already
3099  *  DMA'ed into the NICs internal memory.
3100  *  Return Value:
3101  *  NONE
3102  */
3103
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* Best-effort: if another context holds the fifo lock, just return;
	 * completions will be reaped on a later invocation.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *)
		fifo_data->list_info[get_info.offset].list_virt_addr;
	/* Walk descriptors the NIC has handed back (ownership bit clear)
	 * until we catch up with the put pointer.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
					parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				nic->mac_control.stats_info->sw_stat.
					tx_buf_abort_cnt++;
				break;

			case 3:
				nic->mac_control.stats_info->sw_stat.
					tx_desc_abort_cnt++;
				break;

			case 7:
				nic->mac_control.stats_info->sw_stat.
					tx_parity_err_cnt++;
				break;

			case 10:
				nic->mac_control.stats_info->sw_stat.
					tx_link_loss_cnt++;
				break;

			case 15:
				nic->mac_control.stats_info->sw_stat.
					tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the DMA buffers and recover the skb for this TxDL */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ", __func__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->dev->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get pointer; the fifo holds fifo_len + 1
		 * descriptor lists, so wrap at that boundary.
		 */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *)
			fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Restart the tx queue if we freed enough descriptors */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3189
3190 /**
3191  *  s2io_mdio_write - Function to write in to MDIO registers
3192  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3193  *  @addr     : address value
3194  *  @value    : data value
3195  *  @dev      : pointer to net_device structure
3196  *  Description:
3197  *  This function is used to write values to the MDIO registers
3198  *  NONE
3199  */
3200 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3201                             struct net_device *dev)
3202 {
3203         u64 val64;
3204         struct s2io_nic *sp = netdev_priv(dev);
3205         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3206
3207         /* address transaction */
3208         val64 = MDIO_MMD_INDX_ADDR(addr) |
3209                 MDIO_MMD_DEV_ADDR(mmd_type) |
3210                 MDIO_MMS_PRT_ADDR(0x0);
3211         writeq(val64, &bar0->mdio_control);
3212         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3213         writeq(val64, &bar0->mdio_control);
3214         udelay(100);
3215
3216         /* Data transaction */
3217         val64 = MDIO_MMD_INDX_ADDR(addr) |
3218                 MDIO_MMD_DEV_ADDR(mmd_type) |
3219                 MDIO_MMS_PRT_ADDR(0x0) |
3220                 MDIO_MDIO_DATA(value) |
3221                 MDIO_OP(MDIO_OP_WRITE_TRANS);
3222         writeq(val64, &bar0->mdio_control);
3223         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3224         writeq(val64, &bar0->mdio_control);
3225         udelay(100);
3226
3227         val64 = MDIO_MMD_INDX_ADDR(addr) |
3228                 MDIO_MMD_DEV_ADDR(mmd_type) |
3229                 MDIO_MMS_PRT_ADDR(0x0) |
3230                 MDIO_OP(MDIO_OP_READ_TRANS);
3231         writeq(val64, &bar0->mdio_control);
3232         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3233         writeq(val64, &bar0->mdio_control);
3234         udelay(100);
3235 }
3236
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Return value:
 *  The 16-bit value read from the addressed MDIO register.
 */
3246 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3247 {
3248         u64 val64 = 0x0;
3249         u64 rval64 = 0x0;
3250         struct s2io_nic *sp = netdev_priv(dev);
3251         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3252
3253         /* address transaction */
3254         val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3255                          | MDIO_MMD_DEV_ADDR(mmd_type)
3256                          | MDIO_MMS_PRT_ADDR(0x0));
3257         writeq(val64, &bar0->mdio_control);
3258         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3259         writeq(val64, &bar0->mdio_control);
3260         udelay(100);
3261
3262         /* Data transaction */
3263         val64 = MDIO_MMD_INDX_ADDR(addr) |
3264                 MDIO_MMD_DEV_ADDR(mmd_type) |
3265                 MDIO_MMS_PRT_ADDR(0x0) |
3266                 MDIO_OP(MDIO_OP_READ_TRANS);
3267         writeq(val64, &bar0->mdio_control);
3268         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3269         writeq(val64, &bar0->mdio_control);
3270         udelay(100);
3271
3272         /* Read the value from regs */
3273         rval64 = readq(&bar0->mdio_control);
3274         rval64 = rval64 & 0xFFFF0000;
3275         rval64 = rval64 >> 16;
3276         return rval64;
3277 }
3278
/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @regs_stat    : accumulated 2-bit-per-alarm state word
 *  @index        : index of the 2-bit field to update in @regs_stat
 *  @flag         : flag to indicate the status
 *  @type         : counter type
 *  Description:
 *  This function checks the status of the xpak counters value.
 *  Return value:
 *  NONE
 */
3288
3289 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3290                                   u16 flag, u16 type)
3291 {
3292         u64 mask = 0x3;
3293         u64 val64;
3294         int i;
3295         for (i = 0; i < index; i++)
3296                 mask = mask << 0x2;
3297
3298         if (flag > 0) {
3299                 *counter = *counter + 1;
3300                 val64 = *regs_stat & mask;
3301                 val64 = val64 >> (index * 0x2);
3302                 val64 = val64 + 1;
3303                 if (val64 == 3) {
3304                         switch (type) {
3305                         case 1:
3306                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3307                                           "service. Excessive temperatures may "
3308                                           "result in premature transceiver "
3309                                           "failure \n");
3310                                 break;
3311                         case 2:
3312                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3313                                           "service Excessive bias currents may "
3314                                           "indicate imminent laser diode "
3315                                           "failure \n");
3316                                 break;
3317                         case 3:
3318                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3319                                           "service Excessive laser output "
3320                                           "power may saturate far-end "
3321                                           "receiver\n");
3322                                 break;
3323                         default:
3324                                 DBG_PRINT(ERR_DBG,
3325                                           "Incorrect XPAK Alarm type\n");
3326                         }
3327                         val64 = 0x0;
3328                 }
3329                 val64 = val64 << (index * 0x2);
3330                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3331
3332         } else {
3333                 *regs_stat = *regs_stat & (~mask);
3334         }
3335 }
3336
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function updates the status of the xpak counters.
 *  Return value:
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* All-ones/all-zeroes readback means the MDIO slave did not respond */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register
	 * NOTE(review): 0xA100/0xA070/0xA074 look like XPAK-MSA vendor DOM
	 * registers — confirm against the XPAK transceiver datasheet.
	 */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Bit 7: transceiver temperature high alarm (3-strike counter) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	if (CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3433
3434 /**
3435  *  wait_for_cmd_complete - waits for a command to complete.
3436  *  @sp : private member of the device structure, which is a pointer to the
3437  *  s2io_nic structure.
3438  *  Description: Function that waits for a command to Write into RMAC
3439  *  ADDR DATA registers to be completed and returns either success or
3440  *  error depending on whether the command was complete or not.
3441  *  Return value:
3442  *   SUCCESS on success and FAILURE on failure.
3443  */
3444
3445 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3446                                  int bit_state)
3447 {
3448         int ret = FAILURE, cnt = 0, delay = 1;
3449         u64 val64;
3450
3451         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3452                 return FAILURE;
3453
3454         do {
3455                 val64 = readq(addr);
3456                 if (bit_state == S2IO_BIT_RESET) {
3457                         if (!(val64 & busy_bit)) {
3458                                 ret = SUCCESS;
3459                                 break;
3460                         }
3461                 } else {
3462                         if (!(val64 & busy_bit)) {
3463                                 ret = SUCCESS;
3464                                 break;
3465                         }
3466                 }
3467
3468                 if (in_interrupt())
3469                         mdelay(delay);
3470                 else
3471                         msleep(delay);
3472
3473                 if (++cnt >= 10)
3474                         delay = 50;
3475         } while (cnt < 20);
3476         return ret;
3477 }
3478 /*
3479  * check_pci_device_id - Checks if the device id is supported
3480  * @id : device id
3481  * Description: Function to check if the pci device id is supported by driver.
3482  * Return value: Actual device id if supported else PCI_ANY_ID
3483  */
3484 static u16 check_pci_device_id(u16 id)
3485 {
3486         switch (id) {
3487         case PCI_DEVICE_ID_HERC_WIN:
3488         case PCI_DEVICE_ID_HERC_UNI:
3489                 return XFRAME_II_DEVICE;
3490         case PCI_DEVICE_ID_S2IO_UNI:
3491         case PCI_DEVICE_ID_S2IO_WIN:
3492                 return XFRAME_I_DEVICE;
3493         default:
3494                 return PCI_ANY_ID;
3495         }
3496 }
3497
3498 /**
3499  *  s2io_reset - Resets the card.
3500  *  @sp : private member of the device structure.
3501  *  Description: Function to Reset the card. This function then also
3502  *  restores the previously saved PCI configuration space registers as
3503  *  the card reset also resets the configuration space.
3504  *  Return value:
3505  *  void.
3506  */
3507
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG, "%s - Resetting XFrame card %s\n",
		  __func__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the software reset */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	/* Poll until config space comes back (device id reads valid) */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Offset 0x2 in config space is the PCI device id */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Restore the saved PCI-X command register */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	/* Preserve cumulative counters across the stats-block wipe below */
	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* NOTE(review): 0x2700 is a raw BAR0 offset with no named
		 * register field here — confirm against the Xframe spec.
		 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Read-then-write-back acts as write-1-to-clear ack */
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3612
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */
3622
3623 static int s2io_set_swapper(struct s2io_nic *sp)
3624 {
3625         struct net_device *dev = sp->dev;
3626         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3627         u64 val64, valt, valr;
3628
3629         /*
3630          * Set proper endian settings and verify the same by reading
3631          * the PIF Feed-back register.
3632          */
3633
3634         val64 = readq(&bar0->pif_rd_swapper_fb);
3635         if (val64 != 0x0123456789ABCDEFULL) {
3636                 int i = 0;
3637                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3638                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3639                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3640                                 0};                     /* FE=0, SE=0 */
3641
3642                 while (i < 4) {
3643                         writeq(value[i], &bar0->swapper_ctrl);
3644                         val64 = readq(&bar0->pif_rd_swapper_fb);
3645                         if (val64 == 0x0123456789ABCDEFULL)
3646                                 break;
3647                         i++;
3648                 }
3649                 if (i == 4) {
3650                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3651                                   dev->name);
3652                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3653                                   (unsigned long long)val64);
3654                         return FAILURE;
3655                 }
3656                 valr = value[i];
3657         } else {
3658                 valr = readq(&bar0->swapper_ctrl);
3659         }
3660
3661         valt = 0x0123456789ABCDEFULL;
3662         writeq(valt, &bar0->xmsi_address);
3663         val64 = readq(&bar0->xmsi_address);
3664
3665         if (val64 != valt) {
3666                 int i = 0;
3667                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3668                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3669                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3670                                 0};                     /* FE=0, SE=0 */
3671
3672                 while (i < 4) {
3673                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3674                         writeq(valt, &bar0->xmsi_address);
3675                         val64 = readq(&bar0->xmsi_address);
3676                         if (val64 == valt)
3677                                 break;
3678                         i++;
3679                 }
3680                 if (i == 4) {
3681                         unsigned long long x = val64;
3682                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3683                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3684                         return FAILURE;
3685                 }
3686         }
3687         val64 = readq(&bar0->swapper_ctrl);
3688         val64 &= 0xFFFF000000000000ULL;
3689
3690 #ifdef __BIG_ENDIAN
3691         /*
3692          * The device by default set to a big endian format, so a
3693          * big endian driver need not set anything.
3694          */
3695         val64 |= (SWAPPER_CTRL_TXP_FE |
3696                   SWAPPER_CTRL_TXP_SE |
3697                   SWAPPER_CTRL_TXD_R_FE |
3698                   SWAPPER_CTRL_TXD_W_FE |
3699                   SWAPPER_CTRL_TXF_R_FE |
3700                   SWAPPER_CTRL_RXD_R_FE |
3701                   SWAPPER_CTRL_RXD_W_FE |
3702                   SWAPPER_CTRL_RXF_W_FE |
3703                   SWAPPER_CTRL_XMSI_FE |
3704                   SWAPPER_CTRL_STATS_FE |
3705                   SWAPPER_CTRL_STATS_SE);
3706         if (sp->config.intr_type == INTA)
3707                 val64 |= SWAPPER_CTRL_XMSI_SE;
3708         writeq(val64, &bar0->swapper_ctrl);
3709 #else
3710         /*
3711          * Initially we enable all bits to make it accessible by the
3712          * driver, then we selectively enable only those bits that
3713          * we want to set.
3714          */
3715         val64 |= (SWAPPER_CTRL_TXP_FE |
3716                   SWAPPER_CTRL_TXP_SE |
3717                   SWAPPER_CTRL_TXD_R_FE |
3718                   SWAPPER_CTRL_TXD_R_SE |
3719                   SWAPPER_CTRL_TXD_W_FE |
3720                   SWAPPER_CTRL_TXD_W_SE |
3721                   SWAPPER_CTRL_TXF_R_FE |
3722                   SWAPPER_CTRL_RXD_R_FE |
3723                   SWAPPER_CTRL_RXD_R_SE |
3724                   SWAPPER_CTRL_RXD_W_FE |
3725                   SWAPPER_CTRL_RXD_W_SE |
3726                   SWAPPER_CTRL_RXF_W_FE |
3727                   SWAPPER_CTRL_XMSI_FE |
3728                   SWAPPER_CTRL_STATS_FE |
3729                   SWAPPER_CTRL_STATS_SE);
3730         if (sp->config.intr_type == INTA)
3731                 val64 |= SWAPPER_CTRL_XMSI_SE;
3732         writeq(val64, &bar0->swapper_ctrl);
3733 #endif
3734         val64 = readq(&bar0->swapper_ctrl);
3735
3736         /*
3737          * Verifying if endian settings are accurate by reading a
3738          * feedback register.
3739          */
3740         val64 = readq(&bar0->pif_rd_swapper_fb);
3741         if (val64 != 0x0123456789ABCDEFULL) {
3742                 /* Endian settings are incorrect, calls for another dekko. */
3743                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3744                           dev->name);
3745                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3746                           (unsigned long long)val64);
3747                 return FAILURE;
3748         }
3749
3750         return SUCCESS;
3751 }
3752
3753 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3754 {
3755         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3756         u64 val64;
3757         int ret = 0, cnt = 0;
3758
3759         do {
3760                 val64 = readq(&bar0->xmsi_access);
3761                 if (!(val64 & s2BIT(15)))
3762                         break;
3763                 mdelay(1);
3764                 cnt++;
3765         } while (cnt < 5);
3766         if (cnt == 5) {
3767                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3768                 ret = 1;
3769         }
3770
3771         return ret;
3772 }
3773
3774 static void restore_xmsi_data(struct s2io_nic *nic)
3775 {
3776         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3777         u64 val64;
3778         int i, msix_index;
3779
3780         if (nic->device_type == XFRAME_I_DEVICE)
3781                 return;
3782
3783         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3784                 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3785                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3786                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3787                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3788                 writeq(val64, &bar0->xmsi_access);
3789                 if (wait_for_msix_trans(nic, msix_index)) {
3790                         DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3791                         continue;
3792                 }
3793         }
3794 }
3795
3796 static void store_xmsi_data(struct s2io_nic *nic)
3797 {
3798         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3799         u64 val64, addr, data;
3800         int i, msix_index;
3801
3802         if (nic->device_type == XFRAME_I_DEVICE)
3803                 return;
3804
3805         /* Store and display */
3806         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3807                 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3808                 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3809                 writeq(val64, &bar0->xmsi_access);
3810                 if (wait_for_msix_trans(nic, msix_index)) {
3811                         DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3812                         continue;
3813                 }
3814                 addr = readq(&bar0->xmsi_address);
3815                 data = readq(&bar0->xmsi_data);
3816                 if (addr && data) {
3817                         nic->msix_info[i].addr = addr;
3818                         nic->msix_info[i].data = data;
3819                 }
3820         }
3821 }
3822
/*
 * Allocate the MSI-X vector tables, steer each rx ring to its vector
 * and enable MSI-X on the device.  Returns 0 on success, -ENOMEM on
 * any failure (allocations are released and pointers cleared).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;

	/* Vector table handed to pci_enable_msix(). */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated += size;

	/* Driver-private bookkeeping parallel to 'entries'. */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated += size;

	/* Entry 0 is the alarm vector, serviced via the tx fifos arg. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries map to hardware MSI-X slots 1, 9, 17, ... */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each rx ring's interrupt to its MSI-X vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* flush the write */

	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret) {
		DBG_PRINT(ERR_DBG, "s2io: Enabling MSI-X failed\n");
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
		/* Clear the pointers so later teardown cannot double-free. */
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3903
3904 /* Handle software interrupt used during MSI(X) test */
3905 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3906 {
3907         struct s2io_nic *sp = dev_id;
3908
3909         sp->msi_detected = 1;
3910         wake_up(&sp->msi_wait);
3911
3912         return IRQ_HANDLED;
3913 }
3914
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Borrow vector 1 (the first ring vector) for the test handler. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm the scheduled-interrupt timer to fire once as MSI #1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Wait up to HZ/10 for s2io_test_intr() to set msi_detected. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n", sp->dev->name,
			  pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the original scheduled-interrupt control value. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3957
3958 static void remove_msix_isr(struct s2io_nic *sp)
3959 {
3960         int i;
3961         u16 msi_control;
3962
3963         for (i = 0; i < sp->num_entries; i++) {
3964                 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3965                         int vector = sp->entries[i].vector;
3966                         void *arg = sp->s2io_entries[i].arg;
3967                         free_irq(vector, arg);
3968                 }
3969         }
3970
3971         kfree(sp->entries);
3972         kfree(sp->s2io_entries);
3973         sp->entries = NULL;
3974         sp->s2io_entries = NULL;
3975
3976         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3977         msi_control &= 0xFFFE; /* Disable MSI */
3978         pci_write_config_word(sp->pdev, 0x42, msi_control);
3979
3980         pci_disable_msix(sp->pdev);
3981 }
3982
3983 static void remove_inta_isr(struct s2io_nic *sp)
3984 {
3985         struct net_device *dev = sp->dev;
3986
3987         free_irq(sp->pdev->irq, dev);
3988 }
3989
3990 /* ********************************************************* *
3991  * Functions defined below concern the OS part of the driver *
3992  * ********************************************************* */
3993
3994 /**
3995  *  s2io_open - open entry point of the driver
3996  *  @dev : pointer to the device structure.
3997  *  Description:
3998  *  This function is the open entry point of the driver. It mainly calls a
3999  *  function to allocate Rx buffers and inserts them into the buffer
4000  *  descriptors and then enables the Rx part of the NIC.
4001  *  Return value:
4002  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4003  *   file on failure.
4004  */
4005
4006 static int s2io_open(struct net_device *dev)
4007 {
4008         struct s2io_nic *sp = netdev_priv(dev);
4009         int err = 0;
4010
4011         /*
4012          * Make sure you have link off by default every time
4013          * Nic is initialized
4014          */
4015         netif_carrier_off(dev);
4016         sp->last_link_state = 0;
4017
4018         /* Initialize H/W and enable interrupts */
4019         err = s2io_card_up(sp);
4020         if (err) {
4021                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4022                           dev->name);
4023                 goto hw_init_failed;
4024         }
4025
4026         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4027                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4028                 s2io_card_down(sp);
4029                 err = -ENODEV;
4030                 goto hw_init_failed;
4031         }
4032         s2io_start_all_tx_queue(sp);
4033         return 0;
4034
4035 hw_init_failed:
4036         if (sp->config.intr_type == MSI_X) {
4037                 if (sp->entries) {
4038                         kfree(sp->entries);
4039                         sp->mac_control.stats_info->sw_stat.mem_freed
4040                                 += (sp->num_entries * sizeof(struct msix_entry));
4041                 }
4042                 if (sp->s2io_entries) {
4043                         kfree(sp->s2io_entries);
4044                         sp->mac_control.stats_info->sw_stat.mem_freed
4045                                 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4046                 }
4047         }
4048         return err;
4049 }
4050
/**
 *  s2io_close - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
4063
4064 static int s2io_close(struct net_device *dev)
4065 {
4066         struct s2io_nic *sp = netdev_priv(dev);
4067         struct config_param *config = &sp->config;
4068         u64 tmp64;
4069         int offset;
4070
4071         /* Return if the device is already closed               *
4072          *  Can happen when s2io_card_up failed in change_mtu    *
4073          */
4074         if (!is_s2io_card_up(sp))
4075                 return 0;
4076
4077         s2io_stop_all_tx_queue(sp);
4078         /* delete all populated mac entries */
4079         for (offset = 1; offset < config->max_mc_addr; offset++) {
4080                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4081                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4082                         do_s2io_delete_unicast_mc(sp, tmp64);
4083         }
4084
4085         s2io_card_down(sp);
4086
4087         return 0;
4088 }
4089
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start variable
 *  will not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */
4102
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length packets outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Drop packets while the card is down for reset. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		/* Default steering: hash TCP/UDP ports into a fifo index. */
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Only non-fragmented packets carry an L4 header. */
			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					/* UDP path uses trylock instead of
					 * spinning on a contended fifo. */
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Respect a stopped queue (multiqueue or legacy single queue). */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in offload (LSO / checksum / VLAN) bits of the first TxD. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		/* UFO: TxD0 carries an 8-byte in-band fragment id header. */
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
						      fifo->ufo_in_band_v,
						      sizeof(u64),
						      PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb into the (next) TxD. */
	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
					      frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
							 frag->page_offset,
							 frag->size,
							 PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Ring the doorbell: hand the TxD list to the hardware. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at fifo_len + 1. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue, drop the skb. */
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4332
4333 static void
4334 s2io_alarm_handle(unsigned long data)
4335 {
4336         struct s2io_nic *sp = (struct s2io_nic *)data;
4337         struct net_device *dev = sp->dev;
4338
4339         s2io_handle_errors(dev);
4340         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4341 }
4342
/* MSI-X handler for a single rx ring. */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's MSI-X vector, then hand off to NAPI. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* flush the write */
		napi_schedule(&ring->napi);
	} else {
		/* Non-NAPI: drain the ring and replenish rx buffers. */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4369
/* MSI-X handler for the alarm vector: services tx fifos and PIC events. */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while servicing tx. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap completed descriptors on every tx fifo. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved interrupt mask and flush. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4406
/**
 *  s2io_txpic_intr_handle - TxPIC (GPIO) interrupt handler
 *  @sp: device private variable
 *  Description: Services GPIO interrupts reporting link state changes.
 *  On link-up the adapter is enabled and the LED turned on; on
 *  link-down the LED is turned off.  In each case the GPIO interrupt
 *  mask is flipped so only the opposite transition interrupts next.
 *  Return Value: NONE
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			/* unmask both transitions so hardware can report again */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): adapter_status read result unused —
			 * presumably a required side-effecting read; confirm */
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* read back (value unused) — presumably flushes the posted
	 * mask writes before returning; verify against hardware docs */
	val64 = readq(&bar0->gpio_int_mask);
}
4466
4467 /**
4468  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4469  *  @value: alarm bits
4470  *  @addr: address value
4471  *  @cnt: counter variable
4472  *  Description: Check for alarm and increment the counter
4473  *  Return Value:
4474  *  1 - if alarm bit set
4475  *  0 - if alarm bit is not set
4476  */
4477 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4478                                  unsigned long long *cnt)
4479 {
4480         u64 val64;
4481         val64 = readq(addr);
4482         if (val64 & value) {
4483                 writeq(val64, addr);
4484                 (*cnt)++;
4485                 return 1;
4486         }
4487         return 0;
4488
4489 }
4490
/**
 *  s2io_handle_errors - Xframe error indication handler
 *  @dev_id: pointer to the net_device structure
 *  Description: Handle alarms such as loss of link, single or
 *  double ECC errors, critical and serious errors.  Each alarm
 *  register is checked via do_s2io_chk_alarm_bit(), which also
 *  clears the condition and bumps the matching sw_stat counter.
 *  Fatal alarms jump to the reset path, which stops Tx and
 *  schedules a soft reset.
 *  Return Value:
 *  NONE
 */
static void s2io_handle_errors(void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0, val64 = 0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	if (!is_s2io_card_up(sp))
		return;

	if (pci_channel_offline(sp->pdev))
		return;

	memset(&sw_stat->ring_full_cnt, 0,
	       sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if (stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* write back to clear the latched condition */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				  &sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				  &sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* each bump counter packs four 16-bit per-ring counts,
		 * most-significant field first */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
					  PFC_PCIX_ERR,
					  &bar0->pfc_err_reg,
					  &sw_stat->pfc_err_cnt))
			goto reset;
		/* single-bit ECC errors are counted but non-fatal */
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
				      &bar0->pfc_err_reg,
				      &sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
					  TDA_SM0_ERR_ALARM |
					  TDA_SM1_ERR_ALARM,
					  &bar0->tda_err_reg,
					  &sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				      &bar0->tda_err_reg,
				      &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
					  PCC_N_SERR | PCC_6_COF_OV_ERR |
					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
					  PCC_TXB_ECC_DB_ERR,
					  &bar0->pcc_err_reg,
					  &sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				      &bar0->pcc_err_reg,
				      &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
					  &bar0->tti_err_reg,
					  &sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				      &bar0->tti_err_reg,
				      &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
					  &bar0->lso_err_reg,
					  &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				      &bar0->lso_err_reg,
				      &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
					  &bar0->tpa_err_reg,
					  &sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
				      &bar0->tpa_err_reg,
				      &sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
					  &bar0->sm_err_reg,
					  &sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
					  &bar0->mac_tmac_err_reg,
					  &sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				      TMAC_DESC_ECC_SG_ERR |
				      TMAC_DESC_ECC_DB_ERR,
				      &bar0->mac_tmac_err_reg,
				      &sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
					  &bar0->xgxs_txgxs_err_reg,
					  &sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				      &bar0->xgxs_txgxs_err_reg,
				      &sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
					  RC_FTC_ECC_DB_ERR |
					  RC_PRCn_SM_ERR_ALARM |
					  RC_FTC_SM_ERR_ALARM,
					  &bar0->rc_err_reg,
					  &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
				      RC_FTC_ECC_SG_ERR |
				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				      &sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
					  PRC_PCI_AB_WR_Rn |
					  PRC_PCI_AB_F_WR_Rn,
					  &bar0->prc_pcix_err_reg,
					  &sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
				      PRC_PCI_DP_WR_Rn |
				      PRC_PCI_DP_F_WR_Rn,
				      &bar0->prc_pcix_err_reg,
				      &sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
					  &bar0->rpa_err_reg,
					  &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				      &bar0->rpa_err_reg,
				      &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
					  RDA_FRM_ECC_DB_N_AERR |
					  RDA_SM1_ERR_ALARM |
					  RDA_SM0_ERR_ALARM |
					  RDA_RXD_ECC_DB_SERR,
					  &bar0->rda_err_reg,
					  &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
				      RDA_FRM_ECC_SG_ERR |
				      RDA_MISC_ERR |
				      RDA_PCIX_ERR,
				      &bar0->rda_err_reg,
				      &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
					  &bar0->rti_err_reg,
					  &sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				      &bar0->rti_err_reg,
				      &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
					  &bar0->mac_rmac_err_reg,
					  &sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
				      RMAC_SINGLE_ECC_ERR |
				      RMAC_DOUBLE_ECC_ERR,
				      &bar0->mac_rmac_err_reg,
				      &sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
					  &bar0->xgxs_rxgxs_err_reg,
					  &sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if (val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
					  &bar0->mc_err_reg,
					  &sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
						goto reset;
				}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* fatal alarm: stop Tx and schedule a soft reset of the card */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
	return;
}
4788
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* mask all interrupts while servicing this one */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* hand Rx to NAPI; mask and ack Rx traffic */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * gets cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* restore the saved mask; read back to flush the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4897
4898 /**
4899  * s2io_updt_stats -
4900  */
4901 static void s2io_updt_stats(struct s2io_nic *sp)
4902 {
4903         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4904         u64 val64;
4905         int cnt = 0;
4906
4907         if (is_s2io_card_up(sp)) {
4908                 /* Apprx 30us on a 133 MHz bus */
4909                 val64 = SET_UPDT_CLICKS(10) |
4910                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4911                 writeq(val64, &bar0->stat_cfg);
4912                 do {
4913                         udelay(100);
4914                         val64 = readq(&bar0->stat_cfg);
4915                         if (!(val64 & s2BIT(0)))
4916                                 break;
4917                         cnt++;
4918                         if (cnt == 5)
4919                                 break; /* Updt failed */
4920                 } while (1);
4921         }
4922 }
4923
4924 /**
4925  *  s2io_get_stats - Updates the device statistics structure.
4926  *  @dev : pointer to the device structure.
4927  *  Description:
4928  *  This function updates the device statistics structure in the s2io_nic
4929  *  structure and returns a pointer to the same.
4930  *  Return value:
4931  *  pointer to the updated net_device_stats structure.
4932  */
4933
4934 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4935 {
4936         struct s2io_nic *sp = netdev_priv(dev);
4937         struct mac_info *mac_control;
4938         struct config_param *config;
4939         int i;
4940
4941
4942         mac_control = &sp->mac_control;
4943         config = &sp->config;
4944
4945         /* Configure Stats for immediate updt */
4946         s2io_updt_stats(sp);
4947
4948         /* Using sp->stats as a staging area, because reset (due to mtu
4949            change, for example) will clear some hardware counters */
4950         dev->stats.tx_packets +=
4951                 le32_to_cpu(mac_control->stats_info->tmac_frms) -
4952                 sp->stats.tx_packets;
4953         sp->stats.tx_packets =
4954                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4955         dev->stats.tx_errors +=
4956                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4957                 sp->stats.tx_errors;
4958         sp->stats.tx_errors =
4959                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4960         dev->stats.rx_errors +=
4961                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4962                 sp->stats.rx_errors;
4963         sp->stats.rx_errors =
4964                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4965         dev->stats.multicast =
4966                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4967                 sp->stats.multicast;
4968         sp->stats.multicast =
4969                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4970         dev->stats.rx_length_errors =
4971                 le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4972                 sp->stats.rx_length_errors;
4973         sp->stats.rx_length_errors =
4974                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4975
4976         /* collect per-ring rx_packets and rx_bytes */
4977         dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4978         for (i = 0; i < config->rx_ring_num; i++) {
4979                 struct ring_info *ring = &mac_control->rings[i];
4980
4981                 dev->stats.rx_packets += ring->rx_packets;
4982                 dev->stats.rx_bytes += ring->rx_bytes;
4983         }
4984
4985         return &dev->stats;
4986 }
4987
4988 /**
4989  *  s2io_set_multicast - entry point for multicast address enable/disable.
4990  *  @dev : pointer to the device structure
4991  *  Description:
4992  *  This function is a driver entry point which gets called by the kernel
4993  *  whenever multicast addresses must be enabled/disabled. This also gets
4994  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4995  *  determine, if multicast address must be enabled or if promiscuous mode
4996  *  is to be disabled etc.
4997  *  Return value:
4998  *  void.
4999  */
5000
5001 static void s2io_set_multicast(struct net_device *dev)
5002 {
5003         int i, j, prev_cnt;
5004         struct dev_mc_list *mclist;
5005         struct s2io_nic *sp = netdev_priv(dev);
5006         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5007         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
5008                 0xfeffffffffffULL;
5009         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
5010         void __iomem *add;
5011         struct config_param *config = &sp->config;
5012
5013         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
5014                 /*  Enable all Multicast addresses */
5015                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
5016                        &bar0->rmac_addr_data0_mem);
5017                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5018                        &bar0->rmac_addr_data1_mem);
5019                 val64 = RMAC_ADDR_CMD_MEM_WE |
5020                         RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5021                         RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
5022                 writeq(val64, &bar0->rmac_addr_cmd_mem);
5023                 /* Wait till command completes */
5024                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5025                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5026                                       S2IO_BIT_RESET);
5027
5028                 sp->m_cast_flg = 1;
5029                 sp->all_multi_pos = config->max_mc_addr - 1;
5030         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5031                 /*  Disable all Multicast addresses */
5032                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5033                        &bar0->rmac_addr_data0_mem);
5034                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5035                        &bar0->rmac_addr_data1_mem);
5036                 val64 = RMAC_ADDR_CMD_MEM_WE |
5037                         RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5038                         RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
5039                 writeq(val64, &bar0->rmac_addr_cmd_mem);
5040                 /* Wait till command completes */
5041                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5042                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5043                                       S2IO_BIT_RESET);
5044
5045                 sp->m_cast_flg = 0;
5046                 sp->all_multi_pos = 0;
5047         }
5048
5049         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5050                 /*  Put the NIC into promiscuous mode */
5051                 add = &bar0->mac_cfg;
5052                 val64 = readq(&bar0->mac_cfg);
5053                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5054
5055                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5056                 writel((u32)val64, add);
5057                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5058                 writel((u32) (val64 >> 32), (add + 4));
5059
5060                 if (vlan_tag_strip != 1) {
5061                         val64 = readq(&bar0->rx_pa_cfg);
5062                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5063                         writeq(val64, &bar0->rx_pa_cfg);
5064                         sp->vlan_strip_flag = 0;
5065                 }
5066
5067                 val64 = readq(&bar0->mac_cfg);
5068                 sp->promisc_flg = 1;
5069                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5070                           dev->name);
5071         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5072                 /*  Remove the NIC from promiscuous mode */
5073                 add = &bar0->mac_cfg;
5074                 val64 = readq(&bar0->mac_cfg);
5075                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5076
5077                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5078                 writel((u32)val64, add);
5079                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5080                 writel((u32) (val64 >> 32), (add + 4));
5081
5082                 if (vlan_tag_strip != 0) {
5083                         val64 = readq(&bar0->rx_pa_cfg);
5084                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5085                         writeq(val64, &bar0->rx_pa_cfg);
5086                         sp->vlan_strip_flag = 1;
5087                 }
5088
5089                 val64 = readq(&bar0->mac_cfg);
5090                 sp->promisc_flg = 0;
5091                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5092                           dev->name);
5093         }
5094
5095         /*  Update individual M_CAST address list */
5096         if ((!sp->m_cast_flg) && dev->mc_count) {
5097                 if (dev->mc_count >
5098                     (config->max_mc_addr - config->max_mac_addr)) {
5099                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5100                                   dev->name);
5101                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
5102                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5103                         return;
5104                 }
5105
5106                 prev_cnt = sp->mc_addr_count;
5107                 sp->mc_addr_count = dev->mc_count;
5108
5109                 /* Clear out the previous list of Mc in the H/W. */
5110                 for (i = 0; i < prev_cnt; i++) {
5111                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5112                                &bar0->rmac_addr_data0_mem);
5113                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5114                                &bar0->rmac_addr_data1_mem);
5115                         val64 = RMAC_ADDR_CMD_MEM_WE |
5116                                 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5117                                 RMAC_ADDR_CMD_MEM_OFFSET
5118                                 (config->mc_start_offset + i);
5119                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5120
5121                         /* Wait for command completes */
5122                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5123                                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5124                                                   S2IO_BIT_RESET)) {
5125                                 DBG_PRINT(ERR_DBG, "%s: Adding ", dev->name);
5126                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5127                                 return;
5128                         }
5129                 }
5130
5131                 /* Create the new Rx filter list and update the same in H/W. */
5132                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5133                      i++, mclist = mclist->next) {
5134                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5135                                ETH_ALEN);
5136                         mac_addr = 0;
5137                         for (j = 0; j < ETH_ALEN; j++) {
5138                                 mac_addr |= mclist->dmi_addr[j];
5139                                 mac_addr <<= 8;
5140                         }
5141                         mac_addr >>= 8;
5142                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5143                                &bar0->rmac_addr_data0_mem);
5144                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5145                                &bar0->rmac_addr_data1_mem);
5146                         val64 = RMAC_ADDR_CMD_MEM_WE |
5147                                 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5148                                 RMAC_ADDR_CMD_MEM_OFFSET
5149                                 (i + config->mc_start_offset);
5150                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5151
5152                         /* Wait for command completes */
5153                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5154                                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5155                                                   S2IO_BIT_RESET)) {
5156                                 DBG_PRINT(ERR_DBG, "%s: Adding ", dev->name);
5157                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5158                                 return;
5159                         }
5160                 }
5161         }
5162 }
5163
5164 /* read from CAM unicast & multicast addresses and store it in
5165  * def_mac_addr structure
5166  */
5167 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5168 {
5169         int offset;
5170         u64 mac_addr = 0x0;
5171         struct config_param *config = &sp->config;
5172
5173         /* store unicast & multicast mac addresses */
5174         for (offset = 0; offset < config->max_mc_addr; offset++) {
5175                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5176                 /* if read fails disable the entry */
5177                 if (mac_addr == FAILURE)
5178                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5179                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5180         }
5181 }
5182
5183 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5184 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5185 {
5186         int offset;
5187         struct config_param *config = &sp->config;
5188         /* restore unicast mac address */
5189         for (offset = 0; offset < config->max_mac_addr; offset++)
5190                 do_s2io_prog_unicast(sp->dev,
5191                                      sp->def_mac_addr[offset].mac_addr);
5192
5193         /* restore multicast mac address */
5194         for (offset = config->mc_start_offset;
5195              offset < config->max_mc_addr; offset++)
5196                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5197 }
5198
5199 /* add a multicast MAC address to CAM */
5200 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5201 {
5202         int i;
5203         u64 mac_addr = 0;
5204         struct config_param *config = &sp->config;
5205
5206         for (i = 0; i < ETH_ALEN; i++) {
5207                 mac_addr <<= 8;
5208                 mac_addr |= addr[i];
5209         }
5210         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5211                 return SUCCESS;
5212
5213         /* check if the multicast mac already preset in CAM */
5214         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5215                 u64 tmp64;
5216                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5217                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5218                         break;
5219
5220                 if (tmp64 == mac_addr)
5221                         return SUCCESS;
5222         }
5223         if (i == config->max_mc_addr) {
5224                 DBG_PRINT(ERR_DBG,
5225                           "CAM full no space left for multicast MAC\n");
5226                 return FAILURE;
5227         }
5228         /* Update the internal structure with this new mac address */
5229         do_s2io_copy_mac_addr(sp, i, mac_addr);
5230
5231         return do_s2io_add_mac(sp, mac_addr, i);
5232 }
5233
5234 /* add MAC address to CAM */
5235 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5236 {
5237         u64 val64;
5238         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5239
5240         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5241                &bar0->rmac_addr_data0_mem);
5242
5243         val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5244                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5245         writeq(val64, &bar0->rmac_addr_cmd_mem);
5246
5247         /* Wait till command completes */
5248         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5249                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5250                                   S2IO_BIT_RESET)) {
5251                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5252                 return FAILURE;
5253         }
5254         return SUCCESS;
5255 }
5256 /* deletes a specified unicast/multicast mac entry from CAM */
5257 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5258 {
5259         int offset;
5260         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5261         struct config_param *config = &sp->config;
5262
5263         for (offset = 1;
5264              offset < config->max_mc_addr; offset++) {
5265                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5266                 if (tmp64 == addr) {
5267                         /* disable the entry by writing  0xffffffffffffULL */
5268                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5269                                 return FAILURE;
5270                         /* store the new mac list from CAM */
5271                         do_s2io_store_unicast_mc(sp);
5272                         return SUCCESS;
5273                 }
5274         }
5275         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5276                   (unsigned long long)addr);
5277         return FAILURE;
5278 }
5279
5280 /* read mac entries from CAM */
5281 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5282 {
5283         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5284         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5285
5286         /* read mac addr */
5287         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5288                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5289         writeq(val64, &bar0->rmac_addr_cmd_mem);
5290
5291         /* Wait till command completes */
5292         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5293                                   RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5294                                   S2IO_BIT_RESET)) {
5295                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5296                 return FAILURE;
5297         }
5298         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5299
5300         return tmp64 >> 16;
5301 }
5302
5303 /**
5304  * s2io_set_mac_addr driver entry point
5305  */
5306
5307 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5308 {
5309         struct sockaddr *addr = p;
5310
5311         if (!is_valid_ether_addr(addr->sa_data))
5312                 return -EINVAL;
5313
5314         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5315
5316         /* store the MAC address in CAM */
5317         return do_s2io_prog_unicast(dev, dev->dev_addr);
5318 }
5319 /**
5320  *  do_s2io_prog_unicast - Programs the Xframe mac address
5321  *  @dev : pointer to the device structure.
5322  *  @addr: a uchar pointer to the new mac address which is to be set.
5323  *  Description : This procedure will program the Xframe to receive
5324  *  frames with new Mac Address
5325  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5326  *  as defined in errno.h file on failure.
5327  */
5328
5329 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5330 {
5331         struct s2io_nic *sp = netdev_priv(dev);
5332         register u64 mac_addr = 0, perm_addr = 0;
5333         int i;
5334         u64 tmp64;
5335         struct config_param *config = &sp->config;
5336
5337         /*
5338          * Set the new MAC address as the new unicast filter and reflect this
5339          * change on the device address registered with the OS. It will be
5340          * at offset 0.
5341          */
5342         for (i = 0; i < ETH_ALEN; i++) {
5343                 mac_addr <<= 8;
5344                 mac_addr |= addr[i];
5345                 perm_addr <<= 8;
5346                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5347         }
5348
5349         /* check if the dev_addr is different than perm_addr */
5350         if (mac_addr == perm_addr)
5351                 return SUCCESS;
5352
5353         /* check if the mac already preset in CAM */
5354         for (i = 1; i < config->max_mac_addr; i++) {
5355                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5356                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5357                         break;
5358
5359                 if (tmp64 == mac_addr) {
5360                         DBG_PRINT(INFO_DBG,
5361                                   "MAC addr:0x%llx already present in CAM\n",
5362                                   (unsigned long long)mac_addr);
5363                         return SUCCESS;
5364                 }
5365         }
5366         if (i == config->max_mac_addr) {
5367                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5368                 return FAILURE;
5369         }
5370         /* Update the internal structure with this new mac address */
5371         do_s2io_copy_mac_addr(sp, i, mac_addr);
5372
5373         return do_s2io_add_mac(sp, mac_addr, i);
5374 }
5375
5376 /**
5377  * s2io_ethtool_sset - Sets different link parameters.
5378  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5379  * @info: pointer to the structure with parameters given by ethtool to set
5380  * link information.
5381  * Description:
5382  * The function sets different link parameters provided by the user onto
5383  * the NIC.
5384  * Return value:
5385  * 0 on success.
5386  */
5387
5388 static int s2io_ethtool_sset(struct net_device *dev,
5389                              struct ethtool_cmd *info)
5390 {
5391         struct s2io_nic *sp = netdev_priv(dev);
5392         if ((info->autoneg == AUTONEG_ENABLE) ||
5393             (info->speed != SPEED_10000) ||
5394             (info->duplex != DUPLEX_FULL))
5395                 return -EINVAL;
5396         else {
5397                 s2io_close(sp->dev);
5398                 s2io_open(sp->dev);
5399         }
5400
5401         return 0;
5402 }
5403
5404 /**
5405  * s2io_ethtol_gset - Return link specific information.
5406  * @sp : private member of the device structure, pointer to the
5407  *      s2io_nic structure.
5408  * @info : pointer to the structure with parameters given by ethtool
5409  * to return link information.
5410  * Description:
5411  * Returns link specific information like speed, duplex etc.. to ethtool.
5412  * Return value :
5413  * return 0 on success.
5414  */
5415
5416 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5417 {
5418         struct s2io_nic *sp = netdev_priv(dev);
5419         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5420         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5421         info->port = PORT_FIBRE;
5422
5423         /* info->transceiver */
5424         info->transceiver = XCVR_EXTERNAL;
5425
5426         if (netif_carrier_ok(sp->dev)) {
5427                 info->speed = 10000;
5428                 info->duplex = DUPLEX_FULL;
5429         } else {
5430                 info->speed = -1;
5431                 info->duplex = -1;
5432         }
5433
5434         info->autoneg = AUTONEG_DISABLE;
5435         return 0;
5436 }
5437
5438 /**
5439  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5440  * @sp : private member of the device structure, which is a pointer to the
5441  * s2io_nic structure.
5442  * @info : pointer to the structure with parameters given by ethtool to
5443  * return driver information.
5444  * Description:
5445  * Returns driver specefic information like name, version etc.. to ethtool.
5446  * Return value:
5447  *  void
5448  */
5449
5450 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5451                                   struct ethtool_drvinfo *info)
5452 {
5453         struct s2io_nic *sp = netdev_priv(dev);
5454
5455         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5456         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5457         strncpy(info->fw_version, "", sizeof(info->fw_version));
5458         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5459         info->regdump_len = XENA_REG_SPACE;
5460         info->eedump_len = XENA_EEPROM_SPACE;
5461 }
5462
5463 /**
5464  *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5465  *  @sp: private member of the device structure, which is a pointer to the
5466  *  s2io_nic structure.
5467  *  @regs : pointer to the structure with parameters given by ethtool for
5468  *  dumping the registers.
5469  *  @reg_space: The input argument into which all the registers are dumped.
5470  *  Description:
5471  *  Dumps the entire register space of xFrame NIC into the user given
5472  *  buffer area.
5473  * Return value :
5474  * void .
5475  */
5476
5477 static void s2io_ethtool_gregs(struct net_device *dev,
5478                                struct ethtool_regs *regs, void *space)
5479 {
5480         int i;
5481         u64 reg;
5482         u8 *reg_space = (u8 *)space;
5483         struct s2io_nic *sp = netdev_priv(dev);
5484
5485         regs->len = XENA_REG_SPACE;
5486         regs->version = sp->pdev->subsystem_device;
5487
5488         for (i = 0; i < regs->len; i += 8) {
5489                 reg = readq(sp->bar0 + i);
5490                 memcpy((reg_space + i), &reg, 8);
5491         }