]> nv-tegra.nvidia Code Review - linux-3.10.git/blob - drivers/net/s2io.c
S2IO: Restoring the mac address in s2io_reset
[linux-3.10.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2 and 3.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
#define DRV_VERSION "2.0.17.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per receive-descriptor-mode tables, indexed by nic->rxd_mode:
 * rxd_size[]  - size in bytes of one RxD in that mode,
 * rxd_count[] - number of RxDs held in one Rx memory block
 *               (block holds rxd_count[mode] + 1 descriptors including
 *               the trailing link descriptor; see init_shared_mem()). */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 for an XFRAME_I_DEVICE whose subsystem id falls in
 * either faulty range, 0 otherwise.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (dev_type == XFRAME_I_DEVICE) ?                 \
                ((((subid >= 0x600B) && (subid <= 0x600D)) || \
                 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is considered up when neither a remote nor a local RMAC fault
 * is flagged in the adapter status value. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; non-zero result means it was already
 * in use.  NOTE: expands to use a local variable named 'sp'. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Return codes of rx_buffer_level(): how urgently the Rx ring needs
 * replenishing. */
#define PANIC   1
#define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
/* Ethtool related variables and Macros. */

/* Names of the ethtool self-test cases; one ETH_GSTRING_LEN entry each. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};

/* Hardware statistics names common to Xframe I and II, reported via
 * ethtool -S.  Order must match the counters in struct stat_block. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};

/* Additional hardware statistics available only on Xframe II adapters. */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259         {"\n DRIVER STATISTICS"},
260         {"single_bit_ecc_errs"},
261         {"double_bit_ecc_errs"},
262         {"parity_err_cnt"},
263         {"serious_err_cnt"},
264         {"soft_reset_cnt"},
265         {"fifo_full_cnt"},
266         {"ring_full_cnt"},
267         ("alarm_transceiver_temp_high"),
268         ("alarm_transceiver_temp_low"),
269         ("alarm_laser_bias_current_high"),
270         ("alarm_laser_bias_current_low"),
271         ("alarm_laser_output_power_high"),
272         ("alarm_laser_output_power_low"),
273         ("warn_transceiver_temp_high"),
274         ("warn_transceiver_temp_low"),
275         ("warn_laser_bias_current_high"),
276         ("warn_laser_bias_current_low"),
277         ("warn_laser_output_power_high"),
278         ("warn_laser_output_power_low"),
279         ("lro_aggregated_pkts"),
280         ("lro_flush_both_count"),
281         ("lro_out_of_sequence_pkts"),
282         ("lro_flush_due_to_max_pkts"),
283         ("lro_avg_aggr_pkts"),
284 };
285
/* Entry counts derived from the key tables above; used to size the
 * ethtool statistics and string buffers per adapter type. */
#define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
#define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
                                        ETH_GSTRING_LEN
#define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN

/* Xframe I exports the common + driver stats; Xframe II additionally
 * exports the enhanced hardware stats. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )

/* Byte sizes of the corresponding string blocks. */
#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )

/* Self-test count and total self-test string bytes. */
#define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
#define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
299
/*
 * S2IO_TIMER_CONF - initialise 'timer', attach handler 'handle' with
 * 'arg' as its data word, and arm it to fire 'exp' jiffies from now.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement; the previous bare-statement form silently
 * mis-expanded inside an unbraced if/else body.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) arg;               \
                mod_timer(&timer, (jiffies + exp));             \
        } while (0)
305
306 /* Add the vlan */
307 static void s2io_vlan_rx_register(struct net_device *dev,
308                                         struct vlan_group *grp)
309 {
310         struct s2io_nic *nic = dev->priv;
311         unsigned long flags;
312
313         spin_lock_irqsave(&nic->tx_lock, flags);
314         nic->vlgrp = grp;
315         spin_unlock_irqrestore(&nic->tx_lock, flags);
316 }
317
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
/* NOTE(review): defined with external linkage — confirm no other
 * translation unit references it; if not, it should be made static. */
int vlan_strip_flag;
320
321 /* Unregister the vlan */
322 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
323 {
324         struct s2io_nic *nic = dev->priv;
325         unsigned long flags;
326
327         spin_lock_irqsave(&nic->tx_lock, flags);
328         if (nic->vlgrp)
329                 nic->vlgrp->vlan_devices[vid] = NULL;
330         spin_unlock_irqrestore(&nic->tx_lock, flags);
331 }
332
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.  Each table is a flat sequence of 64-bit command words,
 * terminated by END_SIGN; entries alternate between address-set and
 * data-write operations as annotated below.
 */

#define END_SIGN        0x0

/* XAUI configuration sequence for Xframe II (Hercules) adapters. */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};

/* XAUI configuration sequence for Xframe I (Xena) adapters. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
397
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters.  See the file header for the meaning of
 * each parameter; S2IO_PARM_INT declares an int parameter with the
 * given default. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts: FIFO 0 defaults to DEFAULT_FIFO_0_LEN,
 * the remaining FIFOs to DEFAULT_FIFO_1_7_LEN. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts; every ring defaults to SMALL_BLK_CNT. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring rts_frm_len values, all zero by default.
 * NOTE(review): presumably a per-ring frame-length steering value —
 * confirm against the code that consumes it later in the file. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Expose the three arrays above as writable-at-load module parameters. */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
444
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 * NOTE(review): the table is tagged __devinitdata yet is referenced by
 * MODULE_DEVICE_TABLE and s2io_driver.id_table — confirm it is not
 * accessed after init memory is released (many drivers later dropped
 * this annotation for exactly that reason).
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}   /* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI driver glue: binds the id table to the probe/remove callbacks. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
469
/* A simplifier macro used both by init and free shared_mem Fns().
 * Computes ceil(len / per_each): the number of pages needed to hold
 * 'len' items when 'per_each' items fit per page.  Arguments are fully
 * parenthesized so expression arguments expand correctly; note that
 * 'per_each' is evaluated twice, so avoid side effects. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
472
473 /**
474  * init_shared_mem - Allocation and Initialization of Memory
475  * @nic: Device private variable.
476  * Description: The function allocates all the memory areas shared
477  * between the NIC and the driver. This includes Tx descriptors,
478  * Rx descriptors and the statistics block.
479  */
480
481 static int init_shared_mem(struct s2io_nic *nic)
482 {
483         u32 size;
484         void *tmp_v_addr, *tmp_v_addr_next;
485         dma_addr_t tmp_p_addr, tmp_p_addr_next;
486         struct RxD_block *pre_rxd_blk = NULL;
487         int i, j, blk_cnt;
488         int lst_size, lst_per_page;
489         struct net_device *dev = nic->dev;
490         unsigned long tmp;
491         struct buffAdd *ba;
492
493         struct mac_info *mac_control;
494         struct config_param *config;
495
496         mac_control = &nic->mac_control;
497         config = &nic->config;
498
499
500         /* Allocation and initialization of TXDLs in FIOFs */
501         size = 0;
502         for (i = 0; i < config->tx_fifo_num; i++) {
503                 size += config->tx_cfg[i].fifo_len;
504         }
505         if (size > MAX_AVAILABLE_TXDS) {
506                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
507                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
508                 return -EINVAL;
509         }
510
511         lst_size = (sizeof(struct TxD) * config->max_txds);
512         lst_per_page = PAGE_SIZE / lst_size;
513
514         for (i = 0; i < config->tx_fifo_num; i++) {
515                 int fifo_len = config->tx_cfg[i].fifo_len;
516                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
517                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
518                                                           GFP_KERNEL);
519                 if (!mac_control->fifos[i].list_info) {
520                         DBG_PRINT(ERR_DBG,
521                                   "Malloc failed for list_info\n");
522                         return -ENOMEM;
523                 }
524                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
525         }
526         for (i = 0; i < config->tx_fifo_num; i++) {
527                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
528                                                 lst_per_page);
529                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
530                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
531                     config->tx_cfg[i].fifo_len - 1;
532                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
533                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
534                     config->tx_cfg[i].fifo_len - 1;
535                 mac_control->fifos[i].fifo_no = i;
536                 mac_control->fifos[i].nic = nic;
537                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
538
539                 for (j = 0; j < page_num; j++) {
540                         int k = 0;
541                         dma_addr_t tmp_p;
542                         void *tmp_v;
543                         tmp_v = pci_alloc_consistent(nic->pdev,
544                                                      PAGE_SIZE, &tmp_p);
545                         if (!tmp_v) {
546                                 DBG_PRINT(ERR_DBG,
547                                           "pci_alloc_consistent ");
548                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
549                                 return -ENOMEM;
550                         }
551                         /* If we got a zero DMA address(can happen on
552                          * certain platforms like PPC), reallocate.
553                          * Store virtual address of page we don't want,
554                          * to be freed later.
555                          */
556                         if (!tmp_p) {
557                                 mac_control->zerodma_virt_addr = tmp_v;
558                                 DBG_PRINT(INIT_DBG,
559                                 "%s: Zero DMA address for TxDL. ", dev->name);
560                                 DBG_PRINT(INIT_DBG,
561                                 "Virtual address %p\n", tmp_v);
562                                 tmp_v = pci_alloc_consistent(nic->pdev,
563                                                      PAGE_SIZE, &tmp_p);
564                                 if (!tmp_v) {
565                                         DBG_PRINT(ERR_DBG,
566                                           "pci_alloc_consistent ");
567                                         DBG_PRINT(ERR_DBG, "failed for TxDL\n");
568                                         return -ENOMEM;
569                                 }
570                         }
571                         while (k < lst_per_page) {
572                                 int l = (j * lst_per_page) + k;
573                                 if (l == config->tx_cfg[i].fifo_len)
574                                         break;
575                                 mac_control->fifos[i].list_info[l].list_virt_addr =
576                                     tmp_v + (k * lst_size);
577                                 mac_control->fifos[i].list_info[l].list_phy_addr =
578                                     tmp_p + (k * lst_size);
579                                 k++;
580                         }
581                 }
582         }
583
584         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
585         if (!nic->ufo_in_band_v)
586                 return -ENOMEM;
587
588         /* Allocation and initialization of RXDs in Rings */
589         size = 0;
590         for (i = 0; i < config->rx_ring_num; i++) {
591                 if (config->rx_cfg[i].num_rxd %
592                     (rxd_count[nic->rxd_mode] + 1)) {
593                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
594                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
595                                   i);
596                         DBG_PRINT(ERR_DBG, "RxDs per Block");
597                         return FAILURE;
598                 }
599                 size += config->rx_cfg[i].num_rxd;
600                 mac_control->rings[i].block_count =
601                         config->rx_cfg[i].num_rxd /
602                         (rxd_count[nic->rxd_mode] + 1 );
603                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
604                         mac_control->rings[i].block_count;
605         }
606         if (nic->rxd_mode == RXD_MODE_1)
607                 size = (size * (sizeof(struct RxD1)));
608         else
609                 size = (size * (sizeof(struct RxD3)));
610
611         for (i = 0; i < config->rx_ring_num; i++) {
612                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
613                 mac_control->rings[i].rx_curr_get_info.offset = 0;
614                 mac_control->rings[i].rx_curr_get_info.ring_len =
615                     config->rx_cfg[i].num_rxd - 1;
616                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
617                 mac_control->rings[i].rx_curr_put_info.offset = 0;
618                 mac_control->rings[i].rx_curr_put_info.ring_len =
619                     config->rx_cfg[i].num_rxd - 1;
620                 mac_control->rings[i].nic = nic;
621                 mac_control->rings[i].ring_no = i;
622
623                 blk_cnt = config->rx_cfg[i].num_rxd /
624                                 (rxd_count[nic->rxd_mode] + 1);
625                 /*  Allocating all the Rx blocks */
626                 for (j = 0; j < blk_cnt; j++) {
627                         struct rx_block_info *rx_blocks;
628                         int l;
629
630                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
631                         size = SIZE_OF_BLOCK; //size is always page size
632                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
633                                                           &tmp_p_addr);
634                         if (tmp_v_addr == NULL) {
635                                 /*
636                                  * In case of failure, free_shared_mem()
637                                  * is called, which should free any
638                                  * memory that was alloced till the
639                                  * failure happened.
640                                  */
641                                 rx_blocks->block_virt_addr = tmp_v_addr;
642                                 return -ENOMEM;
643                         }
644                         memset(tmp_v_addr, 0, size);
645                         rx_blocks->block_virt_addr = tmp_v_addr;
646                         rx_blocks->block_dma_addr = tmp_p_addr;
647                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
648                                                   rxd_count[nic->rxd_mode],
649                                                   GFP_KERNEL);
650                         if (!rx_blocks->rxds)
651                                 return -ENOMEM;
652                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
653                                 rx_blocks->rxds[l].virt_addr =
654                                         rx_blocks->block_virt_addr +
655                                         (rxd_size[nic->rxd_mode] * l);
656                                 rx_blocks->rxds[l].dma_addr =
657                                         rx_blocks->block_dma_addr +
658                                         (rxd_size[nic->rxd_mode] * l);
659                         }
660                 }
661                 /* Interlinking all Rx Blocks */
662                 for (j = 0; j < blk_cnt; j++) {
663                         tmp_v_addr =
664                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
665                         tmp_v_addr_next =
666                                 mac_control->rings[i].rx_blocks[(j + 1) %
667                                               blk_cnt].block_virt_addr;
668                         tmp_p_addr =
669                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
670                         tmp_p_addr_next =
671                                 mac_control->rings[i].rx_blocks[(j + 1) %
672                                               blk_cnt].block_dma_addr;
673
674                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
675                         pre_rxd_blk->reserved_2_pNext_RxD_block =
676                             (unsigned long) tmp_v_addr_next;
677                         pre_rxd_blk->pNext_RxD_Blk_physical =
678                             (u64) tmp_p_addr_next;
679                 }
680         }
681         if (nic->rxd_mode >= RXD_MODE_3A) {
682                 /*
683                  * Allocation of Storages for buffer addresses in 2BUFF mode
684                  * and the buffers as well.
685                  */
686                 for (i = 0; i < config->rx_ring_num; i++) {
687                         blk_cnt = config->rx_cfg[i].num_rxd /
688                            (rxd_count[nic->rxd_mode]+ 1);
689                         mac_control->rings[i].ba =
690                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
691                                      GFP_KERNEL);
692                         if (!mac_control->rings[i].ba)
693                                 return -ENOMEM;
694                         for (j = 0; j < blk_cnt; j++) {
695                                 int k = 0;
696                                 mac_control->rings[i].ba[j] =
697                                         kmalloc((sizeof(struct buffAdd) *
698                                                 (rxd_count[nic->rxd_mode] + 1)),
699                                                 GFP_KERNEL);
700                                 if (!mac_control->rings[i].ba[j])
701                                         return -ENOMEM;
702                                 while (k != rxd_count[nic->rxd_mode]) {
703                                         ba = &mac_control->rings[i].ba[j][k];
704
705                                         ba->ba_0_org = (void *) kmalloc
706                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
707                                         if (!ba->ba_0_org)
708                                                 return -ENOMEM;
709                                         tmp = (unsigned long)ba->ba_0_org;
710                                         tmp += ALIGN_SIZE;
711                                         tmp &= ~((unsigned long) ALIGN_SIZE);
712                                         ba->ba_0 = (void *) tmp;
713
714                                         ba->ba_1_org = (void *) kmalloc
715                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
716                                         if (!ba->ba_1_org)
717                                                 return -ENOMEM;
718                                         tmp = (unsigned long) ba->ba_1_org;
719                                         tmp += ALIGN_SIZE;
720                                         tmp &= ~((unsigned long) ALIGN_SIZE);
721                                         ba->ba_1 = (void *) tmp;
722                                         k++;
723                                 }
724                         }
725                 }
726         }
727
728         /* Allocation and initialization of Statistics block */
729         size = sizeof(struct stat_block);
730         mac_control->stats_mem = pci_alloc_consistent
731             (nic->pdev, size, &mac_control->stats_mem_phy);
732
733         if (!mac_control->stats_mem) {
734                 /*
735                  * In case of failure, free_shared_mem() is called, which
                 * should free any memory that was allocated till the
737                  * failure happened.
738                  */
739                 return -ENOMEM;
740         }
741         mac_control->stats_mem_sz = size;
742
743         tmp_v_addr = mac_control->stats_mem;
744         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
745         memset(tmp_v_addr, 0, size);
746         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
747                   (unsigned long long) tmp_p_addr);
748
749         return SUCCESS;
750 }
751
752 /**
753  * free_shared_mem - Free the allocated Memory
754  * @nic:  Device private variable.
755  * Description: This function is to free all memory locations allocated by
756  * the init_shared_mem() function and return it to the kernel.
757  */
758
759 static void free_shared_mem(struct s2io_nic *nic)
760 {
761         int i, j, blk_cnt, size;
762         void *tmp_v_addr;
763         dma_addr_t tmp_p_addr;
764         struct mac_info *mac_control;
765         struct config_param *config;
766         int lst_size, lst_per_page;
767         struct net_device *dev = nic->dev;
768
769         if (!nic)
770                 return;
771
772         mac_control = &nic->mac_control;
773         config = &nic->config;
774
775         lst_size = (sizeof(struct TxD) * config->max_txds);
776         lst_per_page = PAGE_SIZE / lst_size;
777
778         for (i = 0; i < config->tx_fifo_num; i++) {
779                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
780                                                 lst_per_page);
781                 for (j = 0; j < page_num; j++) {
782                         int mem_blks = (j * lst_per_page);
783                         if (!mac_control->fifos[i].list_info)
784                                 return;
785                         if (!mac_control->fifos[i].list_info[mem_blks].
786                                  list_virt_addr)
787                                 break;
788                         pci_free_consistent(nic->pdev, PAGE_SIZE,
789                                             mac_control->fifos[i].
790                                             list_info[mem_blks].
791                                             list_virt_addr,
792                                             mac_control->fifos[i].
793                                             list_info[mem_blks].
794                                             list_phy_addr);
795                 }
796                 /* If we got a zero DMA address during allocation,
797                  * free the page now
798                  */
799                 if (mac_control->zerodma_virt_addr) {
800                         pci_free_consistent(nic->pdev, PAGE_SIZE,
801                                             mac_control->zerodma_virt_addr,
802                                             (dma_addr_t)0);
803                         DBG_PRINT(INIT_DBG,
804                                 "%s: Freeing TxDL with zero DMA addr. ",
805                                 dev->name);
806                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
807                                 mac_control->zerodma_virt_addr);
808                 }
809                 kfree(mac_control->fifos[i].list_info);
810         }
811
812         size = SIZE_OF_BLOCK;
813         for (i = 0; i < config->rx_ring_num; i++) {
814                 blk_cnt = mac_control->rings[i].block_count;
815                 for (j = 0; j < blk_cnt; j++) {
816                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
817                                 block_virt_addr;
818                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
819                                 block_dma_addr;
820                         if (tmp_v_addr == NULL)
821                                 break;
822                         pci_free_consistent(nic->pdev, size,
823                                             tmp_v_addr, tmp_p_addr);
824                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
825                 }
826         }
827
828         if (nic->rxd_mode >= RXD_MODE_3A) {
829                 /* Freeing buffer storage addresses in 2BUFF mode. */
830                 for (i = 0; i < config->rx_ring_num; i++) {
831                         blk_cnt = config->rx_cfg[i].num_rxd /
832                             (rxd_count[nic->rxd_mode] + 1);
833                         for (j = 0; j < blk_cnt; j++) {
834                                 int k = 0;
835                                 if (!mac_control->rings[i].ba[j])
836                                         continue;
837                                 while (k != rxd_count[nic->rxd_mode]) {
838                                         struct buffAdd *ba =
839                                                 &mac_control->rings[i].ba[j][k];
840                                         kfree(ba->ba_0_org);
841                                         kfree(ba->ba_1_org);
842                                         k++;
843                                 }
844                                 kfree(mac_control->rings[i].ba[j]);
845                         }
846                         kfree(mac_control->rings[i].ba);
847                 }
848         }
849
850         if (mac_control->stats_mem) {
851                 pci_free_consistent(nic->pdev,
852                                     mac_control->stats_mem_sz,
853                                     mac_control->stats_mem,
854                                     mac_control->stats_mem_phy);
855         }
856         if (nic->ufo_in_band_v)
857                 kfree(nic->ufo_in_band_v);
858 }
859
860 /**
 * s2io_verify_pci_mode - return the PCI mode reported by the adapter,
 * or -1 if the hardware flags the mode as unknown.
862  */
863
864 static int s2io_verify_pci_mode(struct s2io_nic *nic)
865 {
866         struct XENA_dev_config __iomem *bar0 = nic->bar0;
867         register u64 val64 = 0;
868         int     mode;
869
870         val64 = readq(&bar0->pci_mode);
871         mode = (u8)GET_PCI_MODE(val64);
872
873         if ( val64 & PCI_MODE_UNKNOWN_MODE)
874                 return -1;      /* Unknown PCI mode */
875         return mode;
876 }
877
878 #define NEC_VENID   0x1033
879 #define NEC_DEVID   0x0125
880 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
881 {
882         struct pci_dev *tdev = NULL;
883         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
884                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
885                         if (tdev->bus == s2io_pdev->bus->parent)
886                                 pci_dev_put(tdev);
887                                 return 1;
888                 }
889         }
890         return 0;
891 }
892
/* Effective bus speed in MHz, indexed by the mode value from GET_PCI_MODE().
 * NOTE(review): presumably ordered PCI 33/66, PCI-X M1 66/100/133,
 * PCI-X M2 66/100/133 (M2 entries are doubled effective rates) — confirm
 * against the PCI_MODE_* macro definitions.
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
894 /**
 * s2io_print_pci_mode - log the PCI bus width and speed the adapter
 * negotiated, record it in config->bus_speed, and return the mode.
896  */
897 static int s2io_print_pci_mode(struct s2io_nic *nic)
898 {
899         struct XENA_dev_config __iomem *bar0 = nic->bar0;
900         register u64 val64 = 0;
901         int     mode;
902         struct config_param *config = &nic->config;
903
904         val64 = readq(&bar0->pci_mode);
905         mode = (u8)GET_PCI_MODE(val64);
906
907         if ( val64 & PCI_MODE_UNKNOWN_MODE)
908                 return -1;      /* Unknown PCI mode */
909
910         config->bus_speed = bus_speed[mode];
911
912         if (s2io_on_nec_bridge(nic->pdev)) {
913                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
914                                                         nic->dev->name);
915                 return mode;
916         }
917
918         if (val64 & PCI_MODE_32_BITS) {
919                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
920         } else {
921                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
922         }
923
924         switch(mode) {
925                 case PCI_MODE_PCI_33:
926                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
927                         break;
928                 case PCI_MODE_PCI_66:
929                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
930                         break;
931                 case PCI_MODE_PCIX_M1_66:
932                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
933                         break;
934                 case PCI_MODE_PCIX_M1_100:
935                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
936                         break;
937                 case PCI_MODE_PCIX_M1_133:
938                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
939                         break;
940                 case PCI_MODE_PCIX_M2_66:
941                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
942                         break;
943                 case PCI_MODE_PCIX_M2_100:
944                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
945                         break;
946                 case PCI_MODE_PCIX_M2_133:
947                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
948                         break;
949                 default:
950                         return -1;      /* Unsupported bus speed */
951         }
952
953         return mode;
954 }
955
956 /**
957  *  init_nic - Initialization of hardware
 *  @nic: device private variable
959  *  Description: The function sequentially configures every block
960  *  of the H/W from their reset values.
961  *  Return Value:  SUCCESS on success and
962  *  '-1' on failure (endian settings incorrect).
963  */
964
965 static int init_nic(struct s2io_nic *nic)
966 {
967         struct XENA_dev_config __iomem *bar0 = nic->bar0;
968         struct net_device *dev = nic->dev;
969         register u64 val64 = 0;
970         void __iomem *add;
971         u32 time;
972         int i, j;
973         struct mac_info *mac_control;
974         struct config_param *config;
975         int dtx_cnt = 0;
976         unsigned long long mem_share;
977         int mem_size;
978
979         mac_control = &nic->mac_control;
980         config = &nic->config;
981
982         /* to set the swapper controle on the card */
983         if(s2io_set_swapper(nic)) {
984                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
985                 return -1;
986         }
987
988         /*
989          * Herc requires EOI to be removed from reset before XGXS, so..
990          */
991         if (nic->device_type & XFRAME_II_DEVICE) {
992                 val64 = 0xA500000000ULL;
993                 writeq(val64, &bar0->sw_reset);
994                 msleep(500);
995                 val64 = readq(&bar0->sw_reset);
996         }
997
998         /* Remove XGXS from reset state */
999         val64 = 0;
1000         writeq(val64, &bar0->sw_reset);
1001         msleep(500);
1002         val64 = readq(&bar0->sw_reset);
1003
1004         /*  Enable Receiving broadcasts */
1005         add = &bar0->mac_cfg;
1006         val64 = readq(&bar0->mac_cfg);
1007         val64 |= MAC_RMAC_BCAST_ENABLE;
1008         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1009         writel((u32) val64, add);
1010         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1011         writel((u32) (val64 >> 32), (add + 4));
1012
1013         /* Read registers in all blocks */
1014         val64 = readq(&bar0->mac_int_mask);
1015         val64 = readq(&bar0->mc_int_mask);
1016         val64 = readq(&bar0->xgxs_int_mask);
1017
1018         /*  Set MTU */
1019         val64 = dev->mtu;
1020         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1021
1022         if (nic->device_type & XFRAME_II_DEVICE) {
1023                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1024                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1025                                           &bar0->dtx_control, UF);
1026                         if (dtx_cnt & 0x1)
1027                                 msleep(1); /* Necessary!! */
1028                         dtx_cnt++;
1029                 }
1030         } else {
1031                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1032                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1033                                           &bar0->dtx_control, UF);
1034                         val64 = readq(&bar0->dtx_control);
1035                         dtx_cnt++;
1036                 }
1037         }
1038
1039         /*  Tx DMA Initialization */
1040         val64 = 0;
1041         writeq(val64, &bar0->tx_fifo_partition_0);
1042         writeq(val64, &bar0->tx_fifo_partition_1);
1043         writeq(val64, &bar0->tx_fifo_partition_2);
1044         writeq(val64, &bar0->tx_fifo_partition_3);
1045
1046
1047         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1048                 val64 |=
1049                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1050                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1051                                     ((i * 32) + 5), 3);
1052
1053                 if (i == (config->tx_fifo_num - 1)) {
1054                         if (i % 2 == 0)
1055                                 i++;
1056                 }
1057
1058                 switch (i) {
1059                 case 1:
1060                         writeq(val64, &bar0->tx_fifo_partition_0);
1061                         val64 = 0;
1062                         break;
1063                 case 3:
1064                         writeq(val64, &bar0->tx_fifo_partition_1);
1065                         val64 = 0;
1066                         break;
1067                 case 5:
1068                         writeq(val64, &bar0->tx_fifo_partition_2);
1069                         val64 = 0;
1070                         break;
1071                 case 7:
1072                         writeq(val64, &bar0->tx_fifo_partition_3);
1073                         break;
1074                 }
1075         }
1076
1077         /*
1078          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1079          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1080          */
1081         if ((nic->device_type == XFRAME_I_DEVICE) &&
1082                 (get_xena_rev_id(nic->pdev) < 4))
1083                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1084
1085         val64 = readq(&bar0->tx_fifo_partition_0);
1086         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1087                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1088
1089         /*
1090          * Initialization of Tx_PA_CONFIG register to ignore packet
1091          * integrity checking.
1092          */
1093         val64 = readq(&bar0->tx_pa_cfg);
1094         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1095             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1096         writeq(val64, &bar0->tx_pa_cfg);
1097
1098         /* Rx DMA intialization. */
1099         val64 = 0;
1100         for (i = 0; i < config->rx_ring_num; i++) {
1101                 val64 |=
1102                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1103                          3);
1104         }
1105         writeq(val64, &bar0->rx_queue_priority);
1106
1107         /*
1108          * Allocating equal share of memory to all the
1109          * configured Rings.
1110          */
1111         val64 = 0;
1112         if (nic->device_type & XFRAME_II_DEVICE)
1113                 mem_size = 32;
1114         else
1115                 mem_size = 64;
1116
1117         for (i = 0; i < config->rx_ring_num; i++) {
1118                 switch (i) {
1119                 case 0:
1120                         mem_share = (mem_size / config->rx_ring_num +
1121                                      mem_size % config->rx_ring_num);
1122                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1123                         continue;
1124                 case 1:
1125                         mem_share = (mem_size / config->rx_ring_num);
1126                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1127                         continue;
1128                 case 2:
1129                         mem_share = (mem_size / config->rx_ring_num);
1130                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1131                         continue;
1132                 case 3:
1133                         mem_share = (mem_size / config->rx_ring_num);
1134                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1135                         continue;
1136                 case 4:
1137                         mem_share = (mem_size / config->rx_ring_num);
1138                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1139                         continue;
1140                 case 5:
1141                         mem_share = (mem_size / config->rx_ring_num);
1142                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1143                         continue;
1144                 case 6:
1145                         mem_share = (mem_size / config->rx_ring_num);
1146                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1147                         continue;
1148                 case 7:
1149                         mem_share = (mem_size / config->rx_ring_num);
1150                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1151                         continue;
1152                 }
1153         }
1154         writeq(val64, &bar0->rx_queue_cfg);
1155
1156         /*
1157          * Filling Tx round robin registers
1158          * as per the number of FIFOs
1159          */
1160         switch (config->tx_fifo_num) {
1161         case 1:
1162                 val64 = 0x0000000000000000ULL;
1163                 writeq(val64, &bar0->tx_w_round_robin_0);
1164                 writeq(val64, &bar0->tx_w_round_robin_1);
1165                 writeq(val64, &bar0->tx_w_round_robin_2);
1166                 writeq(val64, &bar0->tx_w_round_robin_3);
1167                 writeq(val64, &bar0->tx_w_round_robin_4);
1168                 break;
1169         case 2:
1170                 val64 = 0x0000010000010000ULL;
1171                 writeq(val64, &bar0->tx_w_round_robin_0);
1172                 val64 = 0x0100000100000100ULL;
1173                 writeq(val64, &bar0->tx_w_round_robin_1);
1174                 val64 = 0x0001000001000001ULL;
1175                 writeq(val64, &bar0->tx_w_round_robin_2);
1176                 val64 = 0x0000010000010000ULL;
1177                 writeq(val64, &bar0->tx_w_round_robin_3);
1178                 val64 = 0x0100000000000000ULL;
1179                 writeq(val64, &bar0->tx_w_round_robin_4);
1180                 break;
1181         case 3:
1182                 val64 = 0x0001000102000001ULL;
1183                 writeq(val64, &bar0->tx_w_round_robin_0);
1184                 val64 = 0x0001020000010001ULL;
1185                 writeq(val64, &bar0->tx_w_round_robin_1);
1186                 val64 = 0x0200000100010200ULL;
1187                 writeq(val64, &bar0->tx_w_round_robin_2);
1188                 val64 = 0x0001000102000001ULL;
1189                 writeq(val64, &bar0->tx_w_round_robin_3);
1190                 val64 = 0x0001020000000000ULL;
1191                 writeq(val64, &bar0->tx_w_round_robin_4);
1192                 break;
1193         case 4:
1194                 val64 = 0x0001020300010200ULL;
1195                 writeq(val64, &bar0->tx_w_round_robin_0);
1196                 val64 = 0x0100000102030001ULL;
1197                 writeq(val64, &bar0->tx_w_round_robin_1);
1198                 val64 = 0x0200010000010203ULL;
1199                 writeq(val64, &bar0->tx_w_round_robin_2);
1200                 val64 = 0x0001020001000001ULL;
1201                 writeq(val64, &bar0->tx_w_round_robin_3);
1202                 val64 = 0x0203000100000000ULL;
1203                 writeq(val64, &bar0->tx_w_round_robin_4);
1204                 break;
1205         case 5:
1206                 val64 = 0x0001000203000102ULL;
1207                 writeq(val64, &bar0->tx_w_round_robin_0);
1208                 val64 = 0x0001020001030004ULL;
1209                 writeq(val64, &bar0->tx_w_round_robin_1);
1210                 val64 = 0x0001000203000102ULL;
1211                 writeq(val64, &bar0->tx_w_round_robin_2);
1212                 val64 = 0x0001020001030004ULL;
1213                 writeq(val64, &bar0->tx_w_round_robin_3);
1214                 val64 = 0x0001000000000000ULL;
1215                 writeq(val64, &bar0->tx_w_round_robin_4);
1216                 break;
1217         case 6:
1218                 val64 = 0x0001020304000102ULL;
1219                 writeq(val64, &bar0->tx_w_round_robin_0);
1220                 val64 = 0x0304050001020001ULL;
1221                 writeq(val64, &bar0->tx_w_round_robin_1);
1222                 val64 = 0x0203000100000102ULL;
1223                 writeq(val64, &bar0->tx_w_round_robin_2);
1224                 val64 = 0x0304000102030405ULL;
1225                 writeq(val64, &bar0->tx_w_round_robin_3);
1226                 val64 = 0x0001000200000000ULL;
1227                 writeq(val64, &bar0->tx_w_round_robin_4);
1228                 break;
1229         case 7:
1230                 val64 = 0x0001020001020300ULL;
1231                 writeq(val64, &bar0->tx_w_round_robin_0);
1232                 val64 = 0x0102030400010203ULL;
1233                 writeq(val64, &bar0->tx_w_round_robin_1);
1234                 val64 = 0x0405060001020001ULL;
1235                 writeq(val64, &bar0->tx_w_round_robin_2);
1236                 val64 = 0x0304050000010200ULL;
1237                 writeq(val64, &bar0->tx_w_round_robin_3);
1238                 val64 = 0x0102030000000000ULL;
1239                 writeq(val64, &bar0->tx_w_round_robin_4);
1240                 break;
1241         case 8:
1242                 val64 = 0x0001020300040105ULL;
1243                 writeq(val64, &bar0->tx_w_round_robin_0);
1244                 val64 = 0x0200030106000204ULL;
1245                 writeq(val64, &bar0->tx_w_round_robin_1);
1246                 val64 = 0x0103000502010007ULL;
1247                 writeq(val64, &bar0->tx_w_round_robin_2);
1248                 val64 = 0x0304010002060500ULL;
1249                 writeq(val64, &bar0->tx_w_round_robin_3);
1250                 val64 = 0x0103020400000000ULL;
1251                 writeq(val64, &bar0->tx_w_round_robin_4);
1252                 break;
1253         }
1254
1255         /* Enable all configured Tx FIFO partitions */
1256         val64 = readq(&bar0->tx_fifo_partition_0);
1257         val64 |= (TX_FIFO_PARTITION_EN);
1258         writeq(val64, &bar0->tx_fifo_partition_0);
1259
1260         /* Filling the Rx round robin registers as per the
1261          * number of Rings and steering based on QoS.
1262          */
1263         switch (config->rx_ring_num) {
1264         case 1:
1265                 val64 = 0x8080808080808080ULL;
1266                 writeq(val64, &bar0->rts_qos_steering);
1267                 break;
1268         case 2:
1269                 val64 = 0x0000010000010000ULL;
1270                 writeq(val64, &bar0->rx_w_round_robin_0);
1271                 val64 = 0x0100000100000100ULL;
1272                 writeq(val64, &bar0->rx_w_round_robin_1);
1273                 val64 = 0x0001000001000001ULL;
1274                 writeq(val64, &bar0->rx_w_round_robin_2);
1275                 val64 = 0x0000010000010000ULL;
1276                 writeq(val64, &bar0->rx_w_round_robin_3);
1277                 val64 = 0x0100000000000000ULL;
1278                 writeq(val64, &bar0->rx_w_round_robin_4);
1279
1280                 val64 = 0x8080808040404040ULL;
1281                 writeq(val64, &bar0->rts_qos_steering);
1282                 break;
1283         case 3:
1284                 val64 = 0x0001000102000001ULL;
1285                 writeq(val64, &bar0->rx_w_round_robin_0);
1286                 val64 = 0x0001020000010001ULL;
1287                 writeq(val64, &bar0->rx_w_round_robin_1);
1288                 val64 = 0x0200000100010200ULL;
1289                 writeq(val64, &bar0->rx_w_round_robin_2);
1290                 val64 = 0x0001000102000001ULL;
1291                 writeq(val64, &bar0->rx_w_round_robin_3);
1292                 val64 = 0x0001020000000000ULL;
1293                 writeq(val64, &bar0->rx_w_round_robin_4);
1294
1295                 val64 = 0x8080804040402020ULL;
1296                 writeq(val64, &bar0->rts_qos_steering);
1297                 break;
1298         case 4:
1299                 val64 = 0x0001020300010200ULL;
1300                 writeq(val64, &bar0->rx_w_round_robin_0);
1301                 val64 = 0x0100000102030001ULL;
1302                 writeq(val64, &bar0->rx_w_round_robin_1);
1303                 val64 = 0x0200010000010203ULL;
1304                 writeq(val64, &bar0->rx_w_round_robin_2);
1305                 val64 = 0x0001020001000001ULL;
1306                 writeq(val64, &bar0->rx_w_round_robin_3);
1307                 val64 = 0x0203000100000000ULL;
1308                 writeq(val64, &bar0->rx_w_round_robin_4);
1309
1310                 val64 = 0x8080404020201010ULL;
1311                 writeq(val64, &bar0->rts_qos_steering);
1312                 break;
1313         case 5:
1314                 val64 = 0x0001000203000102ULL;
1315                 writeq(val64, &bar0->rx_w_round_robin_0);
1316                 val64 = 0x0001020001030004ULL;
1317                 writeq(val64, &bar0->rx_w_round_robin_1);
1318                 val64 = 0x0001000203000102ULL;
1319                 writeq(val64, &bar0->rx_w_round_robin_2);
1320                 val64 = 0x0001020001030004ULL;
1321                 writeq(val64, &bar0->rx_w_round_robin_3);
1322                 val64 = 0x0001000000000000ULL;
1323                 writeq(val64, &bar0->rx_w_round_robin_4);
1324
1325                 val64 = 0x8080404020201008ULL;
1326                 writeq(val64, &bar0->rts_qos_steering);
1327                 break;
1328         case 6:
1329                 val64 = 0x0001020304000102ULL;
1330                 writeq(val64, &bar0->rx_w_round_robin_0);
1331                 val64 = 0x0304050001020001ULL;
1332                 writeq(val64, &bar0->rx_w_round_robin_1);
1333                 val64 = 0x0203000100000102ULL;
1334                 writeq(val64, &bar0->rx_w_round_robin_2);
1335                 val64 = 0x0304000102030405ULL;
1336                 writeq(val64, &bar0->rx_w_round_robin_3);
1337                 val64 = 0x0001000200000000ULL;
1338                 writeq(val64, &bar0->rx_w_round_robin_4);
1339
1340                 val64 = 0x8080404020100804ULL;
1341                 writeq(val64, &bar0->rts_qos_steering);
1342                 break;
1343         case 7:
1344                 val64 = 0x0001020001020300ULL;
1345                 writeq(val64, &bar0->rx_w_round_robin_0);
1346                 val64 = 0x0102030400010203ULL;
1347                 writeq(val64, &bar0->rx_w_round_robin_1);
1348                 val64 = 0x0405060001020001ULL;
1349                 writeq(val64, &bar0->rx_w_round_robin_2);
1350                 val64 = 0x0304050000010200ULL;
1351                 writeq(val64, &bar0->rx_w_round_robin_3);
1352                 val64 = 0x0102030000000000ULL;
1353                 writeq(val64, &bar0->rx_w_round_robin_4);
1354
1355                 val64 = 0x8080402010080402ULL;
1356                 writeq(val64, &bar0->rts_qos_steering);
1357                 break;
1358         case 8:
1359                 val64 = 0x0001020300040105ULL;
1360                 writeq(val64, &bar0->rx_w_round_robin_0);
1361                 val64 = 0x0200030106000204ULL;
1362                 writeq(val64, &bar0->rx_w_round_robin_1);
1363                 val64 = 0x0103000502010007ULL;
1364                 writeq(val64, &bar0->rx_w_round_robin_2);
1365                 val64 = 0x0304010002060500ULL;
1366                 writeq(val64, &bar0->rx_w_round_robin_3);
1367                 val64 = 0x0103020400000000ULL;
1368                 writeq(val64, &bar0->rx_w_round_robin_4);
1369
1370                 val64 = 0x8040201008040201ULL;
1371                 writeq(val64, &bar0->rts_qos_steering);
1372                 break;
1373         }
1374
1375         /* UDP Fix */
1376         val64 = 0;
1377         for (i = 0; i < 8; i++)
1378                 writeq(val64, &bar0->rts_frm_len_n[i]);
1379
1380         /* Set the default rts frame length for the rings configured */
1381         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1382         for (i = 0 ; i < config->rx_ring_num ; i++)
1383                 writeq(val64, &bar0->rts_frm_len_n[i]);
1384
1385         /* Set the frame length for the configured rings
1386          * desired by the user
1387          */
1388         for (i = 0; i < config->rx_ring_num; i++) {
1389                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1390                  * specified frame length steering.
1391                  * If the user provides the frame length then program
1392                  * the rts_frm_len register for those values or else
1393                  * leave it as it is.
1394                  */
1395                 if (rts_frm_len[i] != 0) {
1396                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1397                                 &bar0->rts_frm_len_n[i]);
1398                 }
1399         }
1400         
1401         /* Disable differentiated services steering logic */
1402         for (i = 0; i < 64; i++) {
1403                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1404                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1405                                 dev->name);
1406                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1407                         return FAILURE;
1408                 }
1409         }
1410
1411         /* Program statistics memory */
1412         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1413
1414         if (nic->device_type == XFRAME_II_DEVICE) {
1415                 val64 = STAT_BC(0x320);
1416                 writeq(val64, &bar0->stat_byte_cnt);
1417         }
1418
1419         /*
1420          * Initializing the sampling rate for the device to calculate the
1421          * bandwidth utilization.
1422          */
1423         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1424             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1425         writeq(val64, &bar0->mac_link_util);
1426
1427
1428         /*
1429          * Initializing the Transmit and Receive Traffic Interrupt
1430          * Scheme.
1431          */
1432         /*
1433          * TTI Initialization. Default Tx timer gets us about
1434          * 250 interrupts per sec. Continuous interrupts are enabled
1435          * by default.
1436          */
1437         if (nic->device_type == XFRAME_II_DEVICE) {
1438                 int count = (nic->config.bus_speed * 125)/2;
1439                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1440         } else {
1441
1442                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1443         }
1444         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1445             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1446             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1447                 if (use_continuous_tx_intrs)
1448                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1449         writeq(val64, &bar0->tti_data1_mem);
1450
1451         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1452             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1453             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1454         writeq(val64, &bar0->tti_data2_mem);
1455
1456         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1457         writeq(val64, &bar0->tti_command_mem);
1458
1459         /*
1460          * Once the operation completes, the Strobe bit of the command
1461          * register will be reset. We poll for this particular condition
1462          * We wait for a maximum of 500ms for the operation to complete,
1463          * if it's not complete by then we return error.
1464          */
1465         time = 0;
1466         while (TRUE) {
1467                 val64 = readq(&bar0->tti_command_mem);
1468                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1469                         break;
1470                 }
1471                 if (time > 10) {
1472                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1473                                   dev->name);
1474                         return -1;
1475                 }
1476                 msleep(50);
1477                 time++;
1478         }
1479
1480         if (nic->config.bimodal) {
1481                 int k = 0;
1482                 for (k = 0; k < config->rx_ring_num; k++) {
1483                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1484                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1485                         writeq(val64, &bar0->tti_command_mem);
1486
1487                 /*
1488                  * Once the operation completes, the Strobe bit of the command
1489                  * register will be reset. We poll for this particular condition
1490                  * We wait for a maximum of 500ms for the operation to complete,
1491                  * if it's not complete by then we return error.
1492                 */
1493                         time = 0;
1494                         while (TRUE) {
1495                                 val64 = readq(&bar0->tti_command_mem);
1496                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1497                                         break;
1498                                 }
1499                                 if (time > 10) {
1500                                         DBG_PRINT(ERR_DBG,
1501                                                 "%s: TTI init Failed\n",
1502                                         dev->name);
1503                                         return -1;
1504                                 }
1505                                 time++;
1506                                 msleep(50);
1507                         }
1508                 }
1509         } else {
1510
1511                 /* RTI Initialization */
1512                 if (nic->device_type == XFRAME_II_DEVICE) {
1513                         /*
1514                          * Programmed to generate Apprx 500 Intrs per
1515                          * second
1516                          */
1517                         int count = (nic->config.bus_speed * 125)/4;
1518                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1519                 } else {
1520                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1521                 }
1522                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1523                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1524                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1525
1526                 writeq(val64, &bar0->rti_data1_mem);
1527
1528                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1529                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1530                 if (nic->intr_type == MSI_X)
1531                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1532                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1533                 else
1534                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1535                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1536                 writeq(val64, &bar0->rti_data2_mem);
1537
1538                 for (i = 0; i < config->rx_ring_num; i++) {
1539                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1540                                         | RTI_CMD_MEM_OFFSET(i);
1541                         writeq(val64, &bar0->rti_command_mem);
1542
1543                         /*
1544                          * Once the operation completes, the Strobe bit of the
1545                          * command register will be reset. We poll for this
1546                          * particular condition. We wait for a maximum of 500ms
1547                          * for the operation to complete, if it's not complete
1548                          * by then we return error.
1549                          */
1550                         time = 0;
1551                         while (TRUE) {
1552                                 val64 = readq(&bar0->rti_command_mem);
1553                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1554                                         break;
1555                                 }
1556                                 if (time > 10) {
1557                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1558                                                   dev->name);
1559                                         return -1;
1560                                 }
1561                                 time++;
1562                                 msleep(50);
1563                         }
1564                 }
1565         }
1566
1567         /*
1568          * Initializing proper values as Pause threshold into all
1569          * the 8 Queues on Rx side.
1570          */
1571         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1572         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1573
1574         /* Disable RMAC PAD STRIPPING */
1575         add = &bar0->mac_cfg;
1576         val64 = readq(&bar0->mac_cfg);
1577         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1578         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1579         writel((u32) (val64), add);
1580         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1581         writel((u32) (val64 >> 32), (add + 4));
1582         val64 = readq(&bar0->mac_cfg);
1583
1584         /* Enable FCS stripping by adapter */
1585         add = &bar0->mac_cfg;
1586         val64 = readq(&bar0->mac_cfg);
1587         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1588         if (nic->device_type == XFRAME_II_DEVICE)
1589                 writeq(val64, &bar0->mac_cfg);
1590         else {
1591                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1592                 writel((u32) (val64), add);
1593                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1594                 writel((u32) (val64 >> 32), (add + 4));
1595         }
1596
1597         /*
1598          * Set the time value to be inserted in the pause frame
1599          * generated by xena.
1600          */
1601         val64 = readq(&bar0->rmac_pause_cfg);
1602         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1603         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1604         writeq(val64, &bar0->rmac_pause_cfg);
1605
1606         /*
1607          * Set the Threshold Limit for Generating the pause frame
1608          * If the amount of data in any Queue exceeds ratio of
1609          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1610          * pause frame is generated
1611          */
1612         val64 = 0;
1613         for (i = 0; i < 4; i++) {
1614                 val64 |=
1615                     (((u64) 0xFF00 | nic->mac_control.
1616                       mc_pause_threshold_q0q3)
1617                      << (i * 2 * 8));
1618         }
1619         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1620
1621         val64 = 0;
1622         for (i = 0; i < 4; i++) {
1623                 val64 |=
1624                     (((u64) 0xFF00 | nic->mac_control.
1625                       mc_pause_threshold_q4q7)
1626                      << (i * 2 * 8));
1627         }
1628         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1629
1630         /*
1631          * TxDMA will stop Read request if the number of read split has
1632          * exceeded the limit pointed by shared_splits
1633          */
1634         val64 = readq(&bar0->pic_control);
1635         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1636         writeq(val64, &bar0->pic_control);
1637
1638         if (nic->config.bus_speed == 266) {
1639                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1640                 writeq(0x0, &bar0->read_retry_delay);
1641                 writeq(0x0, &bar0->write_retry_delay);
1642         }
1643
1644         /*
1645          * Programming the Herc to split every write transaction
1646          * that does not start on an ADB to reduce disconnects.
1647          */
1648         if (nic->device_type == XFRAME_II_DEVICE) {
1649                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1650                         MISC_LINK_STABILITY_PRD(3);
1651                 writeq(val64, &bar0->misc_control);
1652                 val64 = readq(&bar0->pic_control2);
1653                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1654                 writeq(val64, &bar0->pic_control2);
1655         }
1656         if (strstr(nic->product_name, "CX4")) {
1657                 val64 = TMAC_AVG_IPG(0x17);
1658                 writeq(val64, &bar0->tmac_avg_ipg);
1659         }
1660
1661         return SUCCESS;
1662 }
1663 #define LINK_UP_DOWN_INTERRUPT          1
1664 #define MAC_RMAC_ERR_TIMER              2
1665
1666 static int s2io_link_fault_indication(struct s2io_nic *nic)
1667 {
1668         if (nic->intr_type != INTA)
1669                 return MAC_RMAC_ERR_TIMER;
1670         if (nic->device_type == XFRAME_II_DEVICE)
1671                 return LINK_UP_DOWN_INTERRUPT;
1672         else
1673                 return MAC_RMAC_ERR_TIMER;
1674 }
1675
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, indicating whether to enable
 *  or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block (PIC, MAC, Tx traffic, Rx traffic).
 *  In general_int_mask a 0 bit un-masks (enables) an interrupt group and
 *  a 1 bit masks it; enabling clears the group bit and then un-masks the
 *  block-level sources, disabling masks the block-level sources first and
 *  then sets the group bit.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Clear the PIC group bit to un-mask it */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Un-mask only the GPIO source within the
				 * PIC block, and only the link-up event
				 * within the GPIO block. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Clear the MAC group bits to un-mask them */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1805
1806 /**
1807  *  verify_pcc_quiescent- Checks for PCC quiescent state
1808  *  Return: 1 If PCC is quiescence
1809  *          0 If PCC is not quiescence
1810  */
1811 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1812 {
1813         int ret = 0, herc;
1814         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1815         u64 val64 = readq(&bar0->adapter_status);
1816         
1817         herc = (sp->device_type == XFRAME_II_DEVICE);
1818
1819         if (flag == FALSE) {
1820                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1821                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1822                                 ret = 1;
1823                 } else {
1824                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1825                                 ret = 1;
1826                 }
1827         } else {
1828                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1829                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1830                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1831                                 ret = 1;
1832                 } else {
1833                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1834                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1835                                 ret = 1;
1836                 }
1837         }
1838
1839         return ret;
1840 }
1841 /**
1842  *  verify_xena_quiescence - Checks whether the H/W is ready
1843  *  Description: Returns whether the H/W is ready to go or not. Depending
1844  *  on whether adapter enable bit was written or not the comparison
1845  *  differs and the calling function passes the input argument flag to
1846  *  indicate this.
1847  *  Return: 1 If xena is quiescence
1848  *          0 If Xena is not quiescence
1849  */
1850
1851 static int verify_xena_quiescence(struct s2io_nic *sp)
1852 {
1853         int  mode;
1854         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1855         u64 val64 = readq(&bar0->adapter_status);
1856         mode = s2io_verify_pci_mode(sp);
1857
1858         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1859                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1860                 return 0;
1861         }
1862         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1863         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1864                 return 0;
1865         }
1866         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1867                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1868                 return 0;
1869         }
1870         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1871                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1872                 return 0;
1873         }
1874         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1875                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1876                 return 0;
1877         }
1878         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1879                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1880                 return 0;
1881         }
1882         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1883                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1884                 return 0;
1885         }
1886         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1887                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1888                 return 0;
1889         }
1890
1891         /*
1892          * In PCI 33 mode, the P_PLL is not used, and therefore,
1893          * the the P_PLL_LOCK bit in the adapter_status register will
1894          * not be asserted.
1895          */
1896         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1897                 sp->device_type == XFRAME_II_DEVICE && mode !=
1898                 PCI_MODE_PCI_33) {
1899                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1900                 return 0;
1901         }
1902         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1903                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1904                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1905                 return 0;
1906         }
1907         return 1;
1908 }
1909
1910 /**
1911  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1912  * @sp: Pointer to device specifc structure
1913  * Description :
1914  * New procedure to clear mac address reading  problems on Alpha platforms
1915  *
1916  */
1917
1918 static void fix_mac_address(struct s2io_nic * sp)
1919 {
1920         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1921         u64 val64;
1922         int i = 0;
1923
1924         while (fix_mac[i] != END_SIGN) {
1925                 writeq(fix_mac[i++], &bar0->gpio_control);
1926                 udelay(10);
1927                 val64 = readq(&bar0->gpio_control);
1928         }
1929 }
1930
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE if the adapter is not quiescent.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point the PRC of each ring at its first Rx block */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter vlan_tag_strip == 0 turns off VLAN stripping */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	/* Read back to flush the special register write */
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);    /* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code clears ADAPTER_ECC_EN while the comment
	 * above says "Enabling" - either the bit is active-low or the
	 * comment polarity is wrong; confirm against the Xframe register
	 * specification before changing either. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		/* Undocumented LED init sequence at BAR0 offset 0x2700 */
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the TxD list belongs to (supplies nic and max_txds).
 * @txdlp: first TxD of the descriptor list being torn down.
 * @get_off: descriptor offset within the fifo (not used by this
 * function itself).
 * Unmaps every DMA buffer referenced by the TxD list, zeroes the
 * whole list (max_txds entries) and returns the attached skb, or
 * NULL if no skb was attached.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
                                        TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A UFO frame carries an extra leading TxD that points at the
	 * in-band UFO header; unmap it and step past it first. */
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer, sizeof(u64),
			PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control of the first (data) TxD holds the skb pointer */
	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear portion of the skb */
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One TxD per page fragment follows the linear TxD; a
		 * zero Buffer_Pointer marks the end of the mapped ones. */
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
					txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
	return(skb);
}
2103
2104 /**
2105  *  free_tx_buffers - Free all queued Tx buffers
2106  *  @nic : device private variable.
2107  *  Description:
2108  *  Free all queued Tx buffers.
2109  *  Return Value: void
2110 */
2111
2112 static void free_tx_buffers(struct s2io_nic *nic)
2113 {
2114         struct net_device *dev = nic->dev;
2115         struct sk_buff *skb;
2116         struct TxD *txdp;
2117         int i, j;
2118         struct mac_info *mac_control;
2119         struct config_param *config;
2120         int cnt = 0;
2121
2122         mac_control = &nic->mac_control;
2123         config = &nic->config;
2124
2125         for (i = 0; i < config->tx_fifo_num; i++) {
2126                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2127                         txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2128                             list_virt_addr;
2129                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2130                         if (skb) {
2131                                 dev_kfree_skb(skb);
2132                                 cnt++;
2133                         }
2134                 }
2135                 DBG_PRINT(INTR_DBG,
2136                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2137                           dev->name, cnt, i);
2138                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2139                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2140         }
2141 }
2142
2143 /**
2144  *   stop_nic -  To stop the nic
2145  *   @nic ; device private variable.
2146  *   Description:
2147  *   This function does exactly the opposite of what the start_nic()
2148  *   function does. This function is called to stop the device.
2149  *   Return Value:
2150  *   void.
2151  */
2152
2153 static void stop_nic(struct s2io_nic *nic)
2154 {
2155         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2156         register u64 val64 = 0;
2157         u16 interruptible;
2158         struct mac_info *mac_control;
2159         struct config_param *config;
2160
2161         mac_control = &nic->mac_control;
2162         config = &nic->config;
2163
2164         /*  Disable all interrupts */
2165         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2166         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2167         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2168         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2169
2170         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2171         val64 = readq(&bar0->adapter_control);
2172         val64 &= ~(ADAPTER_CNTL_EN);
2173         writeq(val64, &bar0->adapter_control);
2174 }
2175
/*
 * fill_rxd_3buf - populate a 3-buffer mode RxD.
 * @nic: device private structure.
 * @rxdp: descriptor to fill (treated as struct RxD3).
 * @skb: already-allocated skb whose head will receive the L3/L4 headers.
 * Maps skb->data as Buffer-1 (headers), allocates and maps a second,
 * MTU-sized skb on skb's frag_list as Buffer-2 (payload), and records
 * both sizes in Control_2. Returns SUCCESS or -ENOMEM.
 */
static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
				sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	/* NOTE(review): neither pci_map_single() result here nor the one
	 * below is checked with pci_dma_mapping_error() - confirm mapping
	 * cannot fail on the supported platforms. */
	((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
		/* NOTE(review): Buffer1_ptr mapped above is not unmapped on
		 * this error path - verify the caller performs the cleanup,
		 * otherwise this leaks a DMA mapping. */
		return -ENOMEM ;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	skb->truesize += frag_list->truesize;
	frag_list->next = NULL;
	/* Align the payload buffer start for the adapter's DMA */
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
				frag_list->data, dev->mtu,
				PCI_DMA_FROMDEVICE);
	/* Record both buffer sizes in the descriptor control word */
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
2210
2211 /**
2212  *  fill_rx_buffers - Allocates the Rx side skbs
2213  *  @nic:  device private variable
2214  *  @ring_no: ring number
2215  *  Description:
2216  *  The function allocates Rx side skbs and puts the physical
2217  *  address of these buffers into the RxD buffer pointers, so that the NIC
2218  *  can DMA the received frame into these locations.
2219  *  The NIC supports 3 receive modes, viz
2220  *  1. single buffer,
2221  *  2. three buffer and
2222  *  3. Five buffer modes.
2223  *  Each mode defines how many fragments the received frame will be split
2224  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2225  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2226  *  is split into 3 fragments. As of now only single buffer mode is
2227  *  supported.
2228  *   Return Value:
2229  *  SUCCESS on success or an appropriate -ve value on failure.
2230  */
2231
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
        struct net_device *dev = nic->dev;
        struct sk_buff *skb;
        struct RxD_t *rxdp;
        int off, off1, size, block_no, block_no1;
        u32 alloc_tab = 0;
        u32 alloc_cnt;
        struct mac_info *mac_control;
        struct config_param *config;
        u64 tmp;
        struct buffAdd *ba;
        unsigned long flags;
        struct RxD_t *first_rxdp = NULL;

        mac_control = &nic->mac_control;
        config = &nic->config;
        /* Number of descriptors that still need a buffer on this ring. */
        alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
            atomic_read(&nic->rx_bufs_left[ring_no]);

        /* Snapshot the get side so we can detect catching up with it. */
        block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
        off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
        while (alloc_tab < alloc_cnt) {
                /* Each iteration fills the descriptor at the put position. */
                block_no = mac_control->rings[ring_no].rx_curr_put_info.
                    block_index;
                off = mac_control->rings[ring_no].rx_curr_put_info.offset;

                rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].rxds[off].virt_addr;

                /* Put has caught up with get on a still-attached skb:
                 * ring is effectively full, stop refilling. */
                if ((block_no == block_no1) && (off == off1) &&
                                        (rxdp->Host_Control)) {
                        DBG_PRINT(INTR_DBG, "%s: Get and Put",
                                  dev->name);
                        DBG_PRINT(INTR_DBG, " info equated\n");
                        goto end;
                }
                /* End of block reached: advance (and wrap) the put block. */
                if (off && (off == rxd_count[nic->rxd_mode])) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index++;
                        if (mac_control->rings[ring_no].rx_curr_put_info.
                            block_index == mac_control->rings[ring_no].
                                        block_count)
                                mac_control->rings[ring_no].rx_curr_put_info.
                                        block_index = 0;
                        block_no = mac_control->rings[ring_no].
                                        rx_curr_put_info.block_index;
                        if (off == rxd_count[nic->rxd_mode])
                                off = 0;
                        mac_control->rings[ring_no].rx_curr_put_info.
                                offset = off;
                        rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].block_virt_addr;
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                                  dev->name, rxdp);
                }
                /* Record the absolute put position; rx_intr_handler reads
                 * it under put_lock when NAPI is off. */
                if(!napi) {
                        spin_lock_irqsave(&nic->put_lock, flags);
                        mac_control->rings[ring_no].put_pos =
                        (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                        spin_unlock_irqrestore(&nic->put_lock, flags);
                } else {
                        mac_control->rings[ring_no].put_pos =
                        (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                }
                /* Descriptor still owned by the adapter (2/3-buffer modes
                 * flag filled descriptors with BIT(0)): nothing to do. */
                if ((rxdp->Control_1 & RXD_OWN_XENA) &&
                        ((nic->rxd_mode >= RXD_MODE_3A) &&
                                (rxdp->Control_2 & BIT(0)))) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                                        offset = off;
                        goto end;
                }
                /* calculate size of skb based on ring mode */
                size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
                if (nic->rxd_mode == RXD_MODE_1)
                        size += NET_IP_ALIGN;
                else if (nic->rxd_mode == RXD_MODE_3B)
                        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
                else
                        size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

                /* allocate skb */
                skb = dev_alloc_skb(size);
                if(!skb) {
                        DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
                        DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
                        /* Hand any batched descriptors to the NIC before
                         * bailing out, so they are not lost. */
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        return -ENOMEM ;
                }
                if (nic->rxd_mode == RXD_MODE_1) {
                        /* 1 buffer mode - normal operation mode */
                        memset(rxdp, 0, sizeof(struct RxD1));
                        skb_reserve(skb, NET_IP_ALIGN);
                        ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
                            (nic->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
                        rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

                } else if (nic->rxd_mode >= RXD_MODE_3A) {
                        /*
                         * 2 or 3 buffer mode -
                         * Both 2 buffer mode and 3 buffer mode provides 128
                         * byte aligned receive buffers.
                         *
                         * 3 buffer mode provides header separation where in
                         * skb->data will have L3/L4 headers where as
                         * skb_shinfo(skb)->frag_list will have the L4 data
                         * payload
                         */

                        memset(rxdp, 0, sizeof(struct RxD3));
                        ba = &mac_control->rings[ring_no].ba[block_no][off];
                        skb_reserve(skb, BUF0_LEN);
                        /* Round skb->data up to the alignment boundary;
                         * ALIGN_SIZE acts as the mask here. */
                        tmp = (u64)(unsigned long) skb->data;
                        tmp += ALIGN_SIZE;
                        tmp &= ~ALIGN_SIZE;
                        skb->data = (void *) (unsigned long)tmp;
                        skb->tail = (void *) (unsigned long)tmp;

                        /* Buffer0 (header buffer) is mapped once and then
                         * only synced on reuse. */
                        if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
                                ((struct RxD3*)rxdp)->Buffer0_ptr =
                                   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                                           PCI_DMA_FROMDEVICE);
                        else
                                pci_dma_sync_single_for_device(nic->pdev,
                                    (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
                                    BUF0_LEN, PCI_DMA_FROMDEVICE);
                        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                        if (nic->rxd_mode == RXD_MODE_3B) {
                                /* Two buffer mode */

                                /*
                                 * Buffer2 will have L3/L4 header plus
                                 * L4 payload
                                 */
                                ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
                                (nic->pdev, skb->data, dev->mtu + 4,
                                                PCI_DMA_FROMDEVICE);

                                /* Buffer-1 will be dummy buffer. Not used */
                                if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
                                        ((struct RxD3*)rxdp)->Buffer1_ptr =
                                                pci_map_single(nic->pdev,
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
                                }
                                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                                rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                                                                (dev->mtu + 4);
                        } else {
                                /* 3 buffer mode */
                                if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
                                        dev_kfree_skb_irq(skb);
                                        if (first_rxdp) {
                                                wmb();
                                                first_rxdp->Control_1 |=
                                                        RXD_OWN_XENA;
                                        }
                                        return -ENOMEM ;
                                }
                        }
                        /* Mark the descriptor as filled by the host. */
                        rxdp->Control_2 |= BIT(0);
                }
                rxdp->Host_Control = (unsigned long) (skb);
                /* Within a batch, hand the descriptor to the NIC at once;
                 * batch leaders are deferred (see below) so the wmb()
                 * orders all prior stores first. */
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
                if (off == (rxd_count[nic->rxd_mode] + 1))
                        off = 0;
                mac_control->rings[ring_no].rx_curr_put_info.offset = off;

                rxdp->Control_2 |= SET_RXD_MARKER;
                /* Every 2^rxsync_frequency descriptors, release the
                 * previous batch leader and start a new batch. */
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        first_rxdp = rxdp;
                }
                atomic_inc(&nic->rx_bufs_left[ring_no]);
                alloc_tab++;
        }

      end:
        /* Transfer ownership of first descriptor to adapter just before
         * exiting. Before that, use memory barrier so that ownership
         * and other fields are seen by adapter correctly.
         */
        if (first_rxdp) {
                wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
        }

        return SUCCESS;
}
2431
2432 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2433 {
2434         struct net_device *dev = sp->dev;
2435         int j;
2436         struct sk_buff *skb;
2437         struct RxD_t *rxdp;
2438         struct mac_info *mac_control;
2439         struct buffAdd *ba;
2440
2441         mac_control = &sp->mac_control;
2442         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2443                 rxdp = mac_control->rings[ring_no].
2444                                 rx_blocks[blk].rxds[j].virt_addr;
2445                 skb = (struct sk_buff *)
2446                         ((unsigned long) rxdp->Host_Control);
2447                 if (!skb) {
2448                         continue;
2449                 }
2450                 if (sp->rxd_mode == RXD_MODE_1) {
2451                         pci_unmap_single(sp->pdev, (dma_addr_t)
2452                                  ((struct RxD1*)rxdp)->Buffer0_ptr,
2453                                  dev->mtu +
2454                                  HEADER_ETHERNET_II_802_3_SIZE
2455                                  + HEADER_802_2_SIZE +
2456                                  HEADER_SNAP_SIZE,
2457                                  PCI_DMA_FROMDEVICE);
2458                         memset(rxdp, 0, sizeof(struct RxD1));
2459                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2460                         ba = &mac_control->rings[ring_no].
2461                                 ba[blk][j];
2462                         pci_unmap_single(sp->pdev, (dma_addr_t)
2463                                  ((struct RxD3*)rxdp)->Buffer0_ptr,
2464                                  BUF0_LEN,
2465                                  PCI_DMA_FROMDEVICE);
2466                         pci_unmap_single(sp->pdev, (dma_addr_t)
2467                                  ((struct RxD3*)rxdp)->Buffer1_ptr,
2468                                  BUF1_LEN,
2469                                  PCI_DMA_FROMDEVICE);
2470                         pci_unmap_single(sp->pdev, (dma_addr_t)
2471                                  ((struct RxD3*)rxdp)->Buffer2_ptr,
2472                                  dev->mtu + 4,
2473                                  PCI_DMA_FROMDEVICE);
2474                         memset(rxdp, 0, sizeof(struct RxD3));
2475                 } else {
2476                         pci_unmap_single(sp->pdev, (dma_addr_t)
2477                                 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2478                                 PCI_DMA_FROMDEVICE);
2479                         pci_unmap_single(sp->pdev, (dma_addr_t)
2480                                 ((struct RxD3*)rxdp)->Buffer1_ptr,
2481                                 l3l4hdr_size + 4,
2482                                 PCI_DMA_FROMDEVICE);
2483                         pci_unmap_single(sp->pdev, (dma_addr_t)
2484                                 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2485                                 PCI_DMA_FROMDEVICE);
2486                         memset(rxdp, 0, sizeof(struct RxD3));
2487                 }
2488                 dev_kfree_skb(skb);
2489                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2490         }
2491 }
2492
2493 /**
2494  *  free_rx_buffers - Frees all Rx buffers
2495  *  @sp: device private variable.
2496  *  Description:
2497  *  This function will free all Rx buffers allocated by host.
2498  *  Return Value:
2499  *  NONE.
2500  */
2501
2502 static void free_rx_buffers(struct s2io_nic *sp)
2503 {
2504         struct net_device *dev = sp->dev;
2505         int i, blk = 0, buf_cnt = 0;
2506         struct mac_info *mac_control;
2507         struct config_param *config;
2508
2509         mac_control = &sp->mac_control;
2510         config = &sp->config;
2511
2512         for (i = 0; i < config->rx_ring_num; i++) {
2513                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2514                         free_rxd_blk(sp,i,blk);
2515
2516                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2517                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2518                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2519                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2520                 atomic_set(&sp->rx_bufs_left[i], 0);
2521                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2522                           dev->name, buf_cnt, i);
2523         }
2524 }
2525
2526 /**
2527  * s2io_poll - Rx interrupt handler for NAPI support
2528  * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context.
 * Also, it will process only a given number of packets.
2535  * Return value:
2536  * 0 on success and 1 if there are No Rx packets to be processed.
2537  */
2538
static int s2io_poll(struct net_device *dev, int *budget)
{
        struct s2io_nic *nic = dev->priv;
        int pkt_cnt = 0, org_pkts_to_process;
        struct mac_info *mac_control;
        struct config_param *config;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        int i;

        /* Flag that we are inside the ISR/poll path (checked elsewhere,
         * e.g. during reset). */
        atomic_inc(&nic->isr_cnt);
        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Clamp this pass to the smaller of the NAPI budget and the
         * device quota. */
        nic->pkts_to_process = *budget;
        if (nic->pkts_to_process > dev->quota)
                nic->pkts_to_process = dev->quota;
        org_pkts_to_process = nic->pkts_to_process;

        /* Acknowledge all pending Rx traffic interrupts; the readl
         * flushes the posted write. */
        writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
        readl(&bar0->rx_traffic_int);

        for (i = 0; i < config->rx_ring_num; i++) {
                rx_intr_handler(&mac_control->rings[i]);
                pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
                if (!nic->pkts_to_process) {
                        /* Quota for the current iteration has been met */
                        goto no_rx;
                }
        }
        if (!pkt_cnt)
                pkt_cnt = 1;

        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;
        netif_rx_complete(dev);

        /* Replenish the Rx descriptors consumed above. */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        /* Re enable the Rx interrupts. */
        writeq(0x0, &bar0->rx_traffic_mask);
        readl(&bar0->rx_traffic_mask);
        atomic_dec(&nic->isr_cnt);
        return 0;

no_rx:
        /* Budget exhausted: return 1 to stay on the poll list; Rx
         * interrupts remain masked, but the rings are still refilled. */
        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;

        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        atomic_dec(&nic->isr_cnt);
        return 1;
}
2602
2603 #ifdef CONFIG_NET_POLL_CONTROLLER
2604 /**
2605  * s2io_netpoll - netpoll event handler entry point
2606  * @dev : pointer to the device structure.
2607  * Description:
2608  *      This function will be called by upper layer to check for events on the
2609  * interface in situations where interrupts are disabled. It is used for
2610  * specific in-kernel networking tasks, such as remote consoles and kernel
2611  * debugging over the network (example netdump in RedHat).
2612  */
static void s2io_netpoll(struct net_device *dev)
{
        struct s2io_nic *nic = dev->priv;
        struct mac_info *mac_control;
        struct config_param *config;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
        int i;

        /* Run with the device interrupt off: everything below is polled
         * by hand, mirroring what the ISR would do. */
        disable_irq(dev->irq);

        atomic_inc(&nic->isr_cnt);
        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Acknowledge any pending Rx/Tx traffic interrupts. */
        writeq(val64, &bar0->rx_traffic_int);
        writeq(val64, &bar0->tx_traffic_int);

        /* we need to free up the transmitted skbufs or else netpoll will
         * run out of skbs and will fail and eventually netpoll application such
         * as netdump will fail.
         */
        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&mac_control->fifos[i]);

        /* check for received packet and indicate up to network */
        for (i = 0; i < config->rx_ring_num; i++)
                rx_intr_handler(&mac_control->rings[i]);

        /* Replenish the Rx descriptors consumed above. */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
                        break;
                }
        }
        atomic_dec(&nic->isr_cnt);
        enable_irq(dev->irq);
        return;
}
2653 #endif
2654
2655 /**
2656  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring private structure (identifies the ring and its NIC).
2658  *  Description:
2659  *  If the interrupt is because of a received frame or if the
2660  *  receive ring contains fresh as yet un-processed frames,this function is
2661  *  called. It picks out the RxD at which place the last Rx processing had
2662  *  stopped and sends the skb to the OSM's Rx handler and then increments
2663  *  the offset.
2664  *  Return Value:
2665  *  NONE.
2666  */
static void rx_intr_handler(struct ring_info *ring_data)
{
        struct s2io_nic *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, put_block, put_offset;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
        int pkt_cnt = 0;
        int i;

        /* Bail out early if the card is being brought down for reset. */
        spin_lock(&nic->rx_lock);
        if (atomic_read(&nic->card_state) == CARD_DOWN) {
                DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
                          __FUNCTION__, dev->name);
                spin_unlock(&nic->rx_lock);
                return;
        }

        /* Snapshot the get and put positions for this ring. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
        /* put_pos is written by the refill path; outside NAPI it must be
         * read under put_lock.  NOTE(review): put_offset is computed but
         * not referenced later in this function - confirm whether it can
         * be dropped along with the locking around it. */
        if (!napi) {
                spin_lock(&nic->put_lock);
                put_offset = ring_data->put_pos;
                spin_unlock(&nic->put_lock);
        } else
                put_offset = ring_data->put_pos;

        /* Process descriptors until one is still owned by the NIC. */
        while (RXD_IS_UP2DT(rxdp)) {
                /*
                 * If your are next to put index then it's
                 * FIFO full condition
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
                        break;
                }
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        spin_unlock(&nic->rx_lock);
                        return;
                }
                /* Unmap (or CPU-sync) the DMA buffers according to the
                 * receive mode before handing the frame to the stack. */
                if (nic->rxd_mode == RXD_MODE_1) {
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                 ((struct RxD1*)rxdp)->Buffer0_ptr,
                                 dev->mtu +
                                 HEADER_ETHERNET_II_802_3_SIZE +
                                 HEADER_802_2_SIZE +
                                 HEADER_SNAP_SIZE,
                                 PCI_DMA_FROMDEVICE);
                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                                 ((struct RxD3*)rxdp)->Buffer0_ptr,
                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                 ((struct RxD3*)rxdp)->Buffer2_ptr,
                                 dev->mtu + 4,
                                 PCI_DMA_FROMDEVICE);
                } else {
                        pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                                         ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
                                         PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                         ((struct RxD3*)rxdp)->Buffer1_ptr,
                                         l3l4hdr_size + 4,
                                         PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                         ((struct RxD3*)rxdp)->Buffer2_ptr,
                                         dev->mtu, PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                /* Hand the frame to the OS-specific receive path. */
                rx_osm_handler(ring_data, rxdp);
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                /* Wrap to the next block (and around the ring) when the
                 * end of the current block is reached. */
                if (get_info.offset == rxd_count[nic->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                nic->pkts_to_process -= 1;
                /* Under NAPI, stop once the poll budget is consumed. */
                if ((napi) && (!nic->pkts_to_process))
                        break;
                pkt_cnt++;
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
        if (nic->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *lro = &nic->lro0_n[i];
                        if (lro->in_use) {
                                update_L3L4_header(nic, lro);
                                queue_rx_frame(lro->parent);
                                clear_lro_session(lro);
                        }
                }
        }

        spin_unlock(&nic->rx_lock);
}
2781
2782 /**
2783  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : per-FIFO private structure (identifies the FIFO and its NIC)
2785  *  Description:
2786  *  If an interrupt was raised to indicate DMA complete of the
2787  *  Tx packet, this function is called. It identifies the last TxD
2788  *  whose buffer was freed and frees all skbs whose data have already
2789  *  DMA'ed into the NICs internal memory.
2790  *  Return Value:
2791  *  NONE
2792  */
2793
2794 static void tx_intr_handler(struct fifo_info *fifo_data)
2795 {
2796         struct s2io_nic *nic = fifo_data->nic;
2797         struct net_device *dev = (struct net_device *) nic->dev;
2798         struct tx_curr_get_info get_info, put_info;
2799         struct sk_buff *skb;
2800         struct TxD *txdlp;
2801
2802         get_info = fifo_data->tx_curr_get_info;
2803         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2804         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2805             list_virt_addr;
2806         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2807                (get_info.offset != put_info.offset) &&
2808                (txdlp->Host_Control)) {
2809                 /* Check for TxD errors */
2810                 if (txdlp->Control_1 & TXD_T_CODE) {
2811                         unsigned long long err;
2812                         err = txdlp->Control_1 & TXD_T_CODE;
2813                         if (err & 0x1) {
2814                                 nic->mac_control.stats_info->sw_stat.
2815                                                 parity_err_cnt++;
2816                         }
2817                         if ((err >> 48) == 0xA) {
2818                                 DBG_PRINT(TX_DBG, "TxD returned due \
2819                                                 to loss of link\n");
2820                         }
2821                         else {
2822                                 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2823                         }
2824                 }
2825
2826                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2827                 if (skb == NULL) {
2828                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2829                         __FUNCTION__);
2830                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2831                         return;
2832                 }
2833
2834                 /* Updating the statistics block */
2835                 nic->stats.tx_bytes += skb->len;
2836                 dev_kfree_skb_irq(skb);
2837
2838                 get_info.offset++;
2839                 if (get_info.offset == get_info.fifo_len + 1)
2840                         get_info.offset = 0;
2841                 txdlp = (struct TxD *) fifo_data->list_info
2842                     [get_info.offset].list_virt_addr;
2843                 fifo_data->tx_curr_get_info.offset =
2844                     get_info.offset;
2845         }
2846
2847         spin_lock(&nic->tx_lock);
2848         if (netif_queue_stopped(dev))
2849                 netif_wake_queue(dev);
2850         spin_unlock(&nic->tx_lock);
2851 }
2852
/**
 *  s2io_mdio_write - Function to write in to MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers
 *  Return value:
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Address transaction: latch MMD device/register address on port 0 */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	/* Setting MDIO_CTRL_START_TRANS kicks off the transaction */
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);	/* fixed wait for the MDIO transaction to finish */

	/* Data transaction: write @value to the addressed register */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_MDIO_DATA(value)
			| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/*
	 * NOTE(review): a read transaction is issued after the write and its
	 * result is discarded - presumably to flush/latch the write on the
	 * device; confirm against the Xframe MDIO programming sequence.
	 */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	| MDIO_MMD_DEV_ADDR(mmd_type)
	| MDIO_MMS_PRT_ADDR(0x0)
	| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

}
2901
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  Return value:
 *  The 16-bit register contents, widened to u64
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Address transaction: latch MMD device/register address on port 0 */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	/* Setting MDIO_CTRL_START_TRANS kicks off the transaction */
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);	/* fixed wait for the MDIO transaction to finish */

	/* Data transaction: start the read of the addressed register */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value back: data sits in bits 16..31 of mdio_control */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
2944 /**
2945  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
2946  *  @counter      : couter value to be updated
2947  *  @flag         : flag to indicate the status
2948  *  @type         : counter type
2949  *  Description:
2950  *  This function is to check the status of the xpak counters value
2951  *  NONE
2952  */
2953
2954 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2955 {
2956         u64 mask = 0x3;
2957         u64 val64;
2958         int i;
2959         for(i = 0; i <index; i++)
2960                 mask = mask << 0x2;
2961
2962         if(flag > 0)
2963         {
2964                 *counter = *counter + 1;
2965                 val64 = *regs_stat & mask;
2966                 val64 = val64 >> (index * 0x2);
2967                 val64 = val64 + 1;
2968                 if(val64 == 3)
2969                 {
2970                         switch(type)
2971                         {
2972                         case 1:
2973                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2974                                           "service. Excessive temperatures may "
2975                                           "result in premature transceiver "
2976                                           "failure \n");
2977                         break;
2978                         case 2:
2979                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2980                                           "service Excessive bias currents may "
2981                                           "indicate imminent laser diode "
2982                                           "failure \n");
2983                         break;
2984                         case 3:
2985                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2986                                           "service Excessive laser output "
2987                                           "power may saturate far-end "
2988                                           "receiver\n");
2989                         break;
2990                         default:
2991                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2992                                           "type \n");
2993                         }
2994                         val64 = 0x0;
2995                 }
2996                 val64 = val64 << (index * 0x2);
2997                 *regs_stat = (*regs_stat & (~mask)) | (val64);
2998
2999         } else {
3000                 *regs_stat = *regs_stat & (~mask);
3001         }
3002 }
3003
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value
 *  by polling the XPAK alarm and warning flag registers over MDIO.
 *  Return value:
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave: all-zeroes or
	 * all-ones reads indicate a dead or absent slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register.
	 * NOTE(review): writes val16 (0) to 0xA100 and reads it back,
	 * discarding the result - presumably this triggers the XPAK
	 * Digital Optical Monitoring capture; confirm against XPAK MSA. */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm; tracked with the
	 * 2-bit persistence field 0 of xpak_regs_stat */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	/* Bit 6: temperature low alarm - simple counter, no persistence */
	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (persistence field 1) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (persistence field 2) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags: warnings are plain counters with no
	 * persistence tracking */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3102
3103 /**
 *  alarm_intr_handler - Alarm Interrupt handler
3105  *  @nic: device private variable
3106  *  Description: If the interrupt was neither because of Rx packet or Tx
3107  *  complete, this function is called. If the interrupt was to indicate
3108  *  a loss of link, the OSM link status handler is invoked for any other
3109  *  alarm interrupt the block that raised the interrupt is displayed
3110  *  and a H/W reset is issued.
3111  *  Return Value:
3112  *  NONE
3113 */
3114
3115 static void alarm_intr_handler(struct s2io_nic *nic)
3116 {
3117         struct net_device *dev = (struct net_device *) nic->dev;
3118         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3119         register u64 val64 = 0, err_reg = 0;
3120         u64 cnt;
3121         int i;
3122         if (atomic_read(&nic->card_state) == CARD_DOWN)
3123                 return;
3124         nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3125         /* Handling the XPAK counters update */
3126         if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3127                 /* waiting for an hour */
3128                 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;