be2net: Fix error recovery paths
[linux-2.6.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
25 MODULE_DEVICE_TABLE(pci, be_dev_ids);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static ushort rx_frag_size = 2048;
31 static unsigned int num_vfs;
32 module_param(rx_frag_size, ushort, S_IRUGO);
33 module_param(num_vfs, uint, S_IRUGO);
34 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
38         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
39         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
40         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
42         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
43         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
44         { 0 }
45 };
46 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Block names for the Unrecoverable Error status-low register; each array
 * index corresponds to a bit position in the CSR. Trailing spaces in some
 * entries are preserved as-is (the strings are emitted verbatim in logs).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Block names for the Unrecoverable Error status-high register; each array
 * index corresponds to a bit position in the CSR. The trailing "Unknown"
 * entries pad the table to the full 32-bit register width.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
117
118 /* Is BE in a multi-channel mode */
119 static inline bool be_is_mc(struct be_adapter *adapter) {
120         return (adapter->function_mode & FLEX10_MODE ||
121                 adapter->function_mode & VNIC_MODE ||
122                 adapter->function_mode & UMC_ENABLED);
123 }
124
125 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 {
127         struct be_dma_mem *mem = &q->dma_mem;
128         if (mem->va)
129                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130                                   mem->dma);
131 }
132
133 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134                 u16 len, u16 entry_size)
135 {
136         struct be_dma_mem *mem = &q->dma_mem;
137
138         memset(q, 0, sizeof(*q));
139         q->len = len;
140         q->entry_size = entry_size;
141         mem->size = len * entry_size;
142         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143                                      GFP_KERNEL);
144         if (!mem->va)
145                 return -1;
146         memset(mem->va, 0, mem->size);
147         return 0;
148 }
149
150 static void be_intr_set(struct be_adapter *adapter, bool enable)
151 {
152         u32 reg, enabled;
153
154         if (adapter->eeh_err)
155                 return;
156
157         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
158                                 &reg);
159         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
161         if (!enabled && enable)
162                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163         else if (enabled && !enable)
164                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else
166                 return;
167
168         pci_write_config_dword(adapter->pdev,
169                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
170 }
171
172 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
173 {
174         u32 val = 0;
175         val |= qid & DB_RQ_RING_ID_MASK;
176         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
177
178         wmb();
179         iowrite32(val, adapter->db + DB_RQ_OFFSET);
180 }
181
182 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
183 {
184         u32 val = 0;
185         val |= qid & DB_TXULP_RING_ID_MASK;
186         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
187
188         wmb();
189         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
190 }
191
192 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
193                 bool arm, bool clear_int, u16 num_popped)
194 {
195         u32 val = 0;
196         val |= qid & DB_EQ_RING_ID_MASK;
197         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
198                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
199
200         if (adapter->eeh_err)
201                 return;
202
203         if (arm)
204                 val |= 1 << DB_EQ_REARM_SHIFT;
205         if (clear_int)
206                 val |= 1 << DB_EQ_CLR_SHIFT;
207         val |= 1 << DB_EQ_EVNT_SHIFT;
208         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
209         iowrite32(val, adapter->db + DB_EQ_OFFSET);
210 }
211
212 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_CQ_RING_ID_MASK;
216         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
217                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_err)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_CQ_REARM_SHIFT;
224         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
225         iowrite32(val, adapter->db + DB_CQ_OFFSET);
226 }
227
/* ndo_set_mac_address handler: program a new unicast MAC on the interface.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or a
 * firmware-command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac-id; be_cmd_pmac_add() below overwrites
	 * adapter->pmac_id with the new entry's id
	 */
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Query the MAC currently programmed so a request for the same
	 * address becomes a no-op on the HW MAC table.
	 */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		/* Add the new entry first, then delete the old one —
		 * presumably so the port is never left without a valid
		 * MAC (NOTE(review): ordering rationale not documented
		 * here; confirm against firmware spec).
		 */
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
259
260 static void populate_be2_stats(struct be_adapter *adapter)
261 {
262         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
263         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
264         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
265         struct be_port_rxf_stats_v0 *port_stats =
266                                         &rxf_stats->port[adapter->port_num];
267         struct be_drv_stats *drvs = &adapter->drv_stats;
268
269         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
270         drvs->rx_pause_frames = port_stats->rx_pause_frames;
271         drvs->rx_crc_errors = port_stats->rx_crc_errors;
272         drvs->rx_control_frames = port_stats->rx_control_frames;
273         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
274         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
275         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
276         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
277         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
278         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
279         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
280         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
281         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
282         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
283         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
284         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
285         drvs->rx_dropped_header_too_small =
286                 port_stats->rx_dropped_header_too_small;
287         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
288         drvs->rx_alignment_symbol_errors =
289                 port_stats->rx_alignment_symbol_errors;
290
291         drvs->tx_pauseframes = port_stats->tx_pauseframes;
292         drvs->tx_controlframes = port_stats->tx_controlframes;
293
294         if (adapter->port_num)
295                 drvs->jabber_events = rxf_stats->port1_jabber_events;
296         else
297                 drvs->jabber_events = rxf_stats->port0_jabber_events;
298         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
299         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
300         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
301         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
302         drvs->forwarded_packets = rxf_stats->forwarded_packets;
303         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
304         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
305         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
306         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
307 }
308
309 static void populate_be3_stats(struct be_adapter *adapter)
310 {
311         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
312         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
313         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
314         struct be_port_rxf_stats_v1 *port_stats =
315                                         &rxf_stats->port[adapter->port_num];
316         struct be_drv_stats *drvs = &adapter->drv_stats;
317
318         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
319         drvs->rx_pause_frames = port_stats->rx_pause_frames;
320         drvs->rx_crc_errors = port_stats->rx_crc_errors;
321         drvs->rx_control_frames = port_stats->rx_control_frames;
322         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
323         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
324         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
325         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
326         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
327         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
328         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
329         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
330         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
331         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
332         drvs->rx_dropped_header_too_small =
333                 port_stats->rx_dropped_header_too_small;
334         drvs->rx_input_fifo_overflow_drop =
335                 port_stats->rx_input_fifo_overflow_drop;
336         drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
337         drvs->rx_alignment_symbol_errors =
338                 port_stats->rx_alignment_symbol_errors;
339         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
340         drvs->tx_pauseframes = port_stats->tx_pauseframes;
341         drvs->tx_controlframes = port_stats->tx_controlframes;
342         drvs->jabber_events = port_stats->jabber_events;
343         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
344         drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
345         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
346         drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
347         drvs->forwarded_packets = rxf_stats->forwarded_packets;
348         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
349         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
350         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
351         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
352 }
353
/* Copy the Lancer per-port (pport) stats snapshot into the driver's
 * generic be_drv_stats layout. The buffer is byte-swapped in place from
 * LE before the fields are read; only the low 32 bits (_lo) of 64-bit
 * counters are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): Lancer exposes a single rx_fifo_overflow counter,
	 * which feeds both of the driver's fifo-drop fields below —
	 * presumably intentional; confirm against Lancer stats spec.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
391
392 static void accumulate_16bit_val(u32 *acc, u16 val)
393 {
394 #define lo(x)                   (x & 0xFFFF)
395 #define hi(x)                   (x & 0xFFFF0000)
396         bool wrapped = val < lo(*acc);
397         u32 newacc = hi(*acc) + val;
398
399         if (wrapped)
400                 newacc += 65536;
401         ACCESS_ONCE(*acc) = newacc;
402 }
403
404 void be_parse_stats(struct be_adapter *adapter)
405 {
406         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
407         struct be_rx_obj *rxo;
408         int i;
409
410         if (adapter->generation == BE_GEN3) {
411                 if (lancer_chip(adapter))
412                         populate_lancer_stats(adapter);
413                  else
414                         populate_be3_stats(adapter);
415         } else {
416                 populate_be2_stats(adapter);
417         }
418
419         /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
420         for_all_rx_queues(adapter, rxo, i) {
421                 /* below erx HW counter can actually wrap around after
422                  * 65535. Driver accumulates a 32-bit value
423                  */
424                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
425                                 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
426         }
427 }
428
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * derive the standard rtnl error counters from the driver stats block.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* u64_stats retry loop: re-read if a writer updated the
		 * counters while we were sampling them
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
494
495 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
496 {
497         struct net_device *netdev = adapter->netdev;
498
499         /* when link status changes, link speed must be re-queried from card */
500         adapter->link_speed = -1;
501         if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
502                 netif_carrier_on(netdev);
503                 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
504         } else {
505                 netif_carrier_off(netdev);
506                 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
507         }
508 }
509
510 static void be_tx_stats_update(struct be_tx_obj *txo,
511                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
512 {
513         struct be_tx_stats *stats = tx_stats(txo);
514
515         u64_stats_update_begin(&stats->sync);
516         stats->tx_reqs++;
517         stats->tx_wrbs += wrb_cnt;
518         stats->tx_bytes += copied;
519         stats->tx_pkts += (gso_segs ? gso_segs : 1);
520         if (stopped)
521                 stats->tx_stops++;
522         u64_stats_update_end(&stats->sync);
523 }
524
525 /* Determine number of WRB entries needed to xmit data in an skb */
526 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
527                                                                 bool *dummy)
528 {
529         int cnt = (skb->len > skb->data_len);
530
531         cnt += skb_shinfo(skb)->nr_frags;
532
533         /* to account for hdr wrb */
534         cnt++;
535         if (lancer_chip(adapter) || !(cnt & 1)) {
536                 *dummy = false;
537         } else {
538                 /* add a dummy to make it an even num */
539                 cnt++;
540                 *dummy = true;
541         }
542         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
543         return cnt;
544 }
545
546 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
547 {
548         wrb->frag_pa_hi = upper_32_bits(addr);
549         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
550         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
551 }
552
/* Build the header WRB that precedes the fragment WRBs of one transmit:
 * programs LSO/checksum offload bits, vlan insertion, the WRB count and
 * the total payload length via the AMAP bit-field accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally needs explicit csum bits with LSO */
		if (lancer_chip(adapter) && adapter->sli_family  ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
602
603 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
604                 bool unmap_single)
605 {
606         dma_addr_t dma;
607
608         be_dws_le_to_cpu(wrb, sizeof(*wrb));
609
610         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
611         if (wrb->frag_len) {
612                 if (unmap_single)
613                         dma_unmap_single(dev, dma, wrb->frag_len,
614                                          DMA_TO_DEVICE);
615                 else
616                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
617         }
618 }
619
/* DMA-map an skb and post its header + fragment WRBs into @txq.
 * Returns the number of payload bytes queued, or 0 if any DMA mapping
 * failed (in which case all mappings made so far are undone and the
 * queue head is rewound — the queue is left exactly as it was found).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled in last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* remember where the fragment WRBs start, for unwinding on error */
	map_head = txq->head;

	/* linear part of the skb, if it carries any data */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length dummy WRB to pad the request to an even WRB count */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind past the header WRB and unmap everything mapped so far;
	 * only the first fragment WRB can be a single-mapping, hence
	 * map_single is cleared after the first iteration
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
685
/* ndo_start_xmit handler: map the skb into TX WRBs, ring the doorbell and
 * record stats. On mapping failure the skb is dropped (freed) — the stack
 * is never asked to requeue, so NETDEV_TX_OK is always returned.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* index of the head WRB for this skb */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: make_tx_wrbs() already rewound the queue;
		 * restore head past the reserved header WRB and drop the skb
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
725
726 static int be_change_mtu(struct net_device *netdev, int new_mtu)
727 {
728         struct be_adapter *adapter = netdev_priv(netdev);
729         if (new_mtu < BE_MIN_MTU ||
730                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
731                                         (ETH_HLEN + ETH_FCS_LEN))) {
732                 dev_info(&adapter->pdev->dev,
733                         "MTU must be between %d and %d bytes\n",
734                         BE_MIN_MTU,
735                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
736                 return -EINVAL;
737         }
738         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
739                         netdev->mtu, new_mtu);
740         netdev->mtu = new_mtu;
741         return 0;
742 }
743
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	/* for a VF, program just that VF's single vlan tag on its own
	 * if_handle
	 */
	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans)  {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* more vlans than HW supports: fall back to vlan
		 * promiscuous mode (last arg = 1)
		 */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	/* NOTE(review): when @vf is set, the status of the VF vlan config
	 * above is overwritten by the PF config below — confirm this is
	 * intended.
	 */
	return status;
}
782
783 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
784 {
785         struct be_adapter *adapter = netdev_priv(netdev);
786
787         adapter->vlans_added++;
788         if (!be_physfn(adapter))
789                 return;
790
791         adapter->vlan_tag[vid] = 1;
792         if (adapter->vlans_added <= (adapter->max_vlans + 1))
793                 be_vid_config(adapter, false, 0);
794 }
795
796 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
797 {
798         struct be_adapter *adapter = netdev_priv(netdev);
799
800         adapter->vlans_added--;
801
802         if (!be_physfn(adapter))
803                 return;
804
805         adapter->vlan_tag[vid] = 0;
806         if (adapter->vlans_added <= adapter->max_vlans)
807                 be_vid_config(adapter, false, 0);
808 }
809
810 static void be_set_rx_mode(struct net_device *netdev)
811 {
812         struct be_adapter *adapter = netdev_priv(netdev);
813
814         if (netdev->flags & IFF_PROMISC) {
815                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
816                 adapter->promiscuous = true;
817                 goto done;
818         }
819
820         /* BE was previously in promiscuous mode; disable it */
821         if (adapter->promiscuous) {
822                 adapter->promiscuous = false;
823                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
824
825                 if (adapter->vlans_added)
826                         be_vid_config(adapter, false, 0);
827         }
828
829         /* Enable multicast promisc if num configured exceeds what we support */
830         if (netdev->flags & IFF_ALLMULTI ||
831                         netdev_mc_count(netdev) > BE_MAX_MC) {
832                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
833                 goto done;
834         }
835
836         be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
837 done:
838         return;
839 }
840
841 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
842 {
843         struct be_adapter *adapter = netdev_priv(netdev);
844         int status;
845
846         if (!adapter->sriov_enabled)
847                 return -EPERM;
848
849         if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
850                 return -EINVAL;
851
852         if (lancer_chip(adapter)) {
853                 status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
854         } else {
855                 status = be_cmd_pmac_del(adapter,
856                                 adapter->vf_cfg[vf].vf_if_handle,
857                                 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
858
859                 status = be_cmd_pmac_add(adapter, mac,
860                                 adapter->vf_cfg[vf].vf_if_handle,
861                                 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
862         }
863
864         if (status)
865                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
866                                 mac, vf);
867         else
868                 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
869
870         return status;
871 }
872
873 static int be_get_vf_config(struct net_device *netdev, int vf,
874                         struct ifla_vf_info *vi)
875 {
876         struct be_adapter *adapter = netdev_priv(netdev);
877
878         if (!adapter->sriov_enabled)
879                 return -EPERM;
880
881         if (vf >= num_vfs)
882                 return -EINVAL;
883
884         vi->vf = vf;
885         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
886         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
887         vi->qos = 0;
888         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
889
890         return 0;
891 }
892
893 static int be_set_vf_vlan(struct net_device *netdev,
894                         int vf, u16 vlan, u8 qos)
895 {
896         struct be_adapter *adapter = netdev_priv(netdev);
897         int status = 0;
898
899         if (!adapter->sriov_enabled)
900                 return -EPERM;
901
902         if ((vf >= num_vfs) || (vlan > 4095))
903                 return -EINVAL;
904
905         if (vlan) {
906                 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
907                 adapter->vlans_added++;
908         } else {
909                 adapter->vf_cfg[vf].vf_vlan_tag = 0;
910                 adapter->vlans_added--;
911         }
912
913         status = be_vid_config(adapter, true, vf);
914
915         if (status)
916                 dev_info(&adapter->pdev->dev,
917                                 "VLAN %d config on VF %d failed\n", vlan, vf);
918         return status;
919 }
920
921 static int be_set_vf_tx_rate(struct net_device *netdev,
922                         int vf, int rate)
923 {
924         struct be_adapter *adapter = netdev_priv(netdev);
925         int status = 0;
926
927         if (!adapter->sriov_enabled)
928                 return -EPERM;
929
930         if ((vf >= num_vfs) || (rate < 0))
931                 return -EINVAL;
932
933         if (rate > 10000)
934                 rate = 10000;
935
936         adapter->vf_cfg[vf].vf_tx_rate = rate;
937         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
938
939         if (status)
940                 dev_info(&adapter->pdev->dev,
941                                 "tx rate %d on VF %d failed\n", rate, vf);
942         return status;
943 }
944
/* Adaptive interrupt coalescing: at most once a second, recompute the
 * event-queue delay (eqd) for this Rx queue from the observed packet
 * rate, and program it into HW if it changed.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit packet counter consistently (u64_stats
	 * retry loop protects against a concurrent writer on 32-bit).
	 */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec into an eqd value, clamp to [min_eqd, max_eqd],
	 * and disable coalescing entirely (eqd = 0) at low rates.
	 */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	/* Only issue the FW command when the value actually changed */
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
988
989 static void be_rx_stats_update(struct be_rx_obj *rxo,
990                 struct be_rx_compl_info *rxcp)
991 {
992         struct be_rx_stats *stats = rx_stats(rxo);
993
994         u64_stats_update_begin(&stats->sync);
995         stats->rx_compl++;
996         stats->rx_bytes += rxcp->pkt_size;
997         stats->rx_pkts++;
998         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
999                 stats->rx_mcast_pkts++;
1000         if (rxcp->err)
1001                 stats->rx_compl_err++;
1002         u64_stats_update_end(&stats->sync);
1003 }
1004
1005 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1006 {
1007         /* L4 checksum is not reliable for non TCP/UDP packets.
1008          * Also ignore ipcksm for ipv6 pkts */
1009         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1010                                 (rxcp->ip_csum || rxcp->ipv6);
1011 }
1012
/* Return the page_info entry for rx frag @frag_idx, unmapping the
 * backing DMA mapping if this frag is the last user of its big page,
 * and account the consumed buffer by decrementing rxq->used.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page is DMA-mapped once and shared by several frags (see
	 * be_post_rx_frags); only the frag flagged last_page_user unmaps it.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1034
1035 /* Throwaway the data in the Rx completion */
1036 static void be_rx_compl_discard(struct be_adapter *adapter,
1037                 struct be_rx_obj *rxo,
1038                 struct be_rx_compl_info *rxcp)
1039 {
1040         struct be_queue_info *rxq = &rxo->q;
1041         struct be_rx_page_info *page_info;
1042         u16 i, num_rcvd = rxcp->num_rcvd;
1043
1044         for (i = 0; i < num_rcvd; i++) {
1045                 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1046                 put_page(page_info->page);
1047                 memset(page_info, 0, sizeof(*page_info));
1048                 index_inc(&rxcp->rxq_idx, rxq->len);
1049         }
1050 }
1051
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: up to BE_HDR_LEN bytes are copied into the linear
 * part of the skb and any remaining data is attached as page frags,
 * coalescing frags that share the same physical page into one slot.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Data beyond the copied header stays in the page and is
		 * attached as frag 0.
		 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag completion: nothing more to gather */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * ref taken when the buffer was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1128
1129 /* Process the RX completion indicated by rxcp when GRO is disabled */
1130 static void be_rx_compl_process(struct be_adapter *adapter,
1131                         struct be_rx_obj *rxo,
1132                         struct be_rx_compl_info *rxcp)
1133 {
1134         struct net_device *netdev = adapter->netdev;
1135         struct sk_buff *skb;
1136
1137         skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1138         if (unlikely(!skb)) {
1139                 rx_stats(rxo)->rx_drops_no_skbs++;
1140                 be_rx_compl_discard(adapter, rxo, rxcp);
1141                 return;
1142         }
1143
1144         skb_fill_rx_data(adapter, rxo, skb, rxcp);
1145
1146         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1147                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1148         else
1149                 skb_checksum_none_assert(skb);
1150
1151         skb->protocol = eth_type_trans(skb, netdev);
1152         if (adapter->netdev->features & NETIF_F_RXHASH)
1153                 skb->rxhash = rxcp->rss_hash;
1154
1155
1156         if (rxcp->vlanf)
1157                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1158
1159         netif_receive_skb(skb);
1160 }
1161
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received frags directly to a napi-provided skb and hand
 * it to the GRO engine, coalescing frags that share a physical page
 * into one skb frag slot.
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj =  &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* No skb: drop the frame but reclaim its rx buffers */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first iteration always opens frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: drop the extra ref taken at post time */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* NOTE(review): assumes the caller only takes the GRO path for
	 * frames whose HW checksum already passed — confirm at call site.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1217
1218 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1219                                 struct be_eth_rx_compl *compl,
1220                                 struct be_rx_compl_info *rxcp)
1221 {
1222         rxcp->pkt_size =
1223                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1224         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1225         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1226         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1227         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1228         rxcp->ip_csum =
1229                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1230         rxcp->l4_csum =
1231                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1232         rxcp->ipv6 =
1233                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1234         rxcp->rxq_idx =
1235                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1236         rxcp->num_rcvd =
1237                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1238         rxcp->pkt_type =
1239                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1240         rxcp->rss_hash =
1241                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1242         if (rxcp->vlanf) {
1243                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1244                                           compl);
1245                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1246                                                compl);
1247         }
1248         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1249 }
1250
1251 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1252                                 struct be_eth_rx_compl *compl,
1253                                 struct be_rx_compl_info *rxcp)
1254 {
1255         rxcp->pkt_size =
1256                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1257         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1258         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1259         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1260         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1261         rxcp->ip_csum =
1262                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1263         rxcp->l4_csum =
1264                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1265         rxcp->ipv6 =
1266                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1267         rxcp->rxq_idx =
1268                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1269         rxcp->num_rcvd =
1270                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1271         rxcp->pkt_type =
1272                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1273         rxcp->rss_hash =
1274                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1275         if (rxcp->vlanf) {
1276                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1277                                           compl);
1278                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1279                                                compl);
1280         }
1281         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1282 }
1283
1284 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1285 {
1286         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1287         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1288         struct be_adapter *adapter = rxo->adapter;
1289
1290         /* For checking the valid bit it is Ok to use either definition as the
1291          * valid bit is at the same position in both v0 and v1 Rx compl */
1292         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1293                 return NULL;
1294
1295         rmb();
1296         be_dws_le_to_cpu(compl, sizeof(*compl));
1297
1298         if (adapter->be3_native)
1299                 be_parse_rx_compl_v1(adapter, compl, rxcp);
1300         else
1301                 be_parse_rx_compl_v0(adapter, compl, rxcp);
1302
1303         if (rxcp->vlanf) {
1304                 /* vlanf could be wrongly set in some cards.
1305                  * ignore if vtm is not set */
1306                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1307                         rxcp->vlanf = 0;
1308
1309                 if (!lancer_chip(adapter))
1310                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1311
1312                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1313                     !adapter->vlan_tag[rxcp->vlan_tag])
1314                         rxcp->vlanf = 0;
1315         }
1316
1317         /* As the compl has been parsed, reset it; we wont touch it again */
1318         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1319
1320         queue_tail_inc(&rxo->cq);
1321         return rxcp;
1322 }
1323
1324 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1325 {
1326         u32 order = get_order(size);
1327
1328         if (order > 0)
1329                 gfp |= __GFP_COMP;
1330         return  alloc_pages(gfp, order);
1331 }
1332
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Fill slots until MAX_RX_POST or until the next slot is still in use */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page, DMA-mapped once for all
			 * the frags carved out of it.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size chunk out of the
			 * current page; each frag holds its own page ref.
			 */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the frag's bus address into the rx descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop exited mid-page: the last posted frag owns the DMA unmap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1395
/* Pull the next valid Tx completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped to cpu order and its valid bit cleared so
 * it is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1411
/* Unmap and free the skb whose wrbs end at @last_index on the tx queue.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may map the skb header, and only
		 * when the skb actually has a linear part.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1443
/* Return the next posted event queue entry (converted to cpu order),
 * advancing the EQ tail, or NULL if none is pending.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read the entry contents only after seeing a non-zero evt word */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1456
1457 static int event_handle(struct be_adapter *adapter,
1458                         struct be_eq_obj *eq_obj,
1459                         bool rearm)
1460 {
1461         struct be_eq_entry *eqe;
1462         u16 num = 0;
1463
1464         while ((eqe = event_get(eq_obj)) != NULL) {
1465                 eqe->evt = 0;
1466                 num++;
1467         }
1468
1469         /* Deal with any spurious interrupts that come
1470          * without events
1471          */
1472         if (!num)
1473                 rearm = true;
1474
1475         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1476         if (num)
1477                 napi_schedule(&eq_obj->napi);
1478
1479         return num;
1480 }
1481
1482 /* Just read and notify events without processing them.
1483  * Used at the time of destroying event queues */
1484 static void be_eq_clean(struct be_adapter *adapter,
1485                         struct be_eq_obj *eq_obj)
1486 {
1487         struct be_eq_entry *eqe;
1488         u16 num = 0;
1489
1490         while ((eqe = event_get(eq_obj)) != NULL) {
1491                 eqe->evt = 0;
1492                 num++;
1493         }
1494
1495         if (num)
1496                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1497 }
1498
/* Drain an Rx queue at teardown: discard any pending completions, then
 * release the posted-but-unused buffers so the queue can be destroyed
 * with nothing outstanding.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest in-use slot sits rxq->used entries behind the head;
	 * get_rx_page_info() decrements rxq->used, terminating the loop.
	 */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1523
/* Drain a Tx queue at teardown: wait up to ~200ms for outstanding
 * completions, then forcibly free any skbs whose completions never
 * arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch to HW and credit the freed wrbs */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Recompute this skb's wrb span, since no completion
		 * reported its end index.
		 */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1571
1572 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1573 {
1574         struct be_queue_info *q;
1575
1576         q = &adapter->mcc_obj.q;
1577         if (q->created)
1578                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1579         be_queue_free(adapter, q);
1580
1581         q = &adapter->mcc_obj.cq;
1582         if (q->created)
1583                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1584         be_queue_free(adapter, q);
1585 }
1586
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Acquires four resources in order (cq mem, cq in HW, q mem, q in HW);
 * on failure unwinds whatever was acquired via the goto ladder below.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* error unwind: reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1622
1623 static void be_tx_queues_destroy(struct be_adapter *adapter)
1624 {
1625         struct be_queue_info *q;
1626         struct be_tx_obj *txo;
1627         u8 i;
1628
1629         for_all_tx_queues(adapter, txo, i) {
1630                 q = &txo->q;
1631                 if (q->created)
1632                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1633                 be_queue_free(adapter, q);
1634
1635                 q = &txo->cq;
1636                 if (q->created)
1637                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1638                 be_queue_free(adapter, q);
1639         }
1640
1641         /* Clear any residual events */
1642         be_eq_clean(adapter, &adapter->tx_eq);
1643
1644         q = &adapter->tx_eq.q;
1645         if (q->created)
1646                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1647         be_queue_free(adapter, q);
1648 }
1649
1650 static int be_num_txqs_want(struct be_adapter *adapter)
1651 {
1652         if ((num_vfs && adapter->sriov_enabled) ||
1653                 be_is_mc(adapter) ||
1654                 lancer_chip(adapter) || !be_physfn(adapter) ||
1655                 adapter->generation == BE_GEN2)
1656                 return 1;
1657         else
1658                 return MAX_TX_QS;
1659 }
1660
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* tell the stack how many TX queues are really usable */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	/* fixed eq-delay for TX; adaptive interrupt coalescing disabled */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	/* destroy copes with partially-created queues (created flag) */
	be_tx_queues_destroy(adapter);
	return -1;
}
1710
1711 static void be_rx_queues_destroy(struct be_adapter *adapter)
1712 {
1713         struct be_queue_info *q;
1714         struct be_rx_obj *rxo;
1715         int i;
1716
1717         for_all_rx_queues(adapter, rxo, i) {
1718                 be_queue_free(adapter, &rxo->q);
1719
1720                 q = &rxo->cq;
1721                 if (q->created)
1722                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1723                 be_queue_free(adapter, q);
1724
1725                 q = &rxo->rx_eq.q;
1726                 if (q->created)
1727                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1728                 be_queue_free(adapter, q);
1729         }
1730 }
1731
1732 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1733 {
1734         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1735                 !adapter->sriov_enabled && be_physfn(adapter) &&
1736                 !be_is_mc(adapter)) {
1737                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1738         } else {
1739                 dev_warn(&adapter->pdev->dev,
1740                         "No support for multiple RX queues\n");
1741                 return 1;
1742         }
1743 }
1744
/* Allocate and create each RX queue's EQ and CQ in HW. The RX wrb
 * queue itself is only allocated here; it is created in be_open().
 * Returns 0 on success, -1 on failure (with full cleanup).
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* cap the count by MSI-X vectors (one reserved for TX/MCC);
	 * INTx mode gets a single RX queue
	 */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;	/* adaptive intr coalescing */

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	/* destroy skips queues that were never created in HW */
	be_rx_queues_destroy(adapter);
	return -1;
}
1802
1803 static bool event_peek(struct be_eq_obj *eq_obj)
1804 {
1805         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1806         if (!eqe->evt)
1807                 return false;
1808         else
1809                 return true;
1810 }
1811
/* Legacy INTx interrupt handler.
 * Lancer: pending work is detected by peeking at the EQs directly.
 * BEx: the CEV ISR word for this function is read and each set bit
 * identifies an EQ with pending events.
 * Returns IRQ_NONE when nothing is pending (line may be shared).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* eq_idx is the bit position of each EQ in the ISR word */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1846
1847 static irqreturn_t be_msix_rx(int irq, void *dev)
1848 {
1849         struct be_rx_obj *rxo = dev;
1850         struct be_adapter *adapter = rxo->adapter;
1851
1852         event_handle(adapter, &rxo->rx_eq, true);
1853
1854         return IRQ_HANDLED;
1855 }
1856
1857 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1858 {
1859         struct be_adapter *adapter = dev;
1860
1861         event_handle(adapter, &adapter->tx_eq, false);
1862
1863         return IRQ_HANDLED;
1864 }
1865
1866 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1867 {
1868         return (rxcp->tcpf && !rxcp->err) ? true : false;
1869 }
1870
/* NAPI poll handler for one RX queue: consume up to @budget compls,
 * refill the RX ring when it runs low, and re-arm the CQ only when
 * all pending work was consumed (work_done < budget).
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* ack consumed entries; do not re-arm yet */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1927
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		/* drain all TX completions for this queue */
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs.  */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* process MCC completions sharing this EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* re-arm the shared EQ */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
1982
/* Detect an unrecoverable error (UE) in the adapter and dump details.
 * Lancer reports errors via the SLIPORT status/error registers; BEx
 * via the UE_STATUS words in PCI config space, with masked-off bits
 * ignored. On detection the adapter is flagged (ue_detected/eeh_err)
 * and subsequent calls return immediately.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* only unmasked bits indicate a real error */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* dump the name of every set UE bit */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2046
/* Periodic (1 second) housekeeping worker: UE detection, stats-cmd
 * refresh, RX eq-delay update and RX-ring starvation recovery.
 * Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* issue a new stats command only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		/* replenish a ring that ran out of buffers earlier */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
2092
2093 static void be_msix_disable(struct be_adapter *adapter)
2094 {
2095         if (msix_enabled(adapter)) {
2096                 pci_disable_msix(adapter->pdev);
2097                 adapter->num_msix_vec = 0;
2098         }
2099 }
2100
2101 static void be_msix_enable(struct be_adapter *adapter)
2102 {
2103 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2104         int i, status, num_vec;
2105
2106         num_vec = be_num_rxqs_want(adapter) + 1;
2107
2108         for (i = 0; i < num_vec; i++)
2109                 adapter->msix_entries[i].entry = i;
2110
2111         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2112         if (status == 0) {
2113                 goto done;
2114         } else if (status >= BE_MIN_MSIX_VECTORS) {
2115                 num_vec = status;
2116                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2117                                 num_vec) == 0)
2118                         goto done;
2119         }
2120         return;
2121 done:
2122         adapter->num_msix_vec = num_vec;
2123         return;
2124 }
2125
/* Enable SR-IOV on the PF when num_vfs was requested via module param.
 * Clamps num_vfs to the device's advertised TotalVFs and allocates the
 * per-VF config array. Returns 0 on success or when SR-IOV is simply
 * not applicable; -ENOMEM if the vf_cfg allocation fails (SR-IOV is
 * rolled back so the adapter is not left half-enabled).
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		/* no SR-IOV capability: don't read config space at a
		 * bogus offset; just leave SR-IOV disabled
		 */
		if (!pos)
			return 0;

		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg) {
				/* roll back: don't leave SR-IOV enabled
				 * with a NULL vf_cfg
				 */
				pci_disable_sriov(adapter->pdev);
				adapter->sriov_enabled = false;
				return -ENOMEM;
			}
		}
	}
#endif
	return 0;
}
2160
/* Undo be_sriov_enable(): disable SR-IOV and free the VF config array */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->sriov_enabled = false;
#endif
}
2171
2172 static inline int be_msix_vec_get(struct be_adapter *adapter,
2173                                         struct be_eq_obj *eq_obj)
2174 {
2175         return adapter->msix_entries[eq_obj->eq_idx].vector;
2176 }
2177
/* Build a descriptive irq name ("<netdev>-<desc>") and request the
 * MSI-X vector associated with @eq_obj. Returns request_irq()'s result.
 * NOTE(review): sprintf into eq_obj->desc assumes the buffer is large
 * enough for netdev->name plus desc — confirm the size in be.h.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2189
/* Release the MSI-X vector that was requested for this event queue */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2196
2197 static int be_msix_register(struct be_adapter *adapter)
2198 {
2199         struct be_rx_obj *rxo;
2200         int status, i;
2201         char qname[10];
2202
2203         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2204                                 adapter);
2205         if (status)
2206                 goto err;
2207
2208         for_all_rx_queues(adapter, rxo, i) {
2209                 sprintf(qname, "rxq%d", i);
2210                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2211                                 qname, rxo);
2212                 if (status)
2213                         goto err_msix;
2214         }
2215
2216         return 0;
2217
2218 err_msix:
2219         be_free_irq(adapter, &adapter->tx_eq, adapter);
2220
2221         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2222                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2223
2224 err:
2225         dev_warn(&adapter->pdev->dev,
2226                 "MSIX Request IRQ failed - err %d\n", status);
2227         be_msix_disable(adapter);
2228         return status;
2229 }
2230
/* Register interrupt handler(s): try MSI-X first; on the PF fall back
 * to shared INTx if MSI-X registration fails. VFs have no INTx, so
 * their MSI-X failure is returned directly.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	/* remembered so be_irq_unregister() knows there is work to do */
	adapter->isr_registered = true;
	return 0;
}
2258
2259 static void be_irq_unregister(struct be_adapter *adapter)
2260 {
2261         struct net_device *netdev = adapter->netdev;
2262         struct be_rx_obj *rxo;
2263         int i;
2264
2265         if (!adapter->isr_registered)
2266                 return;
2267
2268         /* INTx */
2269         if (!msix_enabled(adapter)) {
2270                 free_irq(netdev->irq, adapter);
2271                 goto done;
2272         }
2273
2274         /* MSIx */
2275         be_free_irq(adapter, &adapter->tx_eq, adapter);
2276
2277         for_all_rx_queues(adapter, rxo, i)
2278                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2279
2280 done:
2281         adapter->isr_registered = false;
2282 }
2283
/* Destroy the HW RX rings and reclaim all posted buffers, then drain
 * residual RX events. Host queue memory is freed separately in
 * be_rx_queues_destroy().
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2308
/* Bring the interface down. Ordering matters: stop async MCC, mask
 * interrupts, disable NAPI, quiesce irqs, then reclaim pending TX and
 * clear the RX rings. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* ack any CQ entries left unprocessed, without re-arming */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* make sure no handler is still running before unregistering */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2357
/* Create the RX rings in HW (queue 0 is the default non-RSS ring and
 * RSS is enabled only for queues 1..n), program the RSS indirection
 * table when multiple rings exist, then do the first buffer posting
 * and enable NAPI. Returns 0 or the first failing command's status.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2390
/* Bring the interface up: create/fill the RX rings, enable NAPI,
 * register irqs, unmask interrupts, arm all EQs/CQs and finally allow
 * async MCC processing. On any failure the partially-opened state is
 * torn down via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2424
/* Enable or disable Wake-on-LAN (magic packet). A DMA-coherent buffer
 * is needed for the FW command; it is freed on every exit path. When
 * enabling, PM control is written first and wake is armed for D3hot
 * and D3cold; when disabling, a zero MAC clears the magic-wol filter.
 * Returns 0 or the first failing step's status.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* all-zero MAC is used to turn magic-packet wol off */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2463
2464 /*
2465  * Generate a seed MAC address from the PF MAC Address using jhash.
2466  * MAC Address for VFs are assigned incrementally starting from the seed.
2467  * These addresses are programmed in the ASIC by the PF and the VF driver
2468  * queries for the MAC address during its probe.
2469  */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* Seed MAC derived from the PF MAC (see comment above) */
	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		/* Lancer programs the MAC via the mac-list cmd; BEx adds
		 * a pmac on the VF's interface. Domain id is vf + 1. */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		}

		/* A per-VF failure is logged but does not abort the loop;
		 * the status of the last attempt is what gets returned */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		/* NOTE(review): increments only the last octet with no
		 * carry; assumes it never wraps past 0xff - confirm the
		 * seed and num_vfs guarantee this */
		mac[5] += 1;
	}
	return status;
}
2498
/* Undo be_vf_setup()/be_vf_eth_addr_config(): remove each VF's MAC
 * programming, then destroy each VF's interface. Command failures are
 * ignored (best-effort teardown).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	/* NOTE(review): if be_vf_setup() failed partway, some handles are
	 * still -1 (see be_vf_setup_init) - confirm if_destroy/pmac_del
	 * tolerate being called with invalid (-1) handles */
	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				vf + 1);
}
2516
/* Teardown counterpart of be_setup(); also used as its error-unwind
 * path, so it may run after only a partial setup. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	/* NOTE(review): if_handle is -1 (be_setup_init) when if_create was
	 * never reached - confirm be_cmd_if_destroy copes with that */
	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2532
2533 static void be_vf_setup_init(struct be_adapter *adapter)
2534 {
2535         int vf;
2536
2537         for (vf = 0; vf < num_vfs; vf++) {
2538                 adapter->vf_cfg[vf].vf_if_handle = -1;
2539                 adapter->vf_cfg[vf].vf_pmac_id = -1;
2540         }
2541 }
2542
/* PF-side SR-IOV bring-up: create an interface per VF, program the VF
 * MAC addresses, then cache each VF's link speed for tx-rate limiting.
 * Returns 0 on success or the first failing command status. No local
 * unwinding is done here; the caller (be_setup) cleans up via be_clear.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	/* Initialize all handles to -1 so a partial failure is cleanable */
	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	/* One interface per VF, in domain vf + 1 */
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	/* lnk_speed is reported in 10 Mbps units; store as Mbps x 10 */
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2577
2578 static void be_setup_init(struct be_adapter *adapter)
2579 {
2580         adapter->vlan_prio_bmap = 0xff;
2581         adapter->link_speed = -1;
2582         adapter->if_handle = -1;
2583         adapter->be3_native = false;
2584         adapter->promiscuous = false;
2585         adapter->eq_next_idx = 0;
2586 }
2587
2588 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2589 {
2590         u32 pmac_id;
2591         int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2592         if (status != 0)
2593                 goto do_none;
2594         status = be_cmd_mac_addr_query(adapter, mac,
2595                         MAC_ADDRESS_TYPE_NETWORK,
2596                         false, adapter->if_handle, pmac_id);
2597         if (status != 0)
2598                 goto do_none;
2599         status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2600                         &adapter->pmac_id, 0);
2601 do_none:
2602         return status;
2603 }
2604
2605 static int be_setup(struct be_adapter *adapter)
2606 {
2607         struct net_device *netdev = adapter->netdev;
2608         u32 cap_flags, en_flags;
2609         u32 tx_fc, rx_fc;
2610         int status, i;
2611         u8 mac[ETH_ALEN];
2612         struct be_tx_obj *txo;
2613
2614         be_setup_init(adapter);
2615
2616         be_cmd_req_native_mode(adapter);
2617
2618         status = be_tx_queues_create(adapter);
2619         if (status != 0)
2620                 goto err;
2621
2622         status = be_rx_queues_create(adapter);
2623         if (status != 0)
2624                 goto err;
2625
2626         status = be_mcc_queues_create(adapter);
2627         if (status != 0)
2628                 goto err;
2629
2630         memset(mac, 0, ETH_ALEN);
2631         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2632                         true /*permanent */, 0, 0);
2633         if (status)
2634                 return status;
2635         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2636         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2637
2638         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2639                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2640         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2641                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2642
2643         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2644                 cap_flags |= BE_IF_FLAGS_RSS;
2645                 en_flags |= BE_IF_FLAGS_RSS;
2646         }
2647         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2648                         netdev->dev_addr, &adapter->if_handle,
2649                         &adapter->pmac_id, 0);
2650         if (status != 0)
2651                 goto err;
2652
2653          for_all_tx_queues(adapter, txo, i) {
2654                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2655                 if (status)
2656                         goto err;
2657         }
2658
2659          /* The VF's permanent mac queried from card is incorrect.
2660           * For BEx: Query the mac configued by the PF using if_handle
2661           * For Lancer: Get and use mac_list to obtain mac address.
2662           */
2663         if (!be_physfn(adapter)) {
2664                 if (lancer_chip(adapter))
2665                         status = be_configure_mac_from_list(adapter, mac);
2666                 else
2667                         status = be_cmd_mac_addr_query(adapter, mac,
2668                                         MAC_ADDRESS_TYPE_NETWORK, false,
2669                                         adapter->if_handle, 0);
2670                 if (!status) {
2671                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2672                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2673                 }
2674         }
2675
2676         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2677
2678         status = be_vid_config(adapter, false, 0);
2679         if (status)
2680                 goto err;
2681
2682         be_set_rx_mode(adapter->netdev);
2683
2684         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2685         /* For Lancer: It is legal for this cmd to fail on VF */
2686         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2687                 goto err;
2688
2689         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2690                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2691                                         adapter->rx_fc);
2692                 /* For Lancer: It is legal for this cmd to fail on VF */
2693                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2694                         goto err;
2695         }
2696
2697         pcie_set_readrq(adapter->pdev, 4096);
2698
2699         if (be_physfn(adapter) && adapter->sriov_enabled) {
2700                 status = be_vf_setup(adapter);
2701                 if (status)
2702                         goto err;
2703         }
2704
2705         return 0;
2706 err:
2707         be_clear(adapter);
2708         return status;
2709 }
2710
2711 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2712 static bool be_flash_redboot(struct be_adapter *adapter,
2713                         const u8 *p, u32 img_start, int image_size,
2714                         int hdr_size)
2715 {
2716         u32 crc_offset;
2717         u8 flashed_crc[4];
2718         int status;
2719
2720         crc_offset = hdr_size + img_start + image_size - 4;
2721
2722         p += crc_offset;
2723
2724         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2725                         (image_size - 4));
2726         if (status) {
2727                 dev_err(&adapter->pdev->dev,
2728                 "could not get crc from flash, not flashing redboot\n");
2729                 return false;
2730         }
2731
2732         /*update redboot only if crc does not match*/
2733         if (!memcmp(flashed_crc, p, 4))
2734                 return false;
2735         else
2736                 return true;
2737 }
2738
2739 static bool phy_flashing_required(struct be_adapter *adapter)
2740 {
2741         int status = 0;
2742         struct be_phy_info phy_info;
2743
2744         status = be_cmd_get_phy_info(adapter, &phy_info);
2745         if (status)
2746                 return false;
2747         if ((phy_info.phy_type == TN_8022) &&
2748                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2749                 return true;
2750         }
2751         return false;
2752 }
2753
/* Flash each firmware component found in the UFI file onto the adapter,
 * in 32KB chunks via be_cmd_write_flashrom().
 * @fw:            the UFI firmware file contents
 * @flash_cmd:     pre-allocated DMA buffer for the flashrom command
 * @num_of_images: number of image headers preceding the payload (gen3)
 * Returns 0 on success, -1 on a bounds or write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Per-generation tables of { flash offset, op type, max size } */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI flashing requires f/w >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY f/w is only flashed for specific PHY types */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Redboot is only flashed when its CRC differs from flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Point p at this component's payload within the file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* Bounds check: component must lie within the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* The final chunk uses a FLASH op (commit); earlier
			 * chunks use a SAVE op (accumulate) */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* F/w rejecting PHY flashing is not fatal;
				 * skip to the next component */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2870
2871 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2872 {
2873         if (fhdr == NULL)
2874                 return 0;
2875         if (fhdr->build[0] == '3')
2876                 return BE_GEN3;
2877         else if (fhdr->build[0] == '2')
2878                 return BE_GEN2;
2879         else
2880                 return 0;
2881 }
2882
/* Download firmware to a Lancer chip by streaming the image in 32KB
 * chunks to the "/prg" object, then issuing a zero-length write to
 * commit it. Returns 0 on success or a negative errno / command status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* The write-object cmd requires a 4-byte-aligned image length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the cmd header followed by the chunk payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* Advance by what the f/w reports it consumed, which may
		 * be less than chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2961
/* Download firmware to a BE2/BE3 chip: validate that the UFI file's
 * generation matches the adapter's, then flash its components via
 * be_flash_data(). For gen3, only images with imageid == 1 are flashed.
 * Returns 0 on success, -ENOMEM, or -1 on a mismatch/flash error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Buffer sized for the flashrom cmd plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3017
/* Load and flash a firmware file (ethtool flash entry point), routing
 * to the Lancer or BEx download path. Requires the interface to be up.
 * Returns 0 on success or a negative error.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	/* NOTE(review): the failure path also lands here; this relies on
	 * request_firmware() setting fw to NULL on failure and
	 * release_firmware() accepting NULL - confirm for this kernel */
	release_firmware(fw);
	return status;
}
3044
/* netdev callbacks for this driver; installed on the netdev by
 * be_netdev_init() via BE_SET_NETDEV_OPS(). The ndo_set_vf_* ops are
 * SR-IOV controls exposed through the PF's netdev. */
static struct net_device_ops be_netdev_ops = {
	.ndo_open               = be_open,
	.ndo_stop               = be_close,
	.ndo_start_xmit         = be_xmit,
	.ndo_set_rx_mode        = be_set_rx_mode,
	.ndo_set_mac_address    = be_mac_addr_set,
	.ndo_change_mtu         = be_change_mtu,
	.ndo_get_stats64        = be_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_vlan_rx_add_vid    = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
	.ndo_set_vf_mac         = be_set_vf_mac,
	.ndo_set_vf_vlan        = be_set_vf_vlan,
	.ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
	.ndo_get_vf_config      = be_get_vf_config
};
3061
/* One-time netdev initialization: advertise offload features, install
 * the netdev/ethtool ops, and register one NAPI context per rx queue
 * plus one shared tx/mcc NAPI context.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* hw_features are user-toggleable; RXHASH only makes sense with
	 * multiple rx queues (RSS) */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* vlan rx accel/filtering are always on (not user-toggleable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	/* tx completions and async mcc events share one NAPI context */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3095
3096 static void be_unmap_pci_bars(struct be_adapter *adapter)
3097 {
3098         if (adapter->csr)
3099                 iounmap(adapter->csr);
3100         if (adapter->db)
3101                 iounmap(adapter->db);
3102 }
3103
/* ioremap the PCI BARs this function needs:
 * - Lancer: only BAR 0 (doorbells).
 * - BEx PF: BAR 2 (CSR) plus a doorbell BAR.
 * - Doorbell BAR: 4 on gen2 and on gen3 PFs, 0 on gen3 VFs.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	/* CSR space is only accessible from the PF */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	/* Unmaps the CSR mapping made above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3145
3146
3147 static void be_ctrl_cleanup(struct be_adapter *adapter)
3148 {
3149         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3150
3151         be_unmap_pci_bars(adapter);
3152
3153         if (mem->va)
3154                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3155                                   mem->dma);
3156
3157         mem = &adapter->rx_filter;
3158         if (mem->va)
3159                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3160                                   mem->dma);
3161 }
3162
/* Set up the control path: map the BARs, allocate the (16-byte-aligned)
 * mailbox and the rx-filter DMA buffers, and initialize the mbox/mcc
 * locks. Returns 0 on success or a negative errno, unwinding anything
 * already acquired via the goto-cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox itself can be 16-aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem_align aliases into mbox_mem_alloc at a 16-byte boundary;
	 * only mbox_mem_alloc is ever freed */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config state is restored on error recovery/resume */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3215
3216 static void be_stats_cleanup(struct be_adapter *adapter)
3217 {
3218         struct be_dma_mem *cmd = &adapter->stats_cmd;
3219
3220         if (cmd->va)
3221                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3222                                   cmd->va, cmd->dma);
3223 }
3224
3225 static int be_stats_init(struct be_adapter *adapter)
3226 {
3227         struct be_dma_mem *cmd = &adapter->stats_cmd;
3228
3229         if (adapter->generation == BE_GEN2) {
3230                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3231         } else {
3232                 if (lancer_chip(adapter))
3233                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3234                 else
3235                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3236         }
3237         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3238                                      GFP_KERNEL);
3239         if (cmd->va == NULL)
3240                 return -1;
3241         memset(cmd->va, 0, cmd->size);
3242         return 0;
3243 }
3244
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The sequence is order-sensitive: the worker must be
 * cancelled before the structures it touches are freed, and the netdev
 * is freed last since earlier steps still dereference it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared when probe fails; nothing to undo then */
	if (!adapter)
		return;

	/* Stop the periodic worker before dismantling its state */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3272
3273 static int be_get_config(struct be_adapter *adapter)
3274 {
3275         int status;
3276
3277         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3278                         &adapter->function_mode, &adapter->function_caps);
3279         if (status)
3280                 return status;
3281
3282         if (adapter->function_mode & FLEX10_MODE)
3283                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3284         else
3285                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3286
3287         status = be_cmd_get_cntl_attributes(adapter);
3288         if (status)
3289                 return status;
3290
3291         return 0;
3292 }
3293
3294 static int be_dev_family_check(struct be_adapter *adapter)
3295 {
3296         struct pci_dev *pdev = adapter->pdev;
3297         u32 sli_intf = 0, if_type;
3298
3299         switch (pdev->device) {
3300         case BE_DEVICE_ID1:
3301         case OC_DEVICE_ID1:
3302                 adapter->generation = BE_GEN2;
3303                 break;
3304         case BE_DEVICE_ID2:
3305         case OC_DEVICE_ID2:
3306                 adapter->generation = BE_GEN3;
3307                 break;
3308         case OC_DEVICE_ID3:
3309         case OC_DEVICE_ID4:
3310                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3311                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3312                                                 SLI_INTF_IF_TYPE_SHIFT;
3313
3314                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3315                         if_type != 0x02) {
3316                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3317                         return -EINVAL;
3318                 }
3319                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3320                                          SLI_INTF_FAMILY_SHIFT);
3321                 adapter->generation = BE_GEN3;
3322                 break;
3323         default:
3324                 adapter->generation = 0;
3325         }
3326         return 0;
3327 }
3328
3329 static int lancer_wait_ready(struct be_adapter *adapter)
3330 {
3331 #define SLIPORT_READY_TIMEOUT 500
3332         u32 sliport_status;
3333         int status = 0, i;
3334
3335         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3336                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3337                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3338                         break;
3339
3340                 msleep(20);
3341         }
3342
3343         if (i == SLIPORT_READY_TIMEOUT)
3344                 status = -1;
3345
3346         return status;
3347 }
3348
3349 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3350 {
3351         int status;
3352         u32 sliport_status, err, reset_needed;
3353         status = lancer_wait_ready(adapter);
3354         if (!status) {
3355                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3356                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3357                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3358                 if (err && reset_needed) {
3359                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3360                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3361
3362                         /* check adapter has corrected the error */
3363                         status = lancer_wait_ready(adapter);
3364                         sliport_status = ioread32(adapter->db +
3365                                                         SLIPORT_STATUS_OFFSET);
3366                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3367                                                 SLIPORT_STATUS_RN_MASK);
3368                         if (status || sliport_status)
3369                                 status = -1;
3370                 } else if (err || reset_needed) {
3371                         status = -1;
3372                 }
3373         }
3374         return status;
3375 }
3376
/* PCI probe callback: bring up a newly discovered BE/Lancer NIC.
 *
 * Init order: PCI enable + regions -> netdev alloc -> family detect ->
 * DMA mask -> SR-IOV -> mailbox/ctrl init -> firmware POST/init/reset ->
 * stats buffer -> config query -> MSI-X -> be_setup() -> register_netdev.
 * The error labels at the bottom unwind in exact reverse order and fall
 * through into one another; each failure point jumps to the first step
 * that must be undone.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Decide BE2/BE3/Lancer handling from the PCI device id */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	/* Maps BARs and allocates mailbox/rx-filter DMA buffers */
	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

	/* Error unwind: each label deliberately falls through to the next */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3506
/* PM suspend callback: stop the worker, optionally arm wake-on-LAN,
 * close the interface, tear down queues and power the PCI function
 * down to the state chosen by the PM core.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);	/* arm WoL before sleeping */

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3530
3531 static int be_resume(struct pci_dev *pdev)
3532 {
3533         int status = 0;
3534         struct be_adapter *adapter = pci_get_drvdata(pdev);
3535         struct net_device *netdev =  adapter->netdev;
3536
3537         netif_device_detach(netdev);
3538
3539         status = pci_enable_device(pdev);
3540         if (status)
3541                 return status;
3542
3543         pci_set_power_state(pdev, 0);
3544         pci_restore_state(pdev);
3545
3546         be_msix_enable(adapter);
3547         /* tell fw we're ready to fire cmds */
3548         status = be_cmd_fw_init(adapter);
3549         if (status)
3550                 return status;
3551
3552         be_setup(adapter);
3553         if (netif_running(netdev)) {
3554                 rtnl_lock();
3555                 be_open(netdev);
3556                 rtnl_unlock();
3557         }
3558         netif_device_attach(netdev);
3559
3560         if (adapter->wol)
3561                 be_setup_wol(adapter, false);
3562
3563         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3564         return 0;
3565 }
3566
/*
 * PCI shutdown callback (reboot/halt/kexec).
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed and cleared drvdata */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Quiesce the function so no DMA occurs after shutdown */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3588
/* EEH callback: a PCI channel error was detected. Detach and quiesce
 * the netdev, free adapter resources, and tell the EEH core whether a
 * slot reset may recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the error so other paths stop issuing fw commands */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Permanent failure: don't bother attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3615
/* EEH slot-reset callback: the slot has been reset. Re-enable the PCI
 * device, restore its config space and verify firmware comes back
 * ready via POST.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear stale error state left over from before the reset */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3641
/* EEH resume callback: the device is functional again. Re-run the
 * normal init path (fw init, be_setup, reopen if it was running) and
 * reattach the netdev; on any failure just log — there is no further
 * recovery from here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3671
/* PCI error-recovery (AER/EEH) hooks for this driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3677
/* PCI driver glue: binds probe/remove, PM and shutdown entry points */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3688
3689 static int __init be_init_module(void)
3690 {
3691         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3692             rx_frag_size != 2048) {
3693                 printk(KERN_WARNING DRV_NAME
3694                         " : Module param rx_frag_size must be 2048/4096/8192."
3695                         " Using 2048\n");
3696                 rx_frag_size = 2048;
3697         }
3698
3699         return pci_register_driver(&be_driver);
3700 }
3701 module_init(be_init_module);
3702
/* Module unload: unregister the driver; be_remove() runs per bound device */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);