be2net: enable WOL by default if h/w supports it
[linux-2.6.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23
24 MODULE_VERSION(DRV_VER);
25 MODULE_DEVICE_TABLE(pci, be_dev_ids);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46         { 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the UE (Unrecoverable Error) status
 * low register; index i corresponds to bit i.  Used when logging which HW
 * block raised the error.
 * NOTE(review): some entries carry a trailing space — presumably harmless
 * in log output, but preserved here byte-for-byte.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for each bit of the UE status high register; index i corresponds
 * to bit i.  "Unknown" entries are reserved/undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
119
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122         return (adapter->function_mode & FLEX10_MODE ||
123                 adapter->function_mode & VNIC_MODE ||
124                 adapter->function_mode & UMC_ENABLED);
125 }
126
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129         struct be_dma_mem *mem = &q->dma_mem;
130         if (mem->va) {
131                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132                                   mem->dma);
133                 mem->va = NULL;
134         }
135 }
136
/* Allocate a DMA-coherent ring of @len entries of @entry_size bytes for @q.
 * The queue-info struct is fully reset first, then the backing memory is
 * allocated and zeroed.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	/* Clear q first: mem (and head/tail indices) start from a known state */
	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	/* HW parses ring entries; hand it zeroed memory */
	memset(mem->va, 0, mem->size);
	return 0;
}
153
/* Enable/disable host interrupt delivery for this function by toggling the
 * HOSTINTR bit in PCI config space.  No-op if the bit already matches the
 * requested state, or after an EEH/PCI error has been flagged.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* Don't touch config space once a PCI error has been detected */
	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
175
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178         u32 val = 0;
179         val |= qid & DB_RQ_RING_ID_MASK;
180         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181
182         wmb();
183         iowrite32(val, adapter->db + DB_RQ_OFFSET);
184 }
185
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188         u32 val = 0;
189         val |= qid & DB_TXULP_RING_ID_MASK;
190         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191
192         wmb();
193         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194 }
195
/* Ring the event-queue doorbell for @qid: optionally re-arm the EQ and/or
 * clear the interrupt, and acknowledge @num_popped consumed event entries.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write after a PCI/EEH error */
	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;	/* doorbell targets an event queue */
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
215
/* Ring the completion-queue doorbell for @qid: optionally re-arm the CQ
 * and acknowledge @num_popped consumed completion entries.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write after a PCI/EEH error */
	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
231
/* ndo_set_mac_address handler.  Programs a new unicast MAC filter for the
 * interface.  The new pmac entry is added *before* the old one is deleted,
 * so the interface is never momentarily without a valid MAC filter.
 * Returns 0 on success or a negative/fw error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;	/* old entry, deleted below on change */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Read back the MAC currently programmed in hw for this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Reprogram hw only if the address actually changed */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
264 static void populate_be2_stats(struct be_adapter *adapter)
265 {
266         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269         struct be_port_rxf_stats_v0 *port_stats =
270                                         &rxf_stats->port[adapter->port_num];
271         struct be_drv_stats *drvs = &adapter->drv_stats;
272
273         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274         drvs->rx_pause_frames = port_stats->rx_pause_frames;
275         drvs->rx_crc_errors = port_stats->rx_crc_errors;
276         drvs->rx_control_frames = port_stats->rx_control_frames;
277         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289         drvs->rx_dropped_header_too_small =
290                 port_stats->rx_dropped_header_too_small;
291         drvs->rx_address_mismatch_drops =
292                                         port_stats->rx_address_mismatch_drops +
293                                         port_stats->rx_vlan_mismatch_drops;
294         drvs->rx_alignment_symbol_errors =
295                 port_stats->rx_alignment_symbol_errors;
296
297         drvs->tx_pauseframes = port_stats->tx_pauseframes;
298         drvs->tx_controlframes = port_stats->tx_controlframes;
299
300         if (adapter->port_num)
301                 drvs->jabber_events = rxf_stats->port1_jabber_events;
302         else
303                 drvs->jabber_events = rxf_stats->port0_jabber_events;
304         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306         drvs->forwarded_packets = rxf_stats->forwarded_packets;
307         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311 }
312
313 static void populate_be3_stats(struct be_adapter *adapter)
314 {
315         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318         struct be_port_rxf_stats_v1 *port_stats =
319                                         &rxf_stats->port[adapter->port_num];
320         struct be_drv_stats *drvs = &adapter->drv_stats;
321
322         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325         drvs->rx_pause_frames = port_stats->rx_pause_frames;
326         drvs->rx_crc_errors = port_stats->rx_crc_errors;
327         drvs->rx_control_frames = port_stats->rx_control_frames;
328         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338         drvs->rx_dropped_header_too_small =
339                 port_stats->rx_dropped_header_too_small;
340         drvs->rx_input_fifo_overflow_drop =
341                 port_stats->rx_input_fifo_overflow_drop;
342         drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343         drvs->rx_alignment_symbol_errors =
344                 port_stats->rx_alignment_symbol_errors;
345         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346         drvs->tx_pauseframes = port_stats->tx_pauseframes;
347         drvs->tx_controlframes = port_stats->tx_controlframes;
348         drvs->jabber_events = port_stats->jabber_events;
349         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351         drvs->forwarded_packets = rxf_stats->forwarded_packets;
352         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357
/* Decode the Lancer per-port (pport) stats layout into the
 * generation-agnostic adapter->drv_stats.  Only the low 32 bits of the
 * 64-bit fw counters (the *_lo fields) are carried over.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): rx_fifo_overflow feeds both the input-fifo and
	 * rxpp-fifo drop counters below — presumably Lancer exposes a
	 * single fifo-overflow counter; confirm against the pport spec */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Fold address and vlan mismatch drops into one counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
396
397 static void accumulate_16bit_val(u32 *acc, u16 val)
398 {
399 #define lo(x)                   (x & 0xFFFF)
400 #define hi(x)                   (x & 0xFFFF0000)
401         bool wrapped = val < lo(*acc);
402         u32 newacc = hi(*acc) + val;
403
404         if (wrapped)
405                 newacc += 65536;
406         ACCESS_ONCE(*acc) = newacc;
407 }
408
/* Parse the fw stats response into adapter->drv_stats, dispatching on the
 * chip generation (BE2 v0 / BE3 v1 / Lancer pport layouts), then fold the
 * per-RQ "no fragments" drop counters into their 32-bit accumulators.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		 else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
433
/* ndo_get_stats64 handler.  Aggregates per-RX/TX-queue packet and byte
 * counters (read consistently via the u64_stats seqcount) and derives the
 * standard rtnl error counters from the fw stats in adapter->drv_stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if a writer updated the counters
		 * concurrently (needed on 32-bit for torn u64 reads) */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
499
500 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
501 {
502         struct net_device *netdev = adapter->netdev;
503
504         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
505                 netif_carrier_off(netdev);
506                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
507         }
508
509         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510                 netif_carrier_on(netdev);
511         else
512                 netif_carrier_off(netdev);
513 }
514
515 static void be_tx_stats_update(struct be_tx_obj *txo,
516                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
517 {
518         struct be_tx_stats *stats = tx_stats(txo);
519
520         u64_stats_update_begin(&stats->sync);
521         stats->tx_reqs++;
522         stats->tx_wrbs += wrb_cnt;
523         stats->tx_bytes += copied;
524         stats->tx_pkts += (gso_segs ? gso_segs : 1);
525         if (stopped)
526                 stats->tx_stops++;
527         u64_stats_update_end(&stats->sync);
528 }
529
530 /* Determine number of WRB entries needed to xmit data in an skb */
531 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532                                                                 bool *dummy)
533 {
534         int cnt = (skb->len > skb->data_len);
535
536         cnt += skb_shinfo(skb)->nr_frags;
537
538         /* to account for hdr wrb */
539         cnt++;
540         if (lancer_chip(adapter) || !(cnt & 1)) {
541                 *dummy = false;
542         } else {
543                 /* add a dummy to make it an even num */
544                 cnt++;
545                 *dummy = true;
546         }
547         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548         return cnt;
549 }
550
551 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552 {
553         wrb->frag_pa_hi = upper_32_bits(addr);
554         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556 }
557
558 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559                                         struct sk_buff *skb)
560 {
561         u8 vlan_prio;
562         u16 vlan_tag;
563
564         vlan_tag = vlan_tx_tag_get(skb);
565         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566         /* If vlan priority provided by OS is NOT in available bmap */
567         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569                                 adapter->recommended_prio;
570
571         return vlan_tag;
572 }
573
/* Fill the per-packet header wrb: offload flags (LSO, checksum, vlan),
 * total wrb count for the packet and total payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 hw workaround: checksum bits must also be set
		 * for LSO packets */
		if (lancer_chip(adapter) && adapter->sli_family  ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-gso checksum offload requested by the stack */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
617
618 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
619                 bool unmap_single)
620 {
621         dma_addr_t dma;
622
623         be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
626         if (wrb->frag_len) {
627                 if (unmap_single)
628                         dma_unmap_single(dev, dma, wrb->frag_len,
629                                          DMA_TO_DEVICE);
630                 else
631                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
632         }
633 }
634
/* DMA-map an skb and post its wrbs on @txq: one header wrb (filled last,
 * once the copied length is known), one wrb for the linear part, one per
 * page fragment, and optionally a zero-length dummy wrb for even-count
 * alignment.  Returns bytes queued, or 0 after unwinding on a DMA
 * mapping error.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header wrb now; it is filled at the end */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data wrb; rollback point on error */

	if (skb->len > skb->data_len) {
		/* map the linear (header) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length pad wrb to keep the wrb count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data wrb and unmap everything mapped so far;
	 * only the very first wrb (if any) was mapped via dma_map_single,
	 * hence map_single is cleared after the first iteration */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
700
/* ndo_start_xmit handler.  Works around a BE vlan-offload checksum bug by
 * software-tagging some packets, maps the skb onto the selected TX ring,
 * stops the subqueue if it cannot hold another max-fragment packet, and
 * rings the TX doorbell.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* for rollback if mapping fails */
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* insert the tag into the frame in software instead */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: make_tx_wrbs already unwound; drop skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
760
761 static int be_change_mtu(struct net_device *netdev, int new_mtu)
762 {
763         struct be_adapter *adapter = netdev_priv(netdev);
764         if (new_mtu < BE_MIN_MTU ||
765                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
766                                         (ETH_HLEN + ETH_FCS_LEN))) {
767                 dev_info(&adapter->pdev->dev,
768                         "MTU must be between %d and %d bytes\n",
769                         BE_MIN_MTU,
770                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
771                 return -EINVAL;
772         }
773         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
774                         netdev->mtu, new_mtu);
775         netdev->mtu = new_mtu;
776         return 0;
777 }
778
779 /*
780  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
781  * If the user configures more, place BE in vlan promiscuous mode.
782  */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	/* For a VF, first program its single transparent tag on the VF's
	 * own interface handle.  NOTE(review): this status is overwritten
	 * by the PF-table config below unless we return early — confirm
	 * that is intended.
	 */
	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans)  {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* More vids than the HW filter supports: fall back to
		 * vlan promiscuous mode (last argument = 1).
		 */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
817
818 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
819 {
820         struct be_adapter *adapter = netdev_priv(netdev);
821         int status = 0;
822
823         if (!be_physfn(adapter)) {
824                 status = -EINVAL;
825                 goto ret;
826         }
827
828         adapter->vlan_tag[vid] = 1;
829         if (adapter->vlans_added <= (adapter->max_vlans + 1))
830                 status = be_vid_config(adapter, false, 0);
831
832         if (!status)
833                 adapter->vlans_added++;
834         else
835                 adapter->vlan_tag[vid] = 0;
836 ret:
837         return status;
838 }
839
840 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
841 {
842         struct be_adapter *adapter = netdev_priv(netdev);
843         int status = 0;
844
845         if (!be_physfn(adapter)) {
846                 status = -EINVAL;
847                 goto ret;
848         }
849
850         adapter->vlan_tag[vid] = 0;
851         if (adapter->vlans_added <= adapter->max_vlans)
852                 status = be_vid_config(adapter, false, 0);
853
854         if (!status)
855                 adapter->vlans_added--;
856         else
857                 adapter->vlan_tag[vid] = 1;
858 ret:
859         return status;
860 }
861
862 static void be_set_rx_mode(struct net_device *netdev)
863 {
864         struct be_adapter *adapter = netdev_priv(netdev);
865
866         if (netdev->flags & IFF_PROMISC) {
867                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
868                 adapter->promiscuous = true;
869                 goto done;
870         }
871
872         /* BE was previously in promiscuous mode; disable it */
873         if (adapter->promiscuous) {
874                 adapter->promiscuous = false;
875                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
876
877                 if (adapter->vlans_added)
878                         be_vid_config(adapter, false, 0);
879         }
880
881         /* Enable multicast promisc if num configured exceeds what we support */
882         if (netdev->flags & IFF_ALLMULTI ||
883                         netdev_mc_count(netdev) > BE_MAX_MC) {
884                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
885                 goto done;
886         }
887
888         be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
889 done:
890         return;
891 }
892
/* ndo_set_vf_mac handler: program a new MAC for VF 'vf' and cache it. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer programs the MAC via the mac-list command */
		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
	} else {
		/* BEx: delete the old pmac, then add the new one.
		 * NOTE(review): the pmac_del status is discarded (overwritten
		 * by pmac_add) — presumably best-effort, worth confirming.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	/* Cache the MAC locally only when the HW accepted it */
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
923
924 static int be_get_vf_config(struct net_device *netdev, int vf,
925                         struct ifla_vf_info *vi)
926 {
927         struct be_adapter *adapter = netdev_priv(netdev);
928         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
929
930         if (!sriov_enabled(adapter))
931                 return -EPERM;
932
933         if (vf >= adapter->num_vfs)
934                 return -EINVAL;
935
936         vi->vf = vf;
937         vi->tx_rate = vf_cfg->tx_rate;
938         vi->vlan = vf_cfg->vlan_tag;
939         vi->qos = 0;
940         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
941
942         return 0;
943 }
944
945 static int be_set_vf_vlan(struct net_device *netdev,
946                         int vf, u16 vlan, u8 qos)
947 {
948         struct be_adapter *adapter = netdev_priv(netdev);
949         int status = 0;
950
951         if (!sriov_enabled(adapter))
952                 return -EPERM;
953
954         if (vf >= adapter->num_vfs || vlan > 4095)
955                 return -EINVAL;
956
957         if (vlan) {
958                 adapter->vf_cfg[vf].vlan_tag = vlan;
959                 adapter->vlans_added++;
960         } else {
961                 adapter->vf_cfg[vf].vlan_tag = 0;
962                 adapter->vlans_added--;
963         }
964
965         status = be_vid_config(adapter, true, vf);
966
967         if (status)
968                 dev_info(&adapter->pdev->dev,
969                                 "VLAN %d config on VF %d failed\n", vlan, vf);
970         return status;
971 }
972
973 static int be_set_vf_tx_rate(struct net_device *netdev,
974                         int vf, int rate)
975 {
976         struct be_adapter *adapter = netdev_priv(netdev);
977         int status = 0;
978
979         if (!sriov_enabled(adapter))
980                 return -EPERM;
981
982         if (vf >= adapter->num_vfs)
983                 return -EINVAL;
984
985         if (rate < 100 || rate > 10000) {
986                 dev_err(&adapter->pdev->dev,
987                         "tx rate must be between 100 and 10000 Mbps\n");
988                 return -EINVAL;
989         }
990
991         status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
992
993         if (status)
994                 dev_err(&adapter->pdev->dev,
995                                 "tx rate %d on VF %d failed\n", rate, vf);
996         else
997                 adapter->vf_cfg[vf].tx_rate = rate;
998         return status;
999 }
1000
1001 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1002 {
1003         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1004         ulong now = jiffies;
1005         ulong delta = now - stats->rx_jiffies;
1006         u64 pkts;
1007         unsigned int start, eqd;
1008
1009         if (!eqo->enable_aic) {
1010                 eqd = eqo->eqd;
1011                 goto modify_eqd;
1012         }
1013
1014         if (eqo->idx >= adapter->num_rx_qs)
1015                 return;
1016
1017         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1018
1019         /* Wrapped around */
1020         if (time_before(now, stats->rx_jiffies)) {
1021                 stats->rx_jiffies = now;
1022                 return;
1023         }
1024
1025         /* Update once a second */
1026         if (delta < HZ)
1027                 return;
1028
1029         do {
1030                 start = u64_stats_fetch_begin_bh(&stats->sync);
1031                 pkts = stats->rx_pkts;
1032         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1033
1034         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1035         stats->rx_pkts_prev = pkts;
1036         stats->rx_jiffies = now;
1037         eqd = (stats->rx_pps / 110000) << 3;
1038         eqd = min(eqd, eqo->max_eqd);
1039         eqd = max(eqd, eqo->min_eqd);
1040         if (eqd < 10)
1041                 eqd = 0;
1042
1043 modify_eqd:
1044         if (eqd != eqo->cur_eqd) {
1045                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1046                 eqo->cur_eqd = eqd;
1047         }
1048 }
1049
1050 static void be_rx_stats_update(struct be_rx_obj *rxo,
1051                 struct be_rx_compl_info *rxcp)
1052 {
1053         struct be_rx_stats *stats = rx_stats(rxo);
1054
1055         u64_stats_update_begin(&stats->sync);
1056         stats->rx_compl++;
1057         stats->rx_bytes += rxcp->pkt_size;
1058         stats->rx_pkts++;
1059         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1060                 stats->rx_mcast_pkts++;
1061         if (rxcp->err)
1062                 stats->rx_compl_err++;
1063         u64_stats_update_end(&stats->sync);
1064 }
1065
1066 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1067 {
1068         /* L4 checksum is not reliable for non TCP/UDP packets.
1069          * Also ignore ipcksm for ipv6 pkts */
1070         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1071                                 (rxcp->ip_csum || rxcp->ipv6);
1072 }
1073
/* Retrieve the page_info for the rx fragment at frag_idx and account
 * for it being consumed from the rxq (decrements rxq->used).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page backs several fragments; DMA-unmap it only when the
	 * fragment marked as its last user is retrieved.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1094
1095 /* Throwaway the data in the Rx completion */
1096 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1097                                 struct be_rx_compl_info *rxcp)
1098 {
1099         struct be_queue_info *rxq = &rxo->q;
1100         struct be_rx_page_info *page_info;
1101         u16 i, num_rcvd = rxcp->num_rcvd;
1102
1103         for (i = 0; i < num_rcvd; i++) {
1104                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1105                 put_page(page_info->page);
1106                 memset(page_info, 0, sizeof(*page_info));
1107                 index_inc(&rxcp->rxq_idx, rxq->len);
1108         }
1109 }
1110
1111 /*
1112  * skb_fill_rx_data forms a complete skb for an ether frame
1113  * indicated by rxcp.
1114  */
1115 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1116                              struct be_rx_compl_info *rxcp)
1117 {
1118         struct be_queue_info *rxq = &rxo->q;
1119         struct be_rx_page_info *page_info;
1120         u16 i, j;
1121         u16 hdr_len, curr_frag_len, remaining;
1122         u8 *start;
1123
1124         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1125         start = page_address(page_info->page) + page_info->page_offset;
1126         prefetch(start);
1127
1128         /* Copy data in the first descriptor of this completion */
1129         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1130
1131         /* Copy the header portion into skb_data */
1132         hdr_len = min(BE_HDR_LEN, curr_frag_len);
1133         memcpy(skb->data, start, hdr_len);
1134         skb->len = curr_frag_len;
1135         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1136                 /* Complete packet has now been moved to data */
1137                 put_page(page_info->page);
1138                 skb->data_len = 0;
1139                 skb->tail += curr_frag_len;
1140         } else {
1141                 skb_shinfo(skb)->nr_frags = 1;
1142                 skb_frag_set_page(skb, 0, page_info->page);
1143                 skb_shinfo(skb)->frags[0].page_offset =
1144                                         page_info->page_offset + hdr_len;
1145                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1146                 skb->data_len = curr_frag_len - hdr_len;
1147                 skb->truesize += rx_frag_size;
1148                 skb->tail += hdr_len;
1149         }
1150         page_info->page = NULL;
1151
1152         if (rxcp->pkt_size <= rx_frag_size) {
1153                 BUG_ON(rxcp->num_rcvd != 1);
1154                 return;
1155         }
1156
1157         /* More frags present for this completion */
1158         index_inc(&rxcp->rxq_idx, rxq->len);
1159         remaining = rxcp->pkt_size - curr_frag_len;
1160         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1161                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1162                 curr_frag_len = min(remaining, rx_frag_size);
1163
1164                 /* Coalesce all frags from the same physical page in one slot */
1165                 if (page_info->page_offset == 0) {
1166                         /* Fresh page */
1167                         j++;
1168                         skb_frag_set_page(skb, j, page_info->page);
1169                         skb_shinfo(skb)->frags[j].page_offset =
1170                                                         page_info->page_offset;
1171                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1172                         skb_shinfo(skb)->nr_frags++;
1173                 } else {
1174                         put_page(page_info->page);
1175                 }
1176
1177                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1178                 skb->len += curr_frag_len;
1179                 skb->data_len += curr_frag_len;
1180                 skb->truesize += rx_frag_size;
1181                 remaining -= curr_frag_len;
1182                 index_inc(&rxcp->rxq_idx, rxq->len);
1183                 page_info->page = NULL;
1184         }
1185         BUG_ON(j > MAX_SKB_FRAGS);
1186 }
1187
1188 /* Process the RX completion indicated by rxcp when GRO is disabled */
1189 static void be_rx_compl_process(struct be_rx_obj *rxo,
1190                                 struct be_rx_compl_info *rxcp)
1191 {
1192         struct be_adapter *adapter = rxo->adapter;
1193         struct net_device *netdev = adapter->netdev;
1194         struct sk_buff *skb;
1195
1196         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1197         if (unlikely(!skb)) {
1198                 rx_stats(rxo)->rx_drops_no_skbs++;
1199                 be_rx_compl_discard(rxo, rxcp);
1200                 return;
1201         }
1202
1203         skb_fill_rx_data(rxo, skb, rxcp);
1204
1205         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1206                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1207         else
1208                 skb_checksum_none_assert(skb);
1209
1210         skb->protocol = eth_type_trans(skb, netdev);
1211         if (netdev->features & NETIF_F_RXHASH)
1212                 skb->rxhash = rxcp->rss_hash;
1213
1214
1215         if (rxcp->vlanf)
1216                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1217
1218         netif_receive_skb(skb);
1219 }
1220
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop and recycle the rx buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach each rx fragment directly as an skb frag (zero-copy).
	 * j starts at -1 (wraps as u16) so the first iteration takes the
	 * "fresh page" branch and bumps it to slot 0.
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: extend that slot and
			 * drop the extra page ref taken at post time.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1275
1276 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1277                                  struct be_rx_compl_info *rxcp)
1278 {
1279         rxcp->pkt_size =
1280                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1281         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1282         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1283         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1284         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1285         rxcp->ip_csum =
1286                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1287         rxcp->l4_csum =
1288                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1289         rxcp->ipv6 =
1290                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1291         rxcp->rxq_idx =
1292                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1293         rxcp->num_rcvd =
1294                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1295         rxcp->pkt_type =
1296                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1297         rxcp->rss_hash =
1298                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1299         if (rxcp->vlanf) {
1300                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1301                                           compl);
1302                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1303                                                compl);
1304         }
1305         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1306 }
1307
1308 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1309                                  struct be_rx_compl_info *rxcp)
1310 {
1311         rxcp->pkt_size =
1312                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1313         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1314         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1315         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1316         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1317         rxcp->ip_csum =
1318                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1319         rxcp->l4_csum =
1320                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1321         rxcp->ipv6 =
1322                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1323         rxcp->rxq_idx =
1324                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1325         rxcp->num_rcvd =
1326                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1327         rxcp->pkt_type =
1328                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1329         rxcp->rss_hash =
1330                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1331         if (rxcp->vlanf) {
1332                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1333                                           compl);
1334                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1335                                                compl);
1336         }
1337         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1338 }
1339
/* Fetch the next valid rx completion from rxo's CQ, parse it into
 * rxo->rxcp and return it; returns NULL when the CQ has no new entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the vlan tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress vlan reporting for the port's pvid unless the
		 * user explicitly added that vid.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1379
1380 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1381 {
1382         u32 order = get_order(size);
1383
1384         if (order > 0)
1385                 gfp |= __GFP_COMP;
1386         return  alloc_pages(gfp, order);
1387 }
1388
1389 /*
1390  * Allocate a page, split it to fragments of size rx_frag_size and post as
1391  * receive buffers to BE
1392  */
1393 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1394 {
1395         struct be_adapter *adapter = rxo->adapter;
1396         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1397         struct be_queue_info *rxq = &rxo->q;
1398         struct page *pagep = NULL;
1399         struct be_eth_rx_d *rxd;
1400         u64 page_dmaaddr = 0, frag_dmaaddr;
1401         u32 posted, page_offset = 0;
1402
1403         page_info = &rxo->page_info_tbl[rxq->head];
1404         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1405                 if (!pagep) {
1406                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1407                         if (unlikely(!pagep)) {
1408                                 rx_stats(rxo)->rx_post_fail++;
1409                                 break;
1410                         }
1411                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1412                                                     0, adapter->big_page_size,
1413                                                     DMA_FROM_DEVICE);
1414                         page_info->page_offset = 0;
1415                 } else {
1416                         get_page(pagep);
1417                         page_info->page_offset = page_offset + rx_frag_size;
1418                 }
1419                 page_offset = page_info->page_offset;
1420                 page_info->page = pagep;
1421                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1422                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1423
1424                 rxd = queue_head_node(rxq);
1425                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1426                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1427
1428                 /* Any space left in the current big page for another frag? */
1429                 if ((page_offset + rx_frag_size + rx_frag_size) >
1430                                         adapter->big_page_size) {
1431                         pagep = NULL;
1432                         page_info->last_page_user = true;
1433                 }
1434
1435                 prev_page_info = page_info;
1436                 queue_head_inc(rxq);
1437                 page_info = &rxo->page_info_tbl[rxq->head];
1438         }
1439         if (pagep)
1440                 prev_page_info->last_page_user = true;
1441
1442         if (posted) {
1443                 atomic_add(posted, &rxq->used);
1444                 be_rxq_notify(adapter, rxq->id, posted);
1445         } else if (atomic_read(&rxq->used) == 0) {
1446                 /* Let be_worker replenish when memory is available */
1447                 rxo->rx_post_starved = true;
1448         }
1449 }
1450
1451 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1452 {
1453         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1454
1455         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1456                 return NULL;
1457
1458         rmb();
1459         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1460
1461         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1462
1463         queue_tail_inc(tx_cq);
1464         return txcp;
1465 }
1466
/* Unmap and free the skb whose wrbs end at last_index in txo's queue;
 * returns the number of wrbs reclaimed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the queue position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb maps the skb's linear header area
		 * (when non-empty); unmap that exactly once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1498
1499 /* Return the number of events in the event queue */
1500 static inline int events_get(struct be_eq_obj *eqo)
1501 {
1502         struct be_eq_entry *eqe;
1503         int num = 0;
1504
1505         do {
1506                 eqe = queue_tail_node(&eqo->q);
1507                 if (eqe->evt == 0)
1508                         break;
1509
1510                 rmb();
1511                 eqe->evt = 0;
1512                 num++;
1513                 queue_tail_inc(&eqo->q);
1514         } while (true);
1515
1516         return num;
1517 }
1518
1519 static int event_handle(struct be_eq_obj *eqo)
1520 {
1521         bool rearm = false;
1522         int num = events_get(eqo);
1523
1524         /* Deal with any spurious interrupts that come without events */
1525         if (!num)
1526                 rearm = true;
1527
1528         be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1529         if (num)
1530                 napi_schedule(&eqo->napi);
1531
1532         return num;
1533 }
1534
1535 /* Leaves the EQ is disarmed state */
1536 static void be_eq_clean(struct be_eq_obj *eqo)
1537 {
1538         int num = events_get(eqo);
1539
1540         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1541 }
1542
/* Drain all pending rx completions, then release every rx buffer that
 * was posted but never consumed, leaving the rxq empty and reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest unconsumed buffer sits 'used' entries behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1567
/* Reap TX completions from all TX queues, polling for up to ~200ms for
 * in-flight transmits to complete; any wrbs whose completions never
 * arrive are then reclaimed by hand so their skbs are not leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_tx_obj *txo;
        struct be_queue_info *txq;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct sk_buff *sent_skb;
        bool dummy_wrb;
        int i, pending_txqs;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                pending_txqs = adapter->num_tx_qs;

                for_all_tx_queues(adapter, txo, i) {
                        txq = &txo->q;
                        while ((txcp = be_tx_compl_get(&txo->cq))) {
                                end_idx =
                                        AMAP_GET_BITS(struct amap_eth_tx_compl,
                                                      wrb_index, txcp);
                                num_wrbs += be_tx_compl_process(adapter, txo,
                                                                end_idx);
                                cmpl++;
                        }
                        if (cmpl) {
                                /* ack the reaped completions without
                                 * re-arming the CQ
                                 */
                                be_cq_notify(adapter, txo->cq.id, false, cmpl);
                                atomic_sub(num_wrbs, &txq->used);
                                cmpl = 0;
                                num_wrbs = 0;
                        }
                        if (atomic_read(&txq->used) == 0)
                                pending_txqs--;
                }

                if (pending_txqs == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        for_all_tx_queues(adapter, txo, i) {
                txq = &txo->q;
                if (atomic_read(&txq->used))
                        dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
                                atomic_read(&txq->used));

                /* free posted tx for which compls will never arrive */
                while (atomic_read(&txq->used)) {
                        sent_skb = txo->sent_skb_list[txq->tail];
                        end_idx = txq->tail;
                        num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
                                                   &dummy_wrb);
                        index_adv(&end_idx, num_wrbs - 1, txq->len);
                        num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
                        atomic_sub(num_wrbs, &txq->used);
                }
        }
}
1626
1627 static void be_evt_queues_destroy(struct be_adapter *adapter)
1628 {
1629         struct be_eq_obj *eqo;
1630         int i;
1631
1632         for_all_evt_queues(adapter, eqo, i) {
1633                 be_eq_clean(eqo);
1634                 if (eqo->q.created)
1635                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1636                 be_queue_free(adapter, &eqo->q);
1637         }
1638 }
1639
/* Allocate and create one event queue per interrupt vector.
 * On error this returns without undoing earlier queues;
 * be_evt_queues_destroy() copes with both created and merely-allocated
 * queues.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq;
        struct be_eq_obj *eqo;
        int i, rc;

        adapter->num_evt_qs = num_irqs(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                eqo->adapter = adapter;
                eqo->tx_budget = BE_TX_BUDGET;
                eqo->idx = i;
                eqo->max_eqd = BE_MAX_EQD;
                eqo->enable_aic = true; /* aic: adaptive interrupt coalescing */

                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                        sizeof(struct be_eq_entry));
                if (rc)
                        return rc;

                rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
                if (rc)
                        return rc;
        }
        return 0;
}
1667
1668 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1669 {
1670         struct be_queue_info *q;
1671
1672         q = &adapter->mcc_obj.q;
1673         if (q->created)
1674                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1675         be_queue_free(adapter, q);
1676
1677         q = &adapter->mcc_obj.cq;
1678         if (q->created)
1679                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1680         be_queue_free(adapter, q);
1681 }
1682
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it.
 * Returns 0 on success, -1 on any failure (resources acquired up to the
 * failing step are unwound via the goto chain below).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Use the default EQ for MCC completions */
        if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
                goto mcc_cq_free;

        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
1715
1716 static void be_tx_queues_destroy(struct be_adapter *adapter)
1717 {
1718         struct be_queue_info *q;
1719         struct be_tx_obj *txo;
1720         u8 i;
1721
1722         for_all_tx_queues(adapter, txo, i) {
1723                 q = &txo->q;
1724                 if (q->created)
1725                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1726                 be_queue_free(adapter, q);
1727
1728                 q = &txo->cq;
1729                 if (q->created)
1730                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1731                 be_queue_free(adapter, q);
1732         }
1733 }
1734
1735 static int be_num_txqs_want(struct be_adapter *adapter)
1736 {
1737         if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1738                 lancer_chip(adapter) || !be_physfn(adapter) ||
1739                 adapter->generation == BE_GEN2)
1740                 return 1;
1741         else
1742                 return MAX_TX_QS;
1743 }
1744
/* Decide the number of TX queues, publish it to the stack, and create one
 * completion queue per TX queue.  When there are fewer event queues than
 * TX queues, several TX CQs share the same EQ.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
        struct be_queue_info *cq, *eq;
        int status;
        struct be_tx_obj *txo;
        u8 i;

        adapter->num_tx_qs = be_num_txqs_want(adapter);
        if (adapter->num_tx_qs != MAX_TX_QS) {
                /* rtnl lock required to change the netdev queue count */
                rtnl_lock();
                netif_set_real_num_tx_queues(adapter->netdev,
                        adapter->num_tx_qs);
                rtnl_unlock();
        }

        for_all_tx_queues(adapter, txo, i) {
                cq = &txo->cq;
                status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
                                        sizeof(struct be_eth_tx_compl));
                if (status)
                        return status;

                /* If num_evt_qs is less than num_tx_qs, then more than
                 * one txq share an eq
                 */
                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
                status = be_cmd_cq_create(adapter, cq, eq, false, 3);
                if (status)
                        return status;
        }
        return 0;
}
1777
1778 static int be_tx_qs_create(struct be_adapter *adapter)
1779 {
1780         struct be_tx_obj *txo;
1781         int i, status;
1782
1783         for_all_tx_queues(adapter, txo, i) {
1784                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1785                                         sizeof(struct be_eth_wrb));
1786                 if (status)
1787                         return status;
1788
1789                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1790                 if (status)
1791                         return status;
1792         }
1793
1794         return 0;
1795 }
1796
1797 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1798 {
1799         struct be_queue_info *q;
1800         struct be_rx_obj *rxo;
1801         int i;
1802
1803         for_all_rx_queues(adapter, rxo, i) {
1804                 q = &rxo->cq;
1805                 if (q->created)
1806                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1807                 be_queue_free(adapter, q);
1808         }
1809 }
1810
/* Decide the number of RX queues (RSS rings + one default ring when more
 * than one irq is available) and create a completion queue for each,
 * distributing them across the event queues.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        /* We'll create as many RSS rings as there are irqs.
         * But when there's only one irq there's no use creating RSS rings
         */
        adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
                                num_irqs(adapter) + 1 : 1;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        return rc;

                /* Spread RX CQs round-robin over the event queues */
                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
                rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
                if (rc)
                        return rc;
        }

        if (adapter->num_rx_qs != MAX_RX_QS)
                dev_info(&adapter->pdev->dev,
                        "Created only %d receive queues", adapter->num_rx_qs);

        return 0;
}
1844
1845 static irqreturn_t be_intx(int irq, void *dev)
1846 {
1847         struct be_adapter *adapter = dev;
1848         int num_evts;
1849
1850         /* With INTx only one EQ is used */
1851         num_evts = event_handle(&adapter->eq_obj[0]);
1852         if (num_evts)
1853                 return IRQ_HANDLED;
1854         else
1855                 return IRQ_NONE;
1856 }
1857
1858 static irqreturn_t be_msix(int irq, void *dev)
1859 {
1860         struct be_eq_obj *eqo = dev;
1861
1862         event_handle(eqo);
1863         return IRQ_HANDLED;
1864 }
1865
1866 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1867 {
1868         return (rxcp->tcpf && !rxcp->err) ? true : false;
1869 }
1870
/* Service up to @budget RX completions from this RX object's CQ, handing
 * packets to GRO or the regular receive path.  Returns the number of
 * completions processed and replenishes RX buffers when the queue drops
 * below the refill watermark.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
                        int budget)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u32 work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                /* Is it a flush compl that has no data */
                if (unlikely(rxcp->num_rcvd == 0))
                        goto loop_continue;

                /* Discard compl with partial DMA Lancer B0 */
                if (unlikely(!rxcp->pkt_size)) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }

                /* On BE drop pkts that arrive due to imperfect filtering in
                 * promiscuous mode on some skews
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
                                !lancer_chip(adapter))) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }

                if (do_gro(rxcp))
                        be_rx_compl_process_gro(rxo, napi, rxcp);
                else
                        be_rx_compl_process(rxo, rxcp);
loop_continue:
                be_rx_stats_update(rxo, rxcp);
        }

        if (work_done) {
                /* ack the processed completions and re-arm the CQ */
                be_cq_notify(adapter, rx_cq->id, true, work_done);

                if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                        be_post_rx_frags(rxo, GFP_ATOMIC);
        }

        return work_done;
}
1920
/* Reap up to @budget TX completions for TX queue @idx, freeing the
 * transmitted skbs and waking the netdev subqueue if it was stopped.
 * Returns true when the CQ was drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                          int budget, int idx)
{
        struct be_eth_tx_compl *txcp;
        int num_wrbs = 0, work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                txcp = be_tx_compl_get(&txo->cq);
                if (!txcp)
                        break;
                num_wrbs += be_tx_compl_process(adapter, txo,
                                AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp));
        }

        if (work_done) {
                be_cq_notify(adapter, txo->cq.id, true, work_done);
                atomic_sub(num_wrbs, &txo->q.used);

                /* As Tx wrbs have been freed up, wake up netdev queue
                 * if it was stopped due to lack of tx wrbs.  */
                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
                        atomic_read(&txo->q.used) < txo->q.len / 2) {
                        netif_wake_subqueue(adapter->netdev, idx);
                }

                u64_stats_update_begin(&tx_stats(txo)->sync_compl);
                tx_stats(txo)->tx_compl += work_done;
                u64_stats_update_end(&tx_stats(txo)->sync_compl);
        }
        return (work_done < budget); /* Done */
}
1953
/* NAPI poll handler: services the TX, RX and (for the MCC EQ) MCC
 * completions belonging to this event queue.  Re-arms the EQ and exits
 * polling only when all work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter = eqo->adapter;
        int max_work = 0, work, i;
        bool tx_done;

        /* Process all TXQs serviced by this EQ */
        for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
                tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
                                        eqo->tx_budget, i);
                if (!tx_done)
                        max_work = budget; /* TX not drained: keep polling */
        }

        /* This loop will iterate twice for EQ0 in which
         * completions of the last RXQ (default one) are also processed
         * For other EQs the loop iterates only once
         */
        for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
                work = be_process_rx(&adapter->rx_obj[i], napi, budget);
                max_work = max(work, max_work);
        }

        if (is_mcc_eqo(eqo))
                be_process_mcc(adapter);

        if (max_work < budget) {
                napi_complete(napi);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        } else {
                /* As we'll continue in polling mode, count and clear events */
                be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
        }
        return max_work;
}
1990
/* Check the adapter for an unrecoverable error (UE) and log its details.
 * On Lancer the SLIPORT status/error registers are read; on other chips
 * the masked UE status words are read from PCI config space.  Sets
 * ue_detected and eeh_err so the detection runs only once.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;

        if (adapter->eeh_err || adapter->ue_detected)
                return;

        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                        sliport_err1 = ioread32(adapter->db +
                                        SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
                                        SLIPORT_ERROR2_OFFSET);
                }
        } else {
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW, &ue_lo);
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HIGH, &ue_hi);
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

                /* only unmasked bits indicate a real error */
                ue_lo = (ue_lo & (~ue_lo_mask));
                ue_hi = (ue_hi & (~ue_hi_mask));
        }

        if (ue_lo || ue_hi ||
                sliport_status & SLIPORT_STATUS_ERR_MASK) {
                adapter->ue_detected = true;
                adapter->eeh_err = true;
                dev_err(&adapter->pdev->dev,
                        "Unrecoverable error in the card\n");
        }

        /* log one line per set bit using the description tables */
        if (ue_lo) {
                for (i = 0; ue_lo; ue_lo >>= 1, i++) {
                        if (ue_lo & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_low_desc[i]);
                }
        }
        if (ue_hi) {
                for (i = 0; ue_hi; ue_hi >>= 1, i++) {
                        if (ue_hi & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_hi_desc[i]);
                }
        }

        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                dev_err(&adapter->pdev->dev,
                        "sliport status 0x%x\n", sliport_status);
                dev_err(&adapter->pdev->dev,
                        "sliport error1 0x%x\n", sliport_err1);
                dev_err(&adapter->pdev->dev,
                        "sliport error2 0x%x\n", sliport_err2);
        }
}
2054
2055 static void be_msix_disable(struct be_adapter *adapter)
2056 {
2057         if (msix_enabled(adapter)) {
2058                 pci_disable_msix(adapter->pdev);
2059                 adapter->num_msix_vec = 0;
2060         }
2061 }
2062
2063 static uint be_num_rss_want(struct be_adapter *adapter)
2064 {
2065         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2066              adapter->num_vfs == 0 && be_physfn(adapter) &&
2067              !be_is_mc(adapter))
2068                 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2069         else
2070                 return 0;
2071 }
2072
/* Try to enable MSI-X with one vector per wanted RSS queue (capped at the
 * number of online CPUs), retrying with whatever smaller count the HW
 * grants.  On complete failure num_msix_vec stays 0 and the driver falls
 * back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS             1
        int i, status, num_vec;

        /* If RSS queues are not used, need a vec for default RX Q */
        num_vec = min(be_num_rss_want(adapter), num_online_cpus());
        num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

        for (i = 0; i < num_vec; i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
        if (status == 0) {
                goto done;
        } else if (status >= BE_MIN_MSIX_VECTORS) {
                /* a positive return is the number of vectors that could
                 * actually be allocated; retry with that count
                 */
                num_vec = status;
                if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                num_vec) == 0)
                        goto done;
        }
        return;
done:
        adapter->num_msix_vec = num_vec;
        return;
}
2099
/* Enable SR-IOV (when requested via the num_vfs module param on a PF) and
 * allocate the per-VF config array.  The requested VF count is capped at
 * what the device reports in its SR-IOV capability.
 * Returns 0 on success (including "SR-IOV not requested"), -ENOMEM if the
 * per-VF state cannot be allocated.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
        be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status, pos;
                u16 dev_vfs;

                pos = pci_find_ext_capability(adapter->pdev,
                                                PCI_EXT_CAP_ID_SRIOV);
                pci_read_config_word(adapter->pdev,
                                     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

                adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
                if (adapter->num_vfs != num_vfs)
                        dev_info(&adapter->pdev->dev,
                                 "Device supports %d VFs and not %d\n",
                                 adapter->num_vfs, num_vfs);

                status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
                if (status)
                        adapter->num_vfs = 0;

                if (adapter->num_vfs) {
                        /* Size the array by the VFs actually enabled, not
                         * by the (possibly larger) module-param request
                         */
                        adapter->vf_cfg = kcalloc(adapter->num_vfs,
                                                sizeof(struct be_vf_cfg),
                                                GFP_KERNEL);
                        if (!adapter->vf_cfg) {
                                /* Don't leave VFs enabled in HW with no
                                 * state to manage them
                                 */
                                pci_disable_sriov(adapter->pdev);
                                adapter->num_vfs = 0;
                                return -ENOMEM;
                        }
                }
        }
#endif
        return 0;
}
2135
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
        if (!sriov_enabled(adapter))
                return;

        /* Turn off the VFs and release the per-VF config */
        pci_disable_sriov(adapter->pdev);
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
#endif
}
2146
/* Return the MSI-X vector number assigned to the given event queue */
static inline int be_msix_vec_get(struct be_adapter *adapter,
                                struct be_eq_obj *eqo)
{
        return adapter->msix_entries[eqo->idx].vector;
}
2152
/* Request one MSI-X irq per event queue.  On failure, frees the irqs
 * acquired so far and disables MSI-X so the caller may fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *eqo;
        int status, i, vec;

        for_all_evt_queues(adapter, eqo, i) {
                sprintf(eqo->desc, "%s-q%d", netdev->name, i);
                vec = be_msix_vec_get(adapter, eqo);
                status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
                if (status)
                        goto err_msix;
        }

        return 0;
err_msix:
        /* unwind: release only the irqs registered before the failure */
        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);
        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
                status);
        be_msix_disable(adapter);
        return status;
}
2176
/* Register irq handlers: MSI-X when enabled, otherwise (PF only) INTx.
 * Sets isr_registered on success so be_irq_unregister() knows there is
 * something to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (msix_enabled(adapter)) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
                if (!be_physfn(adapter))
                        return status;
        }

        /* INTx */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                        adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return 0;
}
2204
2205 static void be_irq_unregister(struct be_adapter *adapter)
2206 {
2207         struct net_device *netdev = adapter->netdev;
2208         struct be_eq_obj *eqo;
2209         int i;
2210
2211         if (!adapter->isr_registered)
2212                 return;
2213
2214         /* INTx */
2215         if (!msix_enabled(adapter)) {
2216                 free_irq(netdev->irq, adapter);
2217                 goto done;
2218         }
2219
2220         /* MSIx */
2221         for_all_evt_queues(adapter, eqo, i)
2222                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2223
2224 done:
2225         adapter->isr_registered = false;
2226 }
2227
/* Destroy all RX queues in the FW, drain their completions/buffers and
 * free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_rxq_destroy(adapter, q);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_cq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
}
2248
/* Bring the interface down: stop async MCC processing, mask interrupts,
 * quiesce NAPI and the irq handlers, unregister irqs, then drain TX
 * completions and tear down the RX queues.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        be_async_mcc_disable(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        for_all_evt_queues(adapter, eqo, i) {
                napi_disable(&eqo->napi);
                /* make sure no handler is still running for this vector */
                if (msix_enabled(adapter))
                        synchronize_irq(be_msix_vec_get(adapter, eqo));
                else
                        synchronize_irq(netdev->irq);
                be_eq_clean(eqo);
        }

        be_irq_unregister(adapter);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        be_tx_compl_clean(adapter);

        be_rx_qs_destroy(adapter);
        return 0;
}
2279
/* Allocate and create all RX rings: the default (non-RSS) RXQ first as
 * the FW requires, then the RSS rings; program the 128-entry RSS
 * indirection table and post the initial RX buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int rc, i, j;
        u8 rsstable[128];

        for_all_rx_queues(adapter, rxo, i) {
                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
                                    sizeof(struct be_eth_rx_d));
                if (rc)
                        return rc;
        }

        /* The FW would like the default RXQ to be created first */
        rxo = default_rxo(adapter);
        rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
                               adapter->if_handle, false, &rxo->rss_id);
        if (rc)
                return rc;

        for_all_rss_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
                                       rx_frag_size, adapter->if_handle,
                                       true, &rxo->rss_id);
                if (rc)
                        return rc;
        }

        if (be_multi_rxq(adapter)) {
                /* fill the indirection table with the RSS ring ids,
                 * round-robin across the rings
                 */
                for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= 128)
                                        break;
                                rsstable[j + i] = rxo->rss_id;
                        }
                }
                rc = be_cmd_rss_config(adapter, rsstable, 128);
                if (rc)
                        return rc;
        }

        /* First time posting */
        for_all_rx_queues(adapter, rxo, i)
                be_post_rx_frags(rxo, GFP_KERNEL);
        return 0;
}
2326
/* Bring the interface up: create the RX queues, register irqs, arm all
 * CQs and EQs, enable NAPI and async MCC processing, then report the
 * current link state.  On failure, be_close() undoes the partial setup
 * and -EIO is returned (the underlying error code is not propagated).
 */
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u8 link_status;
        int status, i;

        status = be_rx_qs_create(adapter);
        if (status)
                goto err;

        be_irq_register(adapter);

        if (!lancer_chip(adapter))
                be_intr_set(adapter, true);

        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);

        for_all_tx_queues(adapter, txo, i)
                be_cq_notify(adapter, txo->cq.id, true, 0);

        be_async_mcc_enable(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }

        status = be_cmd_link_status_query(adapter, NULL, NULL,
                                          &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);

        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
2368
2369 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2370 {
2371         struct be_dma_mem cmd;
2372         int status = 0;
2373         u8 mac[ETH_ALEN];
2374
2375         memset(mac, 0, ETH_ALEN);
2376
2377         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2378         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2379                                     GFP_KERNEL);
2380         if (cmd.va == NULL)
2381                 return -1;
2382         memset(cmd.va, 0, cmd.size);
2383
2384         if (enable) {
2385                 status = pci_write_config_dword(adapter->pdev,
2386                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2387                 if (status) {
2388                         dev_err(&adapter->pdev->dev,
2389                                 "Could not enable Wake-on-lan\n");
2390                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2391                                           cmd.dma);
2392                         return status;
2393                 }
2394                 status = be_cmd_enable_magic_wol(adapter,
2395                                 adapter->netdev->dev_addr, &cmd);
2396                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2397                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2398         } else {
2399                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2400                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2401                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2402         }
2403
2404         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2405         return status;
2406 }
2407
2408 /*
2409  * Generate a seed MAC address from the PF MAC Address using jhash.
2410  * MAC Address for VFs are assigned incrementally starting from the seed.
2411  * These addresses are programmed in the ASIC by the PF and the VF driver
2412  * queries for the MAC address during its probe.
2413  */
2414 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2415 {
2416         u32 vf;
2417         int status = 0;
2418         u8 mac[ETH_ALEN];
2419         struct be_vf_cfg *vf_cfg;
2420
2421         be_vf_eth_addr_generate(adapter, mac);
2422
2423         for_all_vfs(adapter, vf_cfg, vf) {
2424                 if (lancer_chip(adapter)) {
2425                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2426                 } else {
2427                         status = be_cmd_pmac_add(adapter, mac,
2428                                                  vf_cfg->if_handle,
2429                                                  &vf_cfg->pmac_id, vf + 1);
2430                 }
2431
2432                 if (status)
2433                         dev_err(&adapter->pdev->dev,
2434                         "Mac address assignment failed for VF %d\n", vf);
2435                 else
2436                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2437
2438                 mac[5] += 1;
2439         }
2440         return status;
2441 }
2442
2443 static void be_vf_clear(struct be_adapter *adapter)
2444 {
2445         struct be_vf_cfg *vf_cfg;
2446         u32 vf;
2447
2448         for_all_vfs(adapter, vf_cfg, vf) {
2449                 if (lancer_chip(adapter))
2450                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2451                 else
2452                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2453                                         vf_cfg->pmac_id, vf + 1);
2454
2455                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2456         }
2457 }
2458
/* Tear down everything be_setup() created, in reverse order.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* Stop the periodic worker first so it cannot touch the queues
	 * that are destroyed below.
	 */
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}
2482
2483 static void be_vf_setup_init(struct be_adapter *adapter)
2484 {
2485         struct be_vf_cfg *vf_cfg;
2486         int vf;
2487
2488         for_all_vfs(adapter, vf_cfg, vf) {
2489                 vf_cfg->if_handle = -1;
2490                 vf_cfg->pmac_id = -1;
2491         }
2492 }
2493
2494 static int be_vf_setup(struct be_adapter *adapter)
2495 {
2496         struct be_vf_cfg *vf_cfg;
2497         u32 cap_flags, en_flags, vf;
2498         u16 lnk_speed;
2499         int status;
2500
2501         be_vf_setup_init(adapter);
2502
2503         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2504                                 BE_IF_FLAGS_MULTICAST;
2505         for_all_vfs(adapter, vf_cfg, vf) {
2506                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2507                                           &vf_cfg->if_handle, NULL, vf + 1);
2508                 if (status)
2509                         goto err;
2510         }
2511
2512         status = be_vf_eth_addr_config(adapter);
2513         if (status)
2514                 goto err;
2515
2516         for_all_vfs(adapter, vf_cfg, vf) {
2517                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2518                                                   NULL, vf + 1);
2519                 if (status)
2520                         goto err;
2521                 vf_cfg->tx_rate = lnk_speed * 10;
2522         }
2523         return 0;
2524 err:
2525         return status;
2526 }
2527
2528 static void be_setup_init(struct be_adapter *adapter)
2529 {
2530         adapter->vlan_prio_bmap = 0xff;
2531         adapter->link_speed = -1;
2532         adapter->if_handle = -1;
2533         adapter->be3_native = false;
2534         adapter->promiscuous = false;
2535         adapter->eq_next_idx = 0;
2536 }
2537
2538 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2539 {
2540         u32 pmac_id;
2541         int status;
2542         bool pmac_id_active;
2543
2544         status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2545                                                         &pmac_id, mac);
2546         if (status != 0)
2547                 goto do_none;
2548
2549         if (pmac_id_active) {
2550                 status = be_cmd_mac_addr_query(adapter, mac,
2551                                 MAC_ADDRESS_TYPE_NETWORK,
2552                                 false, adapter->if_handle, pmac_id);
2553
2554                 if (!status)
2555                         adapter->pmac_id = pmac_id;
2556         } else {
2557                 status = be_cmd_pmac_add(adapter, mac,
2558                                 adapter->if_handle, &adapter->pmac_id, 0);
2559         }
2560 do_none:
2561         return status;
2562 }
2563
2564 static int be_setup(struct be_adapter *adapter)
2565 {
2566         struct net_device *netdev = adapter->netdev;
2567         u32 cap_flags, en_flags;
2568         u32 tx_fc, rx_fc;
2569         int status;
2570         u8 mac[ETH_ALEN];
2571
2572         be_setup_init(adapter);
2573
2574         be_cmd_req_native_mode(adapter);
2575
2576         be_msix_enable(adapter);
2577
2578         status = be_evt_queues_create(adapter);
2579         if (status)
2580                 goto err;
2581
2582         status = be_tx_cqs_create(adapter);
2583         if (status)
2584                 goto err;
2585
2586         status = be_rx_cqs_create(adapter);
2587         if (status)
2588                 goto err;
2589
2590         status = be_mcc_queues_create(adapter);
2591         if (status)
2592                 goto err;
2593
2594         memset(mac, 0, ETH_ALEN);
2595         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2596                         true /*permanent */, 0, 0);
2597         if (status)
2598                 return status;
2599         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2600         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2601
2602         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2604         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2605                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2606
2607         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2608                 cap_flags |= BE_IF_FLAGS_RSS;
2609                 en_flags |= BE_IF_FLAGS_RSS;
2610         }
2611         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2612                         netdev->dev_addr, &adapter->if_handle,
2613                         &adapter->pmac_id, 0);
2614         if (status != 0)
2615                 goto err;
2616
2617          /* The VF's permanent mac queried from card is incorrect.
2618           * For BEx: Query the mac configued by the PF using if_handle
2619           * For Lancer: Get and use mac_list to obtain mac address.
2620           */
2621         if (!be_physfn(adapter)) {
2622                 if (lancer_chip(adapter))
2623                         status = be_add_mac_from_list(adapter, mac);
2624                 else
2625                         status = be_cmd_mac_addr_query(adapter, mac,
2626                                         MAC_ADDRESS_TYPE_NETWORK, false,
2627                                         adapter->if_handle, 0);
2628                 if (!status) {
2629                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2630                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2631                 }
2632         }
2633
2634         status = be_tx_qs_create(adapter);
2635         if (status)
2636                 goto err;
2637
2638         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2639
2640         status = be_vid_config(adapter, false, 0);
2641         if (status)
2642                 goto err;
2643
2644         be_set_rx_mode(adapter->netdev);
2645
2646         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2647         /* For Lancer: It is legal for this cmd to fail on VF */
2648         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2649                 goto err;
2650
2651         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2652                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2653                                         adapter->rx_fc);
2654                 /* For Lancer: It is legal for this cmd to fail on VF */
2655                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2656                         goto err;
2657         }
2658
2659         pcie_set_readrq(adapter->pdev, 4096);
2660
2661         if (sriov_enabled(adapter)) {
2662                 status = be_vf_setup(adapter);
2663                 if (status)
2664                         goto err;
2665         }
2666
2667         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2668         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2669
2670         return 0;
2671 err:
2672         be_clear(adapter);
2673         return status;
2674 }
2675
2676 #ifdef CONFIG_NET_POLL_CONTROLLER
2677 static void be_netpoll(struct net_device *netdev)
2678 {
2679         struct be_adapter *adapter = netdev_priv(netdev);
2680         struct be_eq_obj *eqo;
2681         int i;
2682
2683         for_all_evt_queues(adapter, eqo, i)
2684                 event_handle(eqo);
2685
2686         return;
2687 }
2688 #endif
2689
2690 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2691 static bool be_flash_redboot(struct be_adapter *adapter,
2692                         const u8 *p, u32 img_start, int image_size,
2693                         int hdr_size)
2694 {
2695         u32 crc_offset;
2696         u8 flashed_crc[4];
2697         int status;
2698
2699         crc_offset = hdr_size + img_start + image_size - 4;
2700
2701         p += crc_offset;
2702
2703         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2704                         (image_size - 4));
2705         if (status) {
2706                 dev_err(&adapter->pdev->dev,
2707                 "could not get crc from flash, not flashing redboot\n");
2708                 return false;
2709         }
2710
2711         /*update redboot only if crc does not match*/
2712         if (!memcmp(flashed_crc, p, 4))
2713                 return false;
2714         else
2715                 return true;
2716 }
2717
2718 static bool phy_flashing_required(struct be_adapter *adapter)
2719 {
2720         int status = 0;
2721         struct be_phy_info phy_info;
2722
2723         status = be_cmd_get_phy_info(adapter, &phy_info);
2724         if (status)
2725                 return false;
2726         if ((phy_info.phy_type == TN_8022) &&
2727                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2728                 return true;
2729         }
2730         return false;
2731 }
2732
/*
 * Flash the individual firmware components contained in a UFI image.
 * For each component in the per-generation layout table: locate it in
 * the file, optionally skip it (NCSI on pre-3.102.148.0 FW, PHY FW
 * when no TN_8022 PHY is present, redboot when its CRC already
 * matches), then stream it to flash in 32KB chunks.
 *
 * Returns 0 on success, -1 on a malformed image or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* flash layout tables: { offset in flash, image type, max size } */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI component requires FW 3.102.148.0 or newer */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* skip redboot when the flashed CRC already matches */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* component data follows the file header and image headers */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* reject components that would run past end of file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* the last chunk triggers the actual FLASH op;
			 * earlier chunks are only SAVEd firmware-side */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW may legitimately reject PHY FW flashing;
				 * don't treat that case as fatal */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2849
2850 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2851 {
2852         if (fhdr == NULL)
2853                 return 0;
2854         if (fhdr->build[0] == '3')
2855                 return BE_GEN3;
2856         else if (fhdr->build[0] == '2')
2857                 return BE_GEN2;
2858         else
2859                 return 0;
2860 }
2861
/*
 * Download a firmware image to a Lancer chip: the image is streamed to
 * the "/prg" flash object in 32KB chunks via WRITE_OBJECT commands,
 * then committed with a final zero-length write.
 *
 * Returns 0 on success, a negative errno, or the command status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* the image length must be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the command header plus one 32KB chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* chunk payload goes right after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the firmware actually consumed */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2940
/*
 * Flash a (non-Lancer) UFI firmware image. The UFI header's generation
 * must match the adapter's; a GEN3 UFI may contain multiple images, of
 * which only those with imageid 1 are flashed.
 *
 * Returns 0 on success, -ENOMEM, -1 on incompatibility, or the flash
 * status from be_flash_data().
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the command header plus one 32KB flash chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			/* image headers follow the g3 file header */
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2996
2997 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2998 {
2999         const struct firmware *fw;
3000         int status;
3001
3002         if (!netif_running(adapter->netdev)) {
3003                 dev_err(&adapter->pdev->dev,
3004                         "Firmware load not allowed (interface is down)\n");
3005                 return -1;
3006         }
3007
3008         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3009         if (status)
3010                 goto fw_exit;
3011
3012         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3013
3014         if (lancer_chip(adapter))
3015                 status = lancer_fw_download(adapter, fw);
3016         else
3017                 status = be_fw_download(adapter, fw);
3018
3019 fw_exit:
3020         release_firmware(fw);
3021         return status;
3022 }
3023
/* net_device callbacks implemented by this driver */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open               = be_open,
	.ndo_stop               = be_close,
	.ndo_start_xmit         = be_xmit,
	.ndo_set_rx_mode        = be_set_rx_mode,
	.ndo_set_mac_address    = be_mac_addr_set,
	.ndo_change_mtu         = be_change_mtu,
	.ndo_get_stats64        = be_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_vlan_rx_add_vid    = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
	.ndo_set_vf_mac         = be_set_vf_mac,
	.ndo_set_vf_vlan        = be_set_vf_vlan,
	.ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
	.ndo_get_vf_config      = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = be_netpoll,
#endif
};
3043
/* Initialize netdev feature flags, ops, ethtool hooks and per-EQ NAPI */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* enabled features: the toggleable set plus fixed VLAN rx/filter */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3073
3074 static void be_unmap_pci_bars(struct be_adapter *adapter)
3075 {
3076         if (adapter->csr)
3077                 iounmap(adapter->csr);
3078         if (adapter->db)
3079                 iounmap(adapter->db);
3080 }
3081
3082 static int be_map_pci_bars(struct be_adapter *adapter)
3083 {
3084         u8 __iomem *addr;
3085         int db_reg;
3086
3087         if (lancer_chip(adapter)) {
3088                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3089                         pci_resource_len(adapter->pdev, 0));
3090                 if (addr == NULL)
3091                         return -ENOMEM;
3092                 adapter->db = addr;
3093                 return 0;
3094         }
3095
3096         if (be_physfn(adapter)) {
3097                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3098                                 pci_resource_len(adapter->pdev, 2));
3099                 if (addr == NULL)
3100                         return -ENOMEM;
3101                 adapter->csr = addr;
3102         }
3103
3104         if (adapter->generation == BE_GEN2) {
3105                 db_reg = 4;
3106         } else {
3107                 if (be_physfn(adapter))
3108                         db_reg = 4;
3109                 else
3110                         db_reg = 0;
3111         }
3112         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3113                                 pci_resource_len(adapter->pdev, db_reg));
3114         if (addr == NULL)
3115                 goto pci_map_err;
3116         adapter->db = addr;
3117
3118         return 0;
3119 pci_map_err:
3120         be_unmap_pci_bars(adapter);
3121         return -ENOMEM;
3122 }
3123
3124
3125 static void be_ctrl_cleanup(struct be_adapter *adapter)
3126 {
3127         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3128
3129         be_unmap_pci_bars(adapter);
3130
3131         if (mem->va)
3132                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3133                                   mem->dma);
3134
3135         mem = &adapter->rx_filter;
3136         if (mem->va)
3137                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3138                                   mem->dma);
3139 }
3140
/*
 * Set up control-path resources: map PCI BARs, allocate the mailbox
 * DMA buffer (aligned to 16 bytes via PTR_ALIGN) and the rx-filter
 * command buffer, and initialize the related locks.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released (goto-unwind).
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* snapshot PCI config space for later restore */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3193
3194 static void be_stats_cleanup(struct be_adapter *adapter)
3195 {
3196         struct be_dma_mem *cmd = &adapter->stats_cmd;
3197
3198         if (cmd->va)
3199                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3200                                   cmd->va, cmd->dma);
3201 }
3202
3203 static int be_stats_init(struct be_adapter *adapter)
3204 {
3205         struct be_dma_mem *cmd = &adapter->stats_cmd;
3206
3207         if (adapter->generation == BE_GEN2) {
3208                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3209         } else {
3210                 if (lancer_chip(adapter))
3211                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3212                 else
3213                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3214         }
3215         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3216                                      GFP_KERNEL);
3217         if (cmd->va == NULL)
3218                 return -1;
3219         memset(cmd->va, 0, cmd->size);
3220         return 0;
3221 }
3222
/* PCI remove callback: tear down in roughly the reverse order of
 * be_probe() — unregister the netdev first so no new I/O arrives,
 * then h/w resources, then PCI state, and free the netdev last.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        /* probe may have failed before drvdata was set */
        if (!adapter)
                return;

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        be_sriov_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        /* adapter lives in netdev's priv area, so this frees both */
        free_netdev(adapter->netdev);
}
3246
3247 bool be_is_wol_supported(struct be_adapter *adapter)
3248 {
3249         return ((adapter->wol_cap & BE_WOL_CAP) &&
3250                 !be_is_wol_excluded(adapter)) ? true : false;
3251 }
3252
3253 static int be_get_config(struct be_adapter *adapter)
3254 {
3255         int status;
3256
3257         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3258                         &adapter->function_mode, &adapter->function_caps);
3259         if (status)
3260                 return status;
3261
3262         if (adapter->function_mode & FLEX10_MODE)
3263                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3264         else
3265                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3266
3267         status = be_cmd_get_cntl_attributes(adapter);
3268         if (status)
3269                 return status;
3270
3271         status = be_cmd_get_acpi_wol_cap(adapter);
3272         if (status) {
3273                 /* in case of a failure to get wol capabillities
3274                  * check the exclusion list to determine WOL capability */
3275                 if (!be_is_wol_excluded(adapter))
3276                         adapter->wol_cap |= BE_WOL_CAP;
3277         }
3278
3279         if (be_is_wol_supported(adapter))
3280                 adapter->wol = true;
3281
3282         return 0;
3283 }
3284
/* Classify the chip generation (BE_GEN2/BE_GEN3) from the PCI device id.
 * For OC_DEVICE_ID3/4 the SLI_INTF config register is additionally
 * validated and the SLI family recorded.  Returns 0, or -EINVAL when
 * SLI_INTF does not carry a valid type-2 signature.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
        case OC_DEVICE_ID5:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                /* only interface type 2 with a valid signature is usable */
                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                /* unknown device id: leave generation unset (0) */
                adapter->generation = 0;
        }
        return 0;
}
3320
3321 static int lancer_wait_ready(struct be_adapter *adapter)
3322 {
3323 #define SLIPORT_READY_TIMEOUT 30
3324         u32 sliport_status;
3325         int status = 0, i;
3326
3327         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3328                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3329                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3330                         break;
3331
3332                 msleep(1000);
3333         }
3334
3335         if (i == SLIPORT_READY_TIMEOUT)
3336                 status = -1;
3337
3338         return status;
3339 }
3340
/* Wait for the Lancer SLI port to become ready.  If the port reports an
 * error that f/w flags as reset-recoverable (ERR + RN bits both set),
 * trigger a port reset via SLIPORT_CONTROL and re-check.  Returns 0 on
 * success, -1 on timeout or unrecoverable error.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;
        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        /* f/w asked for a reset: initiate it */
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        /* still erroring after the reset -> give up */
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        /* error without reset-needed (or vice versa) is
                         * not recoverable here */
                        status = -1;
                }
        }
        return status;
}
3368
/* Called from the worker: if the Lancer port reports an error state,
 * attempt a full recovery — reset the port, tear the function down and
 * bring it back up, restoring the netdev's previous running state.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status;

        /* don't interfere with EEH or UE handling already in progress */
        if (adapter->eeh_err || adapter->ue_detected)
                return;

        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                dev_err(&adapter->pdev->dev,
                                "Adapter in error state."
                                "Trying to recover.\n");

                status = lancer_test_and_set_rdy_state(adapter);
                if (status)
                        goto err;

                netif_device_detach(adapter->netdev);

                if (netif_running(adapter->netdev))
                        be_close(adapter->netdev);

                be_clear(adapter);

                /* clear any stale fw-timeout condition before re-setup */
                adapter->fw_timeout = false;

                status = be_setup(adapter);
                if (status)
                        goto err;

                if (netif_running(adapter->netdev)) {
                        status = be_open(adapter->netdev);
                        if (status)
                                goto err;
                }

                netif_device_attach(adapter->netdev);

                dev_err(&adapter->pdev->dev,
                                "Adapter error recovery succeeded\n");
        }
        return;
err:
        dev_err(&adapter->pdev->dev,
                        "Adapter error recovery failed\n");
}
3417
/* Periodic (1s) housekeeping work item: Lancer error-recovery check,
 * UE detection, stats refresh, replenishing starved RX queues and EQ
 * delay updates.  Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        struct be_eq_obj *eqo;
        int i;

        if (lancer_chip(adapter))
                lancer_test_and_recover_fn_err(adapter);

        be_detect_dump_ue(adapter);

        /* when interrupts are not yet enabled, just reap any pending
        * mcc completions */
        if (!netif_running(adapter->netdev)) {
                be_process_mcc(adapter);
                goto reschedule;
        }

        /* kick off a stats request unless one is still outstanding
         * (stats_cmd_sent is presumably cleared on completion) */
        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        /* repost RX buffers on queues that starved under memory pressure */
        for_all_rx_queues(adapter, rxo, i) {
                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }

        for_all_evt_queues(adapter, eqo, i)
                be_eqd_update(adapter, eqo);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3460
/* PCI probe: bring up PCI resources, allocate the netdev/adapter,
 * initialize control structures and f/w, then register the netdev.
 * Uses the standard goto-cleanup ladder; each label undoes everything
 * acquired before the jump.
 */
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        /* be_adapter lives in the netdev's private area */
        netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* prefer 64-bit DMA; fall back to 32-bit */
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = be_sriov_enable(adapter);
        if (status)
                goto free_netdev;

        status = be_ctrl_init(adapter);
        if (status)
                goto disable_sriov;

        /* Lancer needs its SLI port ready (resetting it if required)
         * before any mailbox command can be issued */
        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
                if (!status) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);
                        status = lancer_test_and_set_rdy_state(adapter);
                }
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        /* The INTR bit may be set in the card when probed by a kdump kernel
         * after a crash.
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        /* flow control defaults to on in both directions */
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
                adapter->port_num);

        return 0;

unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
disable_sriov:
        be_sriov_disable(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
3593
/* Legacy PM suspend: arm WOL in f/w (if enabled), quiesce the netdev,
 * tear down h/w resources and put the device into the requested low
 * power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        /* configure WOL in f/w before quiescing */
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
3615
3616 static int be_resume(struct pci_dev *pdev)
3617 {
3618         int status = 0;
3619         struct be_adapter *adapter = pci_get_drvdata(pdev);
3620         struct net_device *netdev =  adapter->netdev;
3621
3622         netif_device_detach(netdev);
3623
3624         status = pci_enable_device(pdev);
3625         if (status)
3626                 return status;
3627
3628         pci_set_power_state(pdev, 0);
3629         pci_restore_state(pdev);
3630
3631         /* tell fw we're ready to fire cmds */
3632         status = be_cmd_fw_init(adapter);
3633         if (status)
3634                 return status;
3635
3636         be_setup(adapter);
3637         if (netif_running(netdev)) {
3638                 rtnl_lock();
3639                 be_open(netdev);
3640                 rtnl_unlock();
3641         }
3642         netif_device_attach(netdev);
3643
3644         if (adapter->wol)
3645                 be_setup_wol(adapter, false);
3646
3647         return 0;
3648 }
3649
3650 /*
3651  * An FLR will stop BE from DMAing any data.
3652  */
3653 static void be_shutdown(struct pci_dev *pdev)
3654 {
3655         struct be_adapter *adapter = pci_get_drvdata(pdev);
3656
3657         if (!adapter)
3658                 return;
3659
3660         cancel_delayed_work_sync(&adapter->work);
3661
3662         netif_device_detach(adapter->netdev);
3663
3664         if (adapter->wol)
3665                 be_setup_wol(adapter, true);
3666
3667         be_cmd_reset_function(adapter);
3668
3669         pci_disable_device(pdev);
3670 }
3671
/* EEH/AER error_detected callback: quiesce the device and report
 * whether a slot reset should be attempted or the device disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        /* flag checked elsewhere (e.g. the worker) to suppress h/w access */
        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        /* permanent failure: no point attempting a reset */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}
3698
/* EEH/AER slot_reset callback: re-enable the PCI device after the slot
 * reset and verify the card's f/w is ready (POST) before declaring the
 * device recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        /* clear all error/timeout flags set before the reset */
        adapter->eeh_err = false;
        adapter->ue_detected = false;
        adapter->fw_timeout = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}
3724
/* EEH/AER resume callback: rebuild the adapter after a successful slot
 * reset and restore the netdev's previous running state.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3754
/* PCI error (EEH/AER) recovery callbacks */
static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};
3760
/* PCI driver entry points registered with the PCI core */
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};
3771
3772 static int __init be_init_module(void)
3773 {
3774         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3775             rx_frag_size != 2048) {
3776                 printk(KERN_WARNING DRV_NAME
3777                         " : Module param rx_frag_size must be 2048/4096/8192."
3778                         " Using 2048\n");
3779                 rx_frag_size = 2048;
3780         }
3781
3782         return pci_register_driver(&be_driver);
3783 }
3784 module_init(be_init_module);
3785
/* Module exit point: unregister the PCI driver (which in turn invokes
 * be_remove() for every bound device).
 */
static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);