net: make vlan ndo_vlan_rx_[add/kill]_vid return error value
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
117
118 /* Is BE in a multi-channel mode */
119 static inline bool be_is_mc(struct be_adapter *adapter) {
120         return (adapter->function_mode & FLEX10_MODE ||
121                 adapter->function_mode & VNIC_MODE ||
122                 adapter->function_mode & UMC_ENABLED);
123 }
124
125 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 {
127         struct be_dma_mem *mem = &q->dma_mem;
128         if (mem->va)
129                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130                                   mem->dma);
131 }
132
133 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134                 u16 len, u16 entry_size)
135 {
136         struct be_dma_mem *mem = &q->dma_mem;
137
138         memset(q, 0, sizeof(*q));
139         q->len = len;
140         q->entry_size = entry_size;
141         mem->size = len * entry_size;
142         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143                                      GFP_KERNEL);
144         if (!mem->va)
145                 return -1;
146         memset(mem->va, 0, mem->size);
147         return 0;
148 }
149
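/* Enable or disable host interrupts by flipping the HOSTINTR bit in the
 * PCICFG membar control register. The register is read first so the write
 * is skipped when the bit already matches the requested state.
 */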
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

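/* Doorbell helpers: each queue type is kicked by composing a 32-bit value
 * (ring id in the low bits, count/flags in the upper bits) and writing it
 * to the queue's doorbell offset. The wmb() ensures the queue entries are
 * visible to the device before the doorbell write.
 */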
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

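/* Set a new MAC address. The new pmac is added before the old one is
 * deleted so the interface is never left without a programmed MAC; the
 * delete is skipped entirely when the requested address is already
 * current.
 */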
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

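/* The populate_*_stats() helpers below copy the firmware stats snapshot
 * (v0 layout for BE2, v1 for BE3, pport stats for Lancer) into the common
 * struct be_drv_stats, so the rest of the driver is layout-agnostic.
 */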
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

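/* Fold a 16-bit HW counter into a 32-bit accumulator. If the new reading
 * is smaller than the low half of the accumulator the counter must have
 * wrapped, so an extra 65536 is added. E.g. *acc = 0x0001FFF0 followed by
 * val = 0x0005 yields 0x00020005.
 */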
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

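/* ndo_get_stats64: totals are summed across all RX/TX queues. Per-queue
 * packet/byte pairs are read under a u64_stats fetch/retry loop so a
 * 64-bit pair torn by a concurrent writer on 32-bit hosts is re-read
 * rather than reported inconsistently.
 */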
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

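/* Fill the header WRB that precedes the data WRBs of a transmit: LSO/csum
 * offload flags, a possibly rewritten vlan tag (the priority bits are
 * forced to adapter->recommended_prio when the skb's priority is not in
 * the available bitmap), the total WRB count and the payload length.
 */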
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

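/* DMA-map the skb head and frags and fill one data WRB per mapping (plus
 * an optional dummy WRB for even alignment). On a mapping failure the
 * queue head is rewound to map_head and every WRB filled so far is
 * unmapped; returning 0 tells the caller to drop the skb.
 */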
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

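/* Transmit path: build the WRBs, remember the skb for completion-time
 * freeing, stop the subqueue *before* ringing the doorbell if the next
 * max-fragment skb might not fit, then notify the hardware.
 */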
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

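/* The ndo_vlan_rx_{add,kill}_vid handlers below return an error value
 * (0 on success) rather than void. Virtual functions only track the count
 * of added vids; the physical function also reprograms the HW vlan table
 * when within the supported limit.
 */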
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return 0;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);

        return 0;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return 0;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);

        return 0;
}

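/* Push the netdev RX filter state to the hardware. Order matters:
 * promiscuous mode short-circuits everything else, leaving promiscuous
 * mode replays the vlan filter, and multicast promisc is used once the
 * configured multicast list exceeds BE_MAX_MC.
 */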
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

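/* Adaptive interrupt coalescing: once a second, derive the RX packet rate
 * and map it to an EQ delay of roughly pps * 8 / 110000, clamped to the
 * EQ's [min_eqd, max_eqd] range (with anything under 10 treated as 0).
 * E.g. ~550K pps gives eqd = 5 << 3 = 40.
 */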
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

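/* Look up the page_info backing an RX fragment and drop the queue's
 * reference to it. The page is DMA-unmapped only when this fragment was
 * the last user of the (possibly compound) page.
 */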
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

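/* Decode an RX completion into the hardware-independent be_rx_compl_info.
 * Two layouts exist: v1 on BE3-native, v0 otherwise; both carry the same
 * fields at different bit positions, extracted via AMAP_GET_BITS.
 */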
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

1328 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1329 {
1330         u32 order = get_order(size);
1331
1332         if (order > 0)
1333                 gfp |= __GFP_COMP;
1334         return  alloc_pages(gfp, order);
1335 }
1336
1337 /*
1338  * Allocate a page, split it to fragments of size rx_frag_size and post as
1339  * receive buffers to BE
1340  */
1341 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1342 {
1343         struct be_adapter *adapter = rxo->adapter;
1344         struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1345         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1346         struct be_queue_info *rxq = &rxo->q;
1347         struct page *pagep = NULL;
1348         struct be_eth_rx_d *rxd;
1349         u64 page_dmaaddr = 0, frag_dmaaddr;
1350         u32 posted, page_offset = 0;
1351
1352         page_info = &rxo->page_info_tbl[rxq->head];
1353         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1354                 if (!pagep) {
1355                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1356                         if (unlikely(!pagep)) {
1357                                 rx_stats(rxo)->rx_post_fail++;
1358                                 break;
1359                         }
1360                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1361                                                     0, adapter->big_page_size,
1362                                                     DMA_FROM_DEVICE);
1363                         page_info->page_offset = 0;
1364                 } else {
1365                         get_page(pagep);
1366                         page_info->page_offset = page_offset + rx_frag_size;
1367                 }
1368                 page_offset = page_info->page_offset;
1369                 page_info->page = pagep;
1370                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1371                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1372
1373                 rxd = queue_head_node(rxq);
1374                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1375                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1376
1377                 /* Any space left in the current big page for another frag? */
1378                 if ((page_offset + rx_frag_size + rx_frag_size) >
1379                                         adapter->big_page_size) {
1380                         pagep = NULL;
1381                         page_info->last_page_user = true;
1382                 }
1383
1384                 prev_page_info = page_info;
1385                 queue_head_inc(rxq);
1386                 page_info = &page_info_tbl[rxq->head];
1387         }
1388         if (pagep)
1389                 prev_page_info->last_page_user = true;
1390
1391         if (posted) {
1392                 atomic_add(posted, &rxq->used);
1393                 be_rxq_notify(adapter, rxq->id, posted);
1394         } else if (atomic_read(&rxq->used) == 0) {
1395                 /* Let be_worker replenish when memory is available */
1396                 rxo->rx_post_starved = true;
1397         }
1398 }
1399
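/* Return the TX completion at the CQ tail, or NULL if none is pending.
 * The valid bit is tested before the rmb() so that the rest of the entry
 * is not read ahead of the DMA'ed valid bit; the entry is then
 * endian-converted, its valid bit cleared for reuse, and the CQ tail
 * advanced.
 */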
1400 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1401 {
1402         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1403
1404         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1405                 return NULL;
1406
1407         rmb();
1408         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1409
1410         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1411
1412         queue_tail_inc(tx_cq);
1413         return txcp;
1414 }
1415
1416 static u16 be_tx_compl_process(struct be_adapter *adapter,
1417                 struct be_tx_obj *txo, u16 last_index)
1418 {
1419         struct be_queue_info *txq = &txo->q;
1420         struct be_eth_wrb *wrb;
1421         struct sk_buff **sent_skbs = txo->sent_skb_list;
1422         struct sk_buff *sent_skb;
1423         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1424         bool unmap_skb_hdr = true;
1425
1426         sent_skb = sent_skbs[txq->tail];
1427         BUG_ON(!sent_skb);
1428         sent_skbs[txq->tail] = NULL;
1429
1430         /* skip header wrb */
1431         queue_tail_inc(txq);
1432
1433         do {
1434                 cur_index = txq->tail;
1435                 wrb = queue_tail_node(txq);
1436                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1437                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1438                 unmap_skb_hdr = false;
1439
1440                 num_wrbs++;
1441                 queue_tail_inc(txq);
1442         } while (cur_index != last_index);
1443
1444         kfree_skb(sent_skb);
1445         return num_wrbs;
1446 }
1447
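/* Consume one entry from the event queue, or return NULL if no event is
 * pending. The entry is endian-converted and the queue tail advanced;
 * the caller must zero eqe->evt so the slot reads as empty on the next
 * wrap around the ring.
 */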
1448 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1449 {
1450         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1451
1452         if (!eqe->evt)
1453                 return NULL;
1454
1455         rmb();
1456         eqe->evt = le32_to_cpu(eqe->evt);
1457         queue_tail_inc(&eq_obj->q);
1458         return eqe;
1459 }
1460
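/* Drain all pending entries from an event queue and notify the
 * controller of how many were consumed. A spurious interrupt (zero
 * events) forces a re-arm of the EQ; when events were present, NAPI is
 * scheduled to do the actual completion processing.
 */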
1461 static int event_handle(struct be_adapter *adapter,
1462                         struct be_eq_obj *eq_obj,
1463                         bool rearm)
1464 {
1465         struct be_eq_entry *eqe;
1466         u16 num = 0;
1467
1468         while ((eqe = event_get(eq_obj)) != NULL) {
1469                 eqe->evt = 0;
1470                 num++;
1471         }
1472
1473         /* Deal with any spurious interrupts that come
1474          * without events
1475          */
1476         if (!num)
1477                 rearm = true;
1478
1479         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1480         if (num)
1481                 napi_schedule(&eq_obj->napi);
1482
1483         return num;
1484 }
1485
1486 /* Just read and notify events without processing them.
1487  * Used at the time of destroying event queues */
1488 static void be_eq_clean(struct be_adapter *adapter,
1489                         struct be_eq_obj *eq_obj)
1490 {
1491         struct be_eq_entry *eqe;
1492         u16 num = 0;
1493
1494         while ((eqe = event_get(eq_obj)) != NULL) {
1495                 eqe->evt = 0;
1496                 num++;
1497         }
1498
1499         if (num)
1500                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1501 }
1502
1503 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1504 {
1505         struct be_rx_page_info *page_info;
1506         struct be_queue_info *rxq = &rxo->q;
1507         struct be_queue_info *rx_cq = &rxo->cq;
1508         struct be_rx_compl_info *rxcp;
1509         u16 tail;
1510
1511         /* First cleanup pending rx completions */
1512         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1513                 be_rx_compl_discard(adapter, rxo, rxcp);
1514                 be_cq_notify(adapter, rx_cq->id, false, 1);
1515         }
1516
1517         /* Then free the posted rx buffers that were not used */
1518         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1519         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1520                 page_info = get_rx_page_info(adapter, rxo, tail);
1521                 put_page(page_info->page);
1522                 memset(page_info, 0, sizeof(*page_info));
1523         }
1524         BUG_ON(atomic_read(&rxq->used));
1525         rxq->tail = rxq->head = 0;
1526 }
1527
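/* Drain TX completions when the interface is going down. Completions
 * are polled for up to 200ms; any wrbs still outstanding after that are
 * reclaimed directly from the TX queue so their skbs are not leaked,
 * even though the completions never arrived.
 */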
1528 static void be_tx_compl_clean(struct be_adapter *adapter,
1529                                 struct be_tx_obj *txo)
1530 {
1531         struct be_queue_info *tx_cq = &txo->cq;
1532         struct be_queue_info *txq = &txo->q;
1533         struct be_eth_tx_compl *txcp;
1534         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1535         struct sk_buff **sent_skbs = txo->sent_skb_list;
1536         struct sk_buff *sent_skb;
1537         bool dummy_wrb;
1538
1539         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1540         do {
1541                 while ((txcp = be_tx_compl_get(tx_cq))) {
1542                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1543                                         wrb_index, txcp);
1544                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1545                         cmpl++;
1546                 }
1547                 if (cmpl) {
1548                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1549                         atomic_sub(num_wrbs, &txq->used);
1550                         cmpl = 0;
1551                         num_wrbs = 0;
1552                 }
1553
1554                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1555                         break;
1556
1557                 mdelay(1);
1558         } while (true);
1559
1560         if (atomic_read(&txq->used))
1561                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1562                         atomic_read(&txq->used));
1563
1564         /* free posted tx for which compls will never arrive */
1565         while (atomic_read(&txq->used)) {
1566                 sent_skb = sent_skbs[txq->tail];
1567                 end_idx = txq->tail;
1568                 index_adv(&end_idx,
1569                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1570                         txq->len);
1571                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1572                 atomic_sub(num_wrbs, &txq->used);
1573         }
1574 }
1575
1576 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1577 {
1578         struct be_queue_info *q;
1579
1580         q = &adapter->mcc_obj.q;
1581         if (q->created)
1582                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1583         be_queue_free(adapter, q);
1584
1585         q = &adapter->mcc_obj.cq;
1586         if (q->created)
1587                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1588         be_queue_free(adapter, q);
1589 }
1590
1591 /* Must be called only after TX qs are created as MCC shares TX EQ */
1592 static int be_mcc_queues_create(struct be_adapter *adapter)
1593 {
1594         struct be_queue_info *q, *cq;
1595
1596         /* Alloc MCC compl queue */
1597         cq = &adapter->mcc_obj.cq;
1598         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1599                         sizeof(struct be_mcc_compl)))
1600                 goto err;
1601
1602         /* Ask BE to create MCC compl queue; share TX's eq */
1603         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1604                 goto mcc_cq_free;
1605
1606         /* Alloc MCC queue */
1607         q = &adapter->mcc_obj.q;
1608         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1609                 goto mcc_cq_destroy;
1610
1611         /* Ask BE to create MCC queue */
1612         if (be_cmd_mccq_create(adapter, q, cq))
1613                 goto mcc_q_free;
1614
1615         return 0;
1616
1617 mcc_q_free:
1618         be_queue_free(adapter, q);
1619 mcc_cq_destroy:
1620         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1621 mcc_cq_free:
1622         be_queue_free(adapter, cq);
1623 err:
1624         return -1;
1625 }
1626
1627 static void be_tx_queues_destroy(struct be_adapter *adapter)
1628 {
1629         struct be_queue_info *q;
1630         struct be_tx_obj *txo;
1631         u8 i;
1632
1633         for_all_tx_queues(adapter, txo, i) {
1634                 q = &txo->q;
1635                 if (q->created)
1636                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1637                 be_queue_free(adapter, q);
1638
1639                 q = &txo->cq;
1640                 if (q->created)
1641                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1642                 be_queue_free(adapter, q);
1643         }
1644
1645         /* Clear any residual events */
1646         be_eq_clean(adapter, &adapter->tx_eq);
1647
1648         q = &adapter->tx_eq.q;
1649         if (q->created)
1650                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1651         be_queue_free(adapter, q);
1652 }
1653
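/* Use multiple TX queues only on a BE3 physical function that has no
 * SR-IOV VFs enabled and is not in multi-channel mode; Lancer, BE2, VFs
 * and multi-channel configurations get a single TX queue.
 */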
1654 static int be_num_txqs_want(struct be_adapter *adapter)
1655 {
1656         if ((num_vfs && adapter->sriov_enabled) ||
1657                 be_is_mc(adapter) ||
1658                 lancer_chip(adapter) || !be_physfn(adapter) ||
1659                 adapter->generation == BE_GEN2)
1660                 return 1;
1661         else
1662                 return MAX_TX_QS;
1663 }
1664
1665 /* One TX event queue is shared by all TX compl qs */
1666 static int be_tx_queues_create(struct be_adapter *adapter)
1667 {
1668         struct be_queue_info *eq, *q, *cq;
1669         struct be_tx_obj *txo;
1670         u8 i;
1671
1672         adapter->num_tx_qs = be_num_txqs_want(adapter);
1673         if (adapter->num_tx_qs != MAX_TX_QS) {
1674                 rtnl_lock();
1675                 netif_set_real_num_tx_queues(adapter->netdev,
1676                         adapter->num_tx_qs);
1677                 rtnl_unlock();
1678         }
1679
1680         adapter->tx_eq.max_eqd = 0;
1681         adapter->tx_eq.min_eqd = 0;
1682         adapter->tx_eq.cur_eqd = 96;
1683         adapter->tx_eq.enable_aic = false;
1684
1685         eq = &adapter->tx_eq.q;
1686         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1687                 sizeof(struct be_eq_entry)))
1688                 return -1;
1689
1690         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1691                 goto err;
1692         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1693
1694         for_all_tx_queues(adapter, txo, i) {
1695                 cq = &txo->cq;
1696                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1697                         sizeof(struct be_eth_tx_compl)))
1698                         goto err;
1699
1700                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1701                         goto err;
1702
1703                 q = &txo->q;
1704                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1705                         sizeof(struct be_eth_wrb)))
1706                         goto err;
1707         }
1708         return 0;
1709
1710 err:
1711         be_tx_queues_destroy(adapter);
1712         return -1;
1713 }
1714
1715 static void be_rx_queues_destroy(struct be_adapter *adapter)
1716 {
1717         struct be_queue_info *q;
1718         struct be_rx_obj *rxo;
1719         int i;
1720
1721         for_all_rx_queues(adapter, rxo, i) {
1722                 be_queue_free(adapter, &rxo->q);
1723
1724                 q = &rxo->cq;
1725                 if (q->created)
1726                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1727                 be_queue_free(adapter, q);
1728
1729                 q = &rxo->rx_eq.q;
1730                 if (q->created)
1731                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1732                 be_queue_free(adapter, q);
1733         }
1734 }
1735
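/* RSS queues are requested only for an RSS-capable physical function
 * without SR-IOV and not in multi-channel mode: one default non-RSS
 * queue plus MAX_RSS_QS RSS queues. All other configurations use a
 * single RX queue.
 */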
1736 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1737 {
1738         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1739                 !adapter->sriov_enabled && be_physfn(adapter) &&
1740                 !be_is_mc(adapter)) {
1741                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1742         } else {
1743                 dev_warn(&adapter->pdev->dev,
1744                         "No support for multiple RX queues\n");
1745                 return 1;
1746         }
1747 }
1748
1749 static int be_rx_queues_create(struct be_adapter *adapter)
1750 {
1751         struct be_queue_info *eq, *q, *cq;
1752         struct be_rx_obj *rxo;
1753         int rc, i;
1754
1755         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1756                                 msix_enabled(adapter) ?
1757                                         adapter->num_msix_vec - 1 : 1);
1758         if (adapter->num_rx_qs != MAX_RX_QS)
1759                 dev_warn(&adapter->pdev->dev,
1760                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1761
1762         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1763         for_all_rx_queues(adapter, rxo, i) {
1764                 rxo->adapter = adapter;
1765                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1766                 rxo->rx_eq.enable_aic = true;
1767
1768                 /* EQ */
1769                 eq = &rxo->rx_eq.q;
1770                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1771                                         sizeof(struct be_eq_entry));
1772                 if (rc)
1773                         goto err;
1774
1775                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1776                 if (rc)
1777                         goto err;
1778
1779                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1780
1781                 /* CQ */
1782                 cq = &rxo->cq;
1783                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1784                                 sizeof(struct be_eth_rx_compl));
1785                 if (rc)
1786                         goto err;
1787
1788                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1789                 if (rc)
1790                         goto err;
1791
1792                 /* Rx Q - will be created in be_open() */
1793                 q = &rxo->q;
1794                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1795                                 sizeof(struct be_eth_rx_d));
1796                 if (rc)
1797                         goto err;
1798
1799         }
1800
1801         return 0;
1802 err:
1803         be_rx_queues_destroy(adapter);
1804         return -1;
1805 }
1806
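/* Non-destructively check whether an event is pending on an EQ. Used by
 * the INTx handler on Lancer, where the interrupting EQ cannot be
 * identified from the CEV_ISR register read used on BE.
 */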
1807 static bool event_peek(struct be_eq_obj *eq_obj)
1808 {
1809         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1810         if (!eqe->evt)
1811                 return false;
1812         else
1813                 return true;
1814 }
1815
1816 static irqreturn_t be_intx(int irq, void *dev)
1817 {
1818         struct be_adapter *adapter = dev;
1819         struct be_rx_obj *rxo;
1820         int isr, i, tx = 0, rx = 0;
1821
1822         if (lancer_chip(adapter)) {
1823                 if (event_peek(&adapter->tx_eq))
1824                         tx = event_handle(adapter, &adapter->tx_eq, false);
1825                 for_all_rx_queues(adapter, rxo, i) {
1826                         if (event_peek(&rxo->rx_eq))
1827                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1828                 }
1829
1830                 if (!(tx || rx))
1831                         return IRQ_NONE;
1832
1833         } else {
1834                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1835                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1836                 if (!isr)
1837                         return IRQ_NONE;
1838
1839                 if ((1 << adapter->tx_eq.eq_idx & isr))
1840                         event_handle(adapter, &adapter->tx_eq, false);
1841
1842                 for_all_rx_queues(adapter, rxo, i) {
1843                         if ((1 << rxo->rx_eq.eq_idx & isr))
1844                                 event_handle(adapter, &rxo->rx_eq, true);
1845                 }
1846         }
1847
1848         return IRQ_HANDLED;
1849 }
1850
1851 static irqreturn_t be_msix_rx(int irq, void *dev)
1852 {
1853         struct be_rx_obj *rxo = dev;
1854         struct be_adapter *adapter = rxo->adapter;
1855
1856         event_handle(adapter, &rxo->rx_eq, true);
1857
1858         return IRQ_HANDLED;
1859 }
1860
1861 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1862 {
1863         struct be_adapter *adapter = dev;
1864
1865         event_handle(adapter, &adapter->tx_eq, false);
1866
1867         return IRQ_HANDLED;
1868 }
1869
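/* GRO is attempted only for error-free TCP completions; everything else
 * takes the regular receive path.
 */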
1870 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1871 {
1872         return rxcp->tcpf && !rxcp->err;
1873 }
1874
1875 static int be_poll_rx(struct napi_struct *napi, int budget)
1876 {
1877         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1878         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1879         struct be_adapter *adapter = rxo->adapter;
1880         struct be_queue_info *rx_cq = &rxo->cq;
1881         struct be_rx_compl_info *rxcp;
1882         u32 work_done;
1883
1884         rx_stats(rxo)->rx_polls++;
1885         for (work_done = 0; work_done < budget; work_done++) {
1886                 rxcp = be_rx_compl_get(rxo);
1887                 if (!rxcp)
1888                         break;
1889
1890                 /* Is it a flush compl that has no data? */
1891                 if (unlikely(rxcp->num_rcvd == 0))
1892                         goto loop_continue;
1893
1894                 /* Discard a compl with partial DMA (Lancer B0) */
1895                 if (unlikely(!rxcp->pkt_size)) {
1896                         be_rx_compl_discard(adapter, rxo, rxcp);
1897                         goto loop_continue;
1898                 }
1899
1900                 /* On BE, drop pkts that arrive due to imperfect filtering in
1901                  * promiscuous mode on some SKUs
1902                  */
1903                 if (unlikely(rxcp->port != adapter->port_num &&
1904                                 !lancer_chip(adapter))) {
1905                         be_rx_compl_discard(adapter, rxo, rxcp);
1906                         goto loop_continue;
1907                 }
1908
1909                 if (do_gro(rxcp))
1910                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1911                 else
1912                         be_rx_compl_process(adapter, rxo, rxcp);
1913 loop_continue:
1914                 be_rx_stats_update(rxo, rxcp);
1915         }
1916
1917         be_cq_notify(adapter, rx_cq->id, false, work_done);
1918
1919         /* Refill the queue */
1920         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1921                 be_post_rx_frags(rxo, GFP_ATOMIC);
1922
1923         /* All consumed */
1924         if (work_done < budget) {
1925                 napi_complete(napi);
1926                 /* Arm CQ */
1927                 be_cq_notify(adapter, rx_cq->id, true, 0);
1928         }
1929         return work_done;
1930 }
1931
1932 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1933  * For TX/MCC we don't honour budget; consume everything
1934  */
1935 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1936 {
1937         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1938         struct be_adapter *adapter =
1939                 container_of(tx_eq, struct be_adapter, tx_eq);
1940         struct be_tx_obj *txo;
1941         struct be_eth_tx_compl *txcp;
1942         int tx_compl, mcc_compl, status = 0;
1943         u8 i;
1944         u16 num_wrbs;
1945
1946         for_all_tx_queues(adapter, txo, i) {
1947                 tx_compl = 0;
1948                 num_wrbs = 0;
1949                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1950                         num_wrbs += be_tx_compl_process(adapter, txo,
1951                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1952                                         wrb_index, txcp));
1953                         tx_compl++;
1954                 }
1955                 if (tx_compl) {
1956                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1957
1958                         atomic_sub(num_wrbs, &txo->q.used);
1959
1960                         /* As Tx wrbs have been freed up, wake up the netdev
1961                          * queue if it was stopped due to lack of tx wrbs. */
1962                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1963                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1964                                 netif_wake_subqueue(adapter->netdev, i);
1965                         }
1966
1967                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1968                         tx_stats(txo)->tx_compl += tx_compl;
1969                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1970                 }
1971         }
1972
1973         mcc_compl = be_process_mcc(adapter, &status);
1974
1975         if (mcc_compl) {
1976                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1977                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1978         }
1979
1980         napi_complete(napi);
1981
1982         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1983         adapter->drv_stats.tx_events++;
1984         return 1;
1985 }
1986
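/* Detect and dump an unrecoverable error (UE) in the adapter. On Lancer
 * the SLIPORT status/error registers are consulted; on BE the UE status
 * CSRs are read from PCI config space and masked with their mask
 * registers. On error, the adapter is marked ue_detected/eeh_err and
 * every offending status bit is logged by name.
 */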
1987 void be_detect_dump_ue(struct be_adapter *adapter)
1988 {
1989         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1990         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1991         u32 i;
1992
1993         if (adapter->eeh_err || adapter->ue_detected)
1994                 return;
1995
1996         if (lancer_chip(adapter)) {
1997                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1998                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1999                         sliport_err1 = ioread32(adapter->db +
2000                                         SLIPORT_ERROR1_OFFSET);
2001                         sliport_err2 = ioread32(adapter->db +
2002                                         SLIPORT_ERROR2_OFFSET);
2003                 }
2004         } else {
2005                 pci_read_config_dword(adapter->pdev,
2006                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2007                 pci_read_config_dword(adapter->pdev,
2008                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2009                 pci_read_config_dword(adapter->pdev,
2010                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2011                 pci_read_config_dword(adapter->pdev,
2012                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2013
2014                 ue_lo = (ue_lo & (~ue_lo_mask));
2015                 ue_hi = (ue_hi & (~ue_hi_mask));
2016         }
2017
2018         if (ue_lo || ue_hi ||
2019                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2020                 adapter->ue_detected = true;
2021                 adapter->eeh_err = true;
2022                 dev_err(&adapter->pdev->dev,
2023                         "Unrecoverable error in the card\n");
2024         }
2025
2026         if (ue_lo) {
2027                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2028                         if (ue_lo & 1)
2029                                 dev_err(&adapter->pdev->dev,
2030                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2031                 }
2032         }
2033         if (ue_hi) {
2034                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2035                         if (ue_hi & 1)
2036                                 dev_err(&adapter->pdev->dev,
2037                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2038                 }
2039         }
2040
2041         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2042                 dev_err(&adapter->pdev->dev,
2043                         "sliport status 0x%x\n", sliport_status);
2044                 dev_err(&adapter->pdev->dev,
2045                         "sliport error1 0x%x\n", sliport_err1);
2046                 dev_err(&adapter->pdev->dev,
2047                         "sliport error2 0x%x\n", sliport_err2);
2048         }
2049 }
2050
2051 static void be_msix_disable(struct be_adapter *adapter)
2052 {
2053         if (msix_enabled(adapter)) {
2054                 pci_disable_msix(adapter->pdev);
2055                 adapter->num_msix_vec = 0;
2056         }
2057 }
2058
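/* Request one MSI-X vector per desired RX queue plus one vector shared
 * by TX and MCC. pci_enable_msix() returns a positive count when fewer
 * vectors are available than requested; in that case the request is
 * retried with the reported count. If even that fails, num_msix_vec
 * stays 0 and the driver later falls back to INTx.
 */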
2059 static void be_msix_enable(struct be_adapter *adapter)
2060 {
2061 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2062         int i, status, num_vec;
2063
2064         num_vec = be_num_rxqs_want(adapter) + 1;
2065
2066         for (i = 0; i < num_vec; i++)
2067                 adapter->msix_entries[i].entry = i;
2068
2069         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2070         if (status == 0) {
2071                 goto done;
2072         } else if (status >= BE_MIN_MSIX_VECTORS) {
2073                 num_vec = status;
2074                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2075                                 num_vec) == 0)
2076                         goto done;
2077         }
2078         return;
2079 done:
2080         adapter->num_msix_vec = num_vec;
2081         return;
2082 }
2083
2084 static int be_sriov_enable(struct be_adapter *adapter)
2085 {
2086         be_check_sriov_fn_type(adapter);
2087 #ifdef CONFIG_PCI_IOV
2088         if (be_physfn(adapter) && num_vfs) {
2089                 int status, pos;
2090                 u16 nvfs;
2091
2092                 pos = pci_find_ext_capability(adapter->pdev,
2093                                                 PCI_EXT_CAP_ID_SRIOV);
2094                 pci_read_config_word(adapter->pdev,
2095                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2096
2097                 if (num_vfs > nvfs) {
2098                         dev_info(&adapter->pdev->dev,
2099                                         "Device supports %d VFs, not %d\n",
2100                                         nvfs, num_vfs);
2101                         num_vfs = nvfs;
2102                 }
2103
2104                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2105                 adapter->sriov_enabled = status ? false : true;
2106
2107                 if (adapter->sriov_enabled) {
2108                         adapter->vf_cfg = kcalloc(num_vfs,
2109                                                 sizeof(struct be_vf_cfg),
2110                                                 GFP_KERNEL);
2111                         if (!adapter->vf_cfg)
2112                                 return -ENOMEM;
2113                 }
2114         }
2115 #endif
2116         return 0;
2117 }
2118
2119 static void be_sriov_disable(struct be_adapter *adapter)
2120 {
2121 #ifdef CONFIG_PCI_IOV
2122         if (adapter->sriov_enabled) {
2123                 pci_disable_sriov(adapter->pdev);
2124                 kfree(adapter->vf_cfg);
2125                 adapter->sriov_enabled = false;
2126         }
2127 #endif
2128 }
2129
2130 static inline int be_msix_vec_get(struct be_adapter *adapter,
2131                                         struct be_eq_obj *eq_obj)
2132 {
2133         return adapter->msix_entries[eq_obj->eq_idx].vector;
2134 }
2135
2136 static int be_request_irq(struct be_adapter *adapter,
2137                 struct be_eq_obj *eq_obj,
2138                 void *handler, char *desc, void *context)
2139 {
2140         struct net_device *netdev = adapter->netdev;
2141         int vec;
2142
2143         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2144         vec = be_msix_vec_get(adapter, eq_obj);
2145         return request_irq(vec, handler, 0, eq_obj->desc, context);
2146 }
2147
2148 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2149                         void *context)
2150 {
2151         int vec = be_msix_vec_get(adapter, eq_obj);
2152         free_irq(vec, context);
2153 }
2154
2155 static int be_msix_register(struct be_adapter *adapter)
2156 {
2157         struct be_rx_obj *rxo;
2158         int status, i;
2159         char qname[10];
2160
2161         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2162                                 adapter);
2163         if (status)
2164                 goto err;
2165
2166         for_all_rx_queues(adapter, rxo, i) {
2167                 sprintf(qname, "rxq%d", i);
2168                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2169                                 qname, rxo);
2170                 if (status)
2171                         goto err_msix;
2172         }
2173
2174         return 0;
2175
2176 err_msix:
2177         be_free_irq(adapter, &adapter->tx_eq, adapter);
2178
2179         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2180                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2181
2182 err:
2183         dev_warn(&adapter->pdev->dev,
2184                 "MSIX Request IRQ failed - err %d\n", status);
2185         be_msix_disable(adapter);
2186         return status;
2187 }
2188
2189 static int be_irq_register(struct be_adapter *adapter)
2190 {
2191         struct net_device *netdev = adapter->netdev;
2192         int status;
2193
2194         if (msix_enabled(adapter)) {
2195                 status = be_msix_register(adapter);
2196                 if (status == 0)
2197                         goto done;
2198                 /* INTx is not supported for VF */
2199                 if (!be_physfn(adapter))
2200                         return status;
2201         }
2202
2203         /* INTx */
2204         netdev->irq = adapter->pdev->irq;
2205         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2206                         adapter);
2207         if (status) {
2208                 dev_err(&adapter->pdev->dev,
2209                         "INTx request IRQ failed - err %d\n", status);
2210                 return status;
2211         }
2212 done:
2213         adapter->isr_registered = true;
2214         return 0;
2215 }
2216
2217 static void be_irq_unregister(struct be_adapter *adapter)
2218 {
2219         struct net_device *netdev = adapter->netdev;
2220         struct be_rx_obj *rxo;
2221         int i;
2222
2223         if (!adapter->isr_registered)
2224                 return;
2225
2226         /* INTx */
2227         if (!msix_enabled(adapter)) {
2228                 free_irq(netdev->irq, adapter);
2229                 goto done;
2230         }
2231
2232         /* MSIx */
2233         be_free_irq(adapter, &adapter->tx_eq, adapter);
2234
2235         for_all_rx_queues(adapter, rxo, i)
2236                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2237
2238 done:
2239         adapter->isr_registered = false;
2240 }
2241
2242 static void be_rx_queues_clear(struct be_adapter *adapter)
2243 {
2244         struct be_queue_info *q;
2245         struct be_rx_obj *rxo;
2246         int i;
2247
2248         for_all_rx_queues(adapter, rxo, i) {
2249                 q = &rxo->q;
2250                 if (q->created) {
2251                         be_cmd_rxq_destroy(adapter, q);
2252                         /* After the rxq is invalidated, wait for a grace time
2253                          * of 1ms for all dma to end and the flush compl to
2254                          * arrive
2255                          */
2256                         mdelay(1);
2257                         be_rx_q_clean(adapter, rxo);
2258                 }
2259
2260                 /* Clear any residual events */
2261                 q = &rxo->rx_eq.q;
2262                 if (q->created)
2263                         be_eq_clean(adapter, &rxo->rx_eq);
2264         }
2265 }
2266
2267 static int be_close(struct net_device *netdev)
2268 {
2269         struct be_adapter *adapter = netdev_priv(netdev);
2270         struct be_rx_obj *rxo;
2271         struct be_tx_obj *txo;
2272         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2273         int vec, i;
2274
2275         be_async_mcc_disable(adapter);
2276
2277         if (!lancer_chip(adapter))
2278                 be_intr_set(adapter, false);
2279
2280         for_all_rx_queues(adapter, rxo, i)
2281                 napi_disable(&rxo->rx_eq.napi);
2282
2283         napi_disable(&tx_eq->napi);
2284
2285         if (lancer_chip(adapter)) {
2286                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2287                 for_all_rx_queues(adapter, rxo, i)
2288                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2289                 for_all_tx_queues(adapter, txo, i)
2290                         be_cq_notify(adapter, txo->cq.id, false, 0);
2291         }
2292
2293         if (msix_enabled(adapter)) {
2294                 vec = be_msix_vec_get(adapter, tx_eq);
2295                 synchronize_irq(vec);
2296
2297                 for_all_rx_queues(adapter, rxo, i) {
2298                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2299                         synchronize_irq(vec);
2300                 }
2301         } else {
2302                 synchronize_irq(netdev->irq);
2303         }
2304         be_irq_unregister(adapter);
2305
2306         /* Wait for all pending tx completions to arrive so that
2307          * all tx skbs are freed.
2308          */
2309         for_all_tx_queues(adapter, txo, i)
2310                 be_tx_compl_clean(adapter, txo);
2311
2312         be_rx_queues_clear(adapter);
2313         return 0;
2314 }
2315
2316 static int be_rx_queues_setup(struct be_adapter *adapter)
2317 {
2318         struct be_rx_obj *rxo;
2319         int rc, i, j;
2320         u8 rsstable[128];
2321
2322         for_all_rx_queues(adapter, rxo, i) {
2323                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2324                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2325                         adapter->if_handle,
2326                         (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2327                 if (rc)
2328                         return rc;
2329         }
2330
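        /* Fill the 128-entry RSS indirection table by striping the
         * rss_ids of the RSS queues across it round-robin; queue 0 is
         * the default non-RSS queue and is left out of the table.
         */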
2331         if (be_multi_rxq(adapter)) {
2332                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2333                         for_all_rss_queues(adapter, rxo, i) {
2334                                 if ((j + i) >= 128)
2335                                         break;
2336                                 rsstable[j + i] = rxo->rss_id;
2337                         }
2338                 }
2339                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2340
2341                 if (rc)
2342                         return rc;
2343         }
2344
2345         /* First time posting */
2346         for_all_rx_queues(adapter, rxo, i) {
2347                 be_post_rx_frags(rxo, GFP_KERNEL);
2348                 napi_enable(&rxo->rx_eq.napi);
2349         }
2350         return 0;
2351 }
2352
2353 static int be_open(struct net_device *netdev)
2354 {
2355         struct be_adapter *adapter = netdev_priv(netdev);
2356         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2357         struct be_rx_obj *rxo;
2358         int status, i;
2359
2360         status = be_rx_queues_setup(adapter);
2361         if (status)
2362                 goto err;
2363
2364         napi_enable(&tx_eq->napi);
2365
2366         be_irq_register(adapter);
2367
2368         if (!lancer_chip(adapter))
2369                 be_intr_set(adapter, true);
2370
2371         /* The evt queues are created in unarmed state; arm them */
2372         for_all_rx_queues(adapter, rxo, i) {
2373                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2374                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2375         }
2376         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2377
2378         /* Now that interrupts are on, we can process async MCC */
2379         be_async_mcc_enable(adapter);
2380
2381         return 0;
2382 err:
2383         be_close(adapter->netdev);
2384         return -EIO;
2385 }
2386
2387 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2388 {
2389         struct be_dma_mem cmd;
2390         int status = 0;
2391         u8 mac[ETH_ALEN];
2392
2393         memset(mac, 0, ETH_ALEN);
2394
2395         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2396         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2397                                     GFP_KERNEL);
2398         if (cmd.va == NULL)
2399                 return -1;
2400         memset(cmd.va, 0, cmd.size);
2401
2402         if (enable) {
2403                 status = pci_write_config_dword(adapter->pdev,
2404                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2405                 if (status) {
2406                         dev_err(&adapter->pdev->dev,
2407                                 "Could not enable Wake-on-LAN\n");
2408                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2409                                           cmd.dma);
2410                         return status;
2411                 }
2412                 status = be_cmd_enable_magic_wol(adapter,
2413                                 adapter->netdev->dev_addr, &cmd);
2414                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2415                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2416         } else {
2417                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2418                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2419                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2420         }
2421
2422         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2423         return status;
2424 }
2425
2426 /*
2427  * Generate a seed MAC address from the PF MAC Address using jhash.
2428  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2429  * These addresses are programmed in the ASIC by the PF and the VF driver
2430  * queries for the MAC address during its probe.
2431  */
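/* Illustration (hypothetical values): if the generated seed were
 * 02:00:c9:00:00:00, VF0 is assigned 02:00:c9:00:00:00, VF1 gets
 * 02:00:c9:00:00:01, and so on. Only the last octet is incremented, so
 * the scheme assumes the VF count never exceeds 256.
 */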
2432 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2433 {
2434         u32 vf;
2435         int status = 0;
2436         u8 mac[ETH_ALEN];
2437
2438         be_vf_eth_addr_generate(adapter, mac);
2439
2440         for (vf = 0; vf < num_vfs; vf++) {
2441                 if (lancer_chip(adapter)) {
2442                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2443                 } else {
2444                         status = be_cmd_pmac_add(adapter, mac,
2445                                         adapter->vf_cfg[vf].vf_if_handle,
2446                                         &adapter->vf_cfg[vf].vf_pmac_id,
2447                                         vf + 1);
2448                 }
2449
2450                 if (status)
2451                         dev_err(&adapter->pdev->dev,
2452                         "MAC address assignment failed for VF %d\n", vf);
2453                 else
2454                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2455
2456                 mac[5] += 1;
2457         }
2458         return status;
2459 }
2460
2461 static void be_vf_clear(struct be_adapter *adapter)
2462 {
2463         u32 vf;
2464
2465         for (vf = 0; vf < num_vfs; vf++) {
2466                 if (lancer_chip(adapter))
2467                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2468                 else
2469                         be_cmd_pmac_del(adapter,
2470                                         adapter->vf_cfg[vf].vf_if_handle,
2471                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2472         }
2473
2474         for (vf = 0; vf < num_vfs; vf++)
2475                 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2476                                 vf + 1);
2477 }
2478
2479 static int be_clear(struct be_adapter *adapter)
2480 {
2481         if (be_physfn(adapter) && adapter->sriov_enabled)
2482                 be_vf_clear(adapter);
2483
2484         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2485
2486         be_mcc_queues_destroy(adapter);
2487         be_rx_queues_destroy(adapter);
2488         be_tx_queues_destroy(adapter);
2489
2490         /* tell fw we're done with firing cmds */
2491         be_cmd_fw_clean(adapter);
2492         return 0;
2493 }
2494
2495 static void be_vf_setup_init(struct be_adapter *adapter)
2496 {
2497         int vf;
2498
2499         for (vf = 0; vf < num_vfs; vf++) {
2500                 adapter->vf_cfg[vf].vf_if_handle = -1;
2501                 adapter->vf_cfg[vf].vf_pmac_id = -1;
2502         }
2503 }
2504
2505 static int be_vf_setup(struct be_adapter *adapter)
2506 {
2507         u32 cap_flags, en_flags, vf;
2508         u16 lnk_speed;
2509         int status;
2510
2511         be_vf_setup_init(adapter);
2512
2513         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2514                                 BE_IF_FLAGS_MULTICAST;
2515
2516         for (vf = 0; vf < num_vfs; vf++) {
2517                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2518                                         &adapter->vf_cfg[vf].vf_if_handle,
2519                                         NULL, vf+1);
2520                 if (status)
2521                         goto err;
2522         }
2523
2524         status = be_vf_eth_addr_config(adapter);
2525         if (status)
2526                 goto err;
2527
2528         for (vf = 0; vf < num_vfs; vf++) {
2529                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2530                                 vf + 1);
2531                 if (status)
2532                         goto err;
2533                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2534         }
2535         return 0;
2536 err:
2537         return status;
2538 }
2539
2540 static void be_setup_init(struct be_adapter *adapter)
2541 {
2542         adapter->vlan_prio_bmap = 0xff;
2543         adapter->link_speed = -1;
2544         adapter->if_handle = -1;
2545         adapter->be3_native = false;
2546         adapter->promiscuous = false;
2547         adapter->eq_next_idx = 0;
2548 }
2549
2550 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2551 {
2552         u32 pmac_id;
2553         int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2554         if (status != 0)
2555                 goto do_none;
2556         status = be_cmd_mac_addr_query(adapter, mac,
2557                         MAC_ADDRESS_TYPE_NETWORK,
2558                         false, adapter->if_handle, pmac_id);
2559         if (status != 0)
2560                 goto do_none;
2561         status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2562                         &adapter->pmac_id, 0);
2563 do_none:
2564         return status;
2565 }
2566
2567 static int be_setup(struct be_adapter *adapter)
2568 {
2569         struct net_device *netdev = adapter->netdev;
2570         u32 cap_flags, en_flags;
2571         u32 tx_fc, rx_fc;
2572         int status, i;
2573         u8 mac[ETH_ALEN];
2574         struct be_tx_obj *txo;
2575
2576         be_setup_init(adapter);
2577
2578         be_cmd_req_native_mode(adapter);
2579
2580         status = be_tx_queues_create(adapter);
2581         if (status != 0)
2582                 goto err;
2583
2584         status = be_rx_queues_create(adapter);
2585         if (status != 0)
2586                 goto err;
2587
2588         status = be_mcc_queues_create(adapter);
2589         if (status != 0)
2590                 goto err;
2591
2592         memset(mac, 0, ETH_ALEN);
2593         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2594                         true /* permanent */, 0, 0);
2595         if (status)
2596                 return status;
2597         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2598         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2599
2600         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2602         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2603                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2604
2605         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2606                 cap_flags |= BE_IF_FLAGS_RSS;
2607                 en_flags |= BE_IF_FLAGS_RSS;
2608         }
2609         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2610                         netdev->dev_addr, &adapter->if_handle,
2611                         &adapter->pmac_id, 0);
2612         if (status != 0)
2613                 goto err;
2614
2615         for_all_tx_queues(adapter, txo, i) {
2616                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2617                 if (status)
2618                         goto err;
2619         }
2620
2621         /* The VF's permanent MAC queried from the card is incorrect.
2622          * For BEx: Query the MAC configured by the PF using if_handle.
2623          * For Lancer: Get and use mac_list to obtain the MAC address.
2624          */
2625         if (!be_physfn(adapter)) {
2626                 if (lancer_chip(adapter))
2627                         status = be_configure_mac_from_list(adapter, mac);
2628                 else
2629                         status = be_cmd_mac_addr_query(adapter, mac,
2630                                         MAC_ADDRESS_TYPE_NETWORK, false,
2631                                         adapter->if_handle, 0);
2632                 if (!status) {
2633                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2634                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2635                 }
2636         }
2637
2638         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2639
2640         status = be_vid_config(adapter, false, 0);
2641         if (status)
2642                 goto err;
2643
2644         be_set_rx_mode(adapter->netdev);
2645
2646         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2647         /* For Lancer: It is legal for this cmd to fail on VF */
2648         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2649                 goto err;
2650
2651         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2652                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2653                                         adapter->rx_fc);
2654                 /* For Lancer: It is legal for this cmd to fail on VF */
2655                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2656                         goto err;
2657         }
2658
2659         pcie_set_readrq(adapter->pdev, 4096);
2660
2661         if (be_physfn(adapter) && adapter->sriov_enabled) {
2662                 status = be_vf_setup(adapter);
2663                 if (status)
2664                         goto err;
2665         }
2666
2667         return 0;
2668 err:
2669         be_clear(adapter);
2670         return status;
2671 }
2672
2673 #ifdef CONFIG_NET_POLL_CONTROLLER
2674 static void be_netpoll(struct net_device *netdev)
2675 {
2676         struct be_adapter *adapter = netdev_priv(netdev);
2677         struct be_rx_obj *rxo;
2678         int i;
2679
2680         event_handle(adapter, &adapter->tx_eq, false);
2681         for_all_rx_queues(adapter, rxo, i)
2682                 event_handle(adapter, &rxo->rx_eq, true);
2683 }
2684 #endif
2685
2686 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
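/* Decide whether the redboot section must be flashed: the CRC of the
 * currently flashed redboot image is read from the card and compared
 * with the last 4 bytes of the redboot image in the firmware file.
 * Flashing is skipped when the two match.
 */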
2687 static bool be_flash_redboot(struct be_adapter *adapter,
2688                         const u8 *p, u32 img_start, int image_size,
2689                         int hdr_size)
2690 {
2691         u32 crc_offset;
2692         u8 flashed_crc[4];
2693         int status;
2694
2695         crc_offset = hdr_size + img_start + image_size - 4;
2696
2697         p += crc_offset;
2698
2699         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2700                         (image_size - 4));
2701         if (status) {
2702                 dev_err(&adapter->pdev->dev,
2703                 "could not get crc from flash, not flashing redboot\n");
2704                 return false;
2705         }
2706
2707         /* update redboot only if the crc does not match */
2708         if (!memcmp(flashed_crc, p, 4))
2709                 return false;
2710         else
2711                 return true;
2712 }
2713
2714 static bool phy_flashing_required(struct be_adapter *adapter)
2715 {
2716         int status = 0;
2717         struct be_phy_info phy_info;
2718
2719         status = be_cmd_get_phy_info(adapter, &phy_info);
2720         if (status)
2721                 return false;
2722         if ((phy_info.phy_type == TN_8022) &&
2723                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2724                 return true;
2725         }
2726         return false;
2727 }
2728
2729 static int be_flash_data(struct be_adapter *adapter,
2730                         const struct firmware *fw,
2731                         struct be_dma_mem *flash_cmd, int num_of_images)
2732
2733 {
2734         int status = 0, i, filehdr_size = 0;
2735         u32 total_bytes = 0, flash_op;
2736         int num_bytes;
2737         const u8 *p = fw->data;
2738         struct be_cmd_write_flashrom *req = flash_cmd->va;
2739         const struct flash_comp *pflashcomp;
2740         int num_comp;
2741
2742         static const struct flash_comp gen3_flash_types[10] = {
2743                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2744                         FLASH_IMAGE_MAX_SIZE_g3},
2745                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2746                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2747                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2748                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2749                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2750                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2751                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2752                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2753                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2754                         FLASH_IMAGE_MAX_SIZE_g3},
2755                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2756                         FLASH_IMAGE_MAX_SIZE_g3},
2757                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2758                         FLASH_IMAGE_MAX_SIZE_g3},
2759                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2760                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2761                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2762                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2763         };
2764         static const struct flash_comp gen2_flash_types[8] = {
2765                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2766                         FLASH_IMAGE_MAX_SIZE_g2},
2767                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2768                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2769                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2770                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2771                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2772                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2773                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2774                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2775                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2776                         FLASH_IMAGE_MAX_SIZE_g2},
2777                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2778                         FLASH_IMAGE_MAX_SIZE_g2},
2779                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2780                          FLASH_IMAGE_MAX_SIZE_g2}
2781         };
2782
2783         if (adapter->generation == BE_GEN3) {
2784                 pflashcomp = gen3_flash_types;
2785                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2786                 num_comp = ARRAY_SIZE(gen3_flash_types);
2787         } else {
2788                 pflashcomp = gen2_flash_types;
2789                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2790                 num_comp = ARRAY_SIZE(gen2_flash_types);
2791         }
2792         for (i = 0; i < num_comp; i++) {
2793                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2794                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2795                         continue;
2796                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2797                         if (!phy_flashing_required(adapter))
2798                                 continue;
2799                 }
2800                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2801                         (!be_flash_redboot(adapter, fw->data,
2802                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2803                         (num_of_images * sizeof(struct image_hdr)))))
2804                         continue;
2805                 p = fw->data;
2806                 p += filehdr_size + pflashcomp[i].offset
2807                         + (num_of_images * sizeof(struct image_hdr));
2808                 if (p + pflashcomp[i].size > fw->data + fw->size)
2809                         return -1;
2810                 total_bytes = pflashcomp[i].size;
2811                 while (total_bytes) {
2812                         if (total_bytes > 32*1024)
2813                                 num_bytes = 32*1024;
2814                         else
2815                                 num_bytes = total_bytes;
2816                         total_bytes -= num_bytes;
2817                         if (!total_bytes) {
2818                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2819                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2820                                 else
2821                                         flash_op = FLASHROM_OPER_FLASH;
2822                         } else {
2823                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2824                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2825                                 else
2826                                         flash_op = FLASHROM_OPER_SAVE;
2827                         }
2828                         memcpy(req->params.data_buf, p, num_bytes);
2829                         p += num_bytes;
2830                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2831                                 pflashcomp[i].optype, flash_op, num_bytes);
2832                         if (status) {
2833                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2834                                         (pflashcomp[i].optype ==
2835                                                 IMG_TYPE_PHY_FW))
2836                                         break;
2837                                 dev_err(&adapter->pdev->dev,
2838                                         "cmd to write to flash rom failed.\n");
2839                                 return -EIO;
2840                         }
2841                 }
2842         }
2843         return 0;
2844 }
2845
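/* Infer the UFI image generation from the build string in the common
 * (gen2) file header: '3' means a BE3 UFI, '2' a BE2 UFI; anything
 * else is reported as unknown (0).
 */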
2846 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2847 {
2848         if (fhdr == NULL)
2849                 return 0;
2850         if (fhdr->build[0] == '3')
2851                 return BE_GEN3;
2852         else if (fhdr->build[0] == '2')
2853                 return BE_GEN2;
2854         else
2855                 return 0;
2856 }
2857
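/* Lancer is not flashed section-by-section like BE2/BE3. The whole
 * image is streamed to the "/prg" flash object in 32KB chunks via
 * WRITE_OBJECT commands and then committed with a final zero-length
 * write to the same object.
 */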
2858 static int lancer_fw_download(struct be_adapter *adapter,
2859                                 const struct firmware *fw)
2860 {
2861 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2862 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2863         struct be_dma_mem flash_cmd;
2864         const u8 *data_ptr = NULL;
2865         u8 *dest_image_ptr = NULL;
2866         size_t image_size = 0;
2867         u32 chunk_size = 0;
2868         u32 data_written = 0;
2869         u32 offset = 0;
2870         int status = 0;
2871         u8 add_status = 0;
2872
2873         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2874                 dev_err(&adapter->pdev->dev,
2875                         "FW image not properly aligned. "
2876                         "Length must be 4-byte aligned.\n");
2877                 status = -EINVAL;
2878                 goto lancer_fw_exit;
2879         }
2880
2881         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2882                                 + LANCER_FW_DOWNLOAD_CHUNK;
2883         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2884                                                 &flash_cmd.dma, GFP_KERNEL);
2885         if (!flash_cmd.va) {
2886                 status = -ENOMEM;
2887                 dev_err(&adapter->pdev->dev,
2888                         "Memory allocation failure while flashing\n");
2889                 goto lancer_fw_exit;
2890         }
2891
2892         dest_image_ptr = flash_cmd.va +
2893                                 sizeof(struct lancer_cmd_req_write_object);
2894         image_size = fw->size;
2895         data_ptr = fw->data;
2896
2897         while (image_size) {
2898                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2899
2900                 /* Copy the image chunk content. */
2901                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2902
2903                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2904                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2905                                 &data_written, &add_status);
2906
2907                 if (status)
2908                         break;
2909
2910                 offset += data_written;
2911                 data_ptr += data_written;
2912                 image_size -= data_written;
2913         }
2914
2915         if (!status) {
2916                 /* Commit the FW written */
2917                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2918                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2919                                         &data_written, &add_status);
2920         }
2921
2922         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2923                                 flash_cmd.dma);
2924         if (status) {
2925                 dev_err(&adapter->pdev->dev,
2926                         "Firmware load error. "
2927                         "Status code: 0x%x, additional status: 0x%x\n",
2928                         status, add_status);
2929                 goto lancer_fw_exit;
2930         }
2931
2932         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2933 lancer_fw_exit:
2934         return status;
2935 }
2936
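/* BE2/BE3 UFI flashing. The UFI generation read from the file header
 * must match the adapter generation. A gen3 UFI carries a table of
 * image headers; only the image with id 1 is flashed here.
 */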
2937 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2938 {
2939         struct flash_file_hdr_g2 *fhdr;
2940         struct flash_file_hdr_g3 *fhdr3;
2941         struct image_hdr *img_hdr_ptr = NULL;
2942         struct be_dma_mem flash_cmd;
2943         const u8 *p;
2944         int status = 0, i = 0, num_imgs = 0;
2945
2946         p = fw->data;
2947         fhdr = (struct flash_file_hdr_g2 *) p;
2948
2949         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2950         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2951                                           &flash_cmd.dma, GFP_KERNEL);
2952         if (!flash_cmd.va) {
2953                 status = -ENOMEM;
2954                 dev_err(&adapter->pdev->dev,
2955                         "Memory allocation failure while flashing\n");
2956                 goto be_fw_exit;
2957         }
2958
2959         if ((adapter->generation == BE_GEN3) &&
2960                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2961                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2962                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2963                 for (i = 0; i < num_imgs; i++) {
2964                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2965                                         (sizeof(struct flash_file_hdr_g3) +
2966                                          i * sizeof(struct image_hdr)));
2967                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2968                                 status = be_flash_data(adapter, fw, &flash_cmd,
2969                                                         num_imgs);
2970                 }
2971         } else if ((adapter->generation == BE_GEN2) &&
2972                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2973                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2974         } else {
2975                 dev_err(&adapter->pdev->dev,
2976                         "UFI and Interface are not compatible for flashing\n");
2977                 status = -EINVAL;
2978         }
2979
2980         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2981                           flash_cmd.dma);
2982         if (status) {
2983                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2984                 goto be_fw_exit;
2985         }
2986
2987         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2988
2989 be_fw_exit:
2990         return status;
2991 }
2992
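/* Entry point for user-initiated firmware flashing, typically reached
 * via ethtool, e.g. (interface and file names illustrative):
 *
 *   ethtool -f eth0 be3_fw.ufi
 *
 * The interface must be up, since flashing is driven through MCC
 * commands.
 */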
2993 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2994 {
2995         const struct firmware *fw;
2996         int status;
2997
2998         if (!netif_running(adapter->netdev)) {
2999                 dev_err(&adapter->pdev->dev,
3000                         "Firmware load not allowed (interface is down)\n");
3001                 return -ENETDOWN;
3002         }
3003
3004         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3005         if (status)
3006                 goto fw_exit;
3007
3008         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3009
3010         if (lancer_chip(adapter))
3011                 status = lancer_fw_download(adapter, fw);
3012         else
3013                 status = be_fw_download(adapter, fw);
3014
3015 fw_exit:
3016         release_firmware(fw);
3017         return status;
3018 }
3019
3020 static const struct net_device_ops be_netdev_ops = {
3021         .ndo_open               = be_open,
3022         .ndo_stop               = be_close,
3023         .ndo_start_xmit         = be_xmit,
3024         .ndo_set_rx_mode        = be_set_rx_mode,
3025         .ndo_set_mac_address    = be_mac_addr_set,
3026         .ndo_change_mtu         = be_change_mtu,
3027         .ndo_get_stats64        = be_get_stats64,
3028         .ndo_validate_addr      = eth_validate_addr,
3029         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3030         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3031         .ndo_set_vf_mac         = be_set_vf_mac,
3032         .ndo_set_vf_vlan        = be_set_vf_vlan,
3033         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3034         .ndo_get_vf_config      = be_get_vf_config,
3035 #ifdef CONFIG_NET_POLL_CONTROLLER
3036         .ndo_poll_controller    = be_netpoll,
3037 #endif
3038 };
3039
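/* Note: VLAN RX offload and VLAN filtering are set in netdev->features
 * but not in hw_features, i.e. they are always on and cannot be toggled
 * from user space on this device.
 */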
3040 static void be_netdev_init(struct net_device *netdev)
3041 {
3042         struct be_adapter *adapter = netdev_priv(netdev);
3043         struct be_rx_obj *rxo;
3044         int i;
3045
3046         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3047                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3048                 NETIF_F_HW_VLAN_TX;
3049         if (be_multi_rxq(adapter))
3050                 netdev->hw_features |= NETIF_F_RXHASH;
3051
3052         netdev->features |= netdev->hw_features |
3053                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3054
3055         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3056                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3057
3058         netdev->flags |= IFF_MULTICAST;
3059
3060         netif_set_gso_max_size(netdev, 65535);
3061
3062         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3063
3064         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3065
3066         for_all_rx_queues(adapter, rxo, i)
3067                 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3068                                 BE_NAPI_WEIGHT);
3069
3070         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3071                 BE_NAPI_WEIGHT);
3072 }
3073
3074 static void be_unmap_pci_bars(struct be_adapter *adapter)
3075 {
3076         if (adapter->csr)
3077                 iounmap(adapter->csr);
3078         if (adapter->db)
3079                 iounmap(adapter->db);
3080 }
3081
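/* Map the PCI BARs this function needs. Lancer exposes its doorbells
 * through BAR 0. On BE2/BE3 the PF maps CSR space from BAR 2; the
 * doorbell BAR is 4, except on BE3 VFs where it is 0.
 */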
3082 static int be_map_pci_bars(struct be_adapter *adapter)
3083 {
3084         u8 __iomem *addr;
3085         int db_reg;
3086
3087         if (lancer_chip(adapter)) {
3088                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3089                         pci_resource_len(adapter->pdev, 0));
3090                 if (addr == NULL)
3091                         return -ENOMEM;
3092                 adapter->db = addr;
3093                 return 0;
3094         }
3095
3096         if (be_physfn(adapter)) {
3097                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3098                                 pci_resource_len(adapter->pdev, 2));
3099                 if (addr == NULL)
3100                         return -ENOMEM;
3101                 adapter->csr = addr;
3102         }
3103
3104         if (adapter->generation == BE_GEN2) {
3105                 db_reg = 4;
3106         } else {
3107                 if (be_physfn(adapter))
3108                         db_reg = 4;
3109                 else
3110                         db_reg = 0;
3111         }
3112         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3113                                 pci_resource_len(adapter->pdev, db_reg));
3114         if (addr == NULL)
3115                 goto pci_map_err;
3116         adapter->db = addr;
3117
3118         return 0;
3119 pci_map_err:
3120         be_unmap_pci_bars(adapter);
3121         return -ENOMEM;
3122 }
3123
3125 static void be_ctrl_cleanup(struct be_adapter *adapter)
3126 {
3127         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3128
3129         be_unmap_pci_bars(adapter);
3130
3131         if (mem->va)
3132                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3133                                   mem->dma);
3134
3135         mem = &adapter->rx_filter;
3136         if (mem->va)
3137                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3138                                   mem->dma);
3139 }
3140
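/* Map the BARs and allocate the DMA buffers the control path needs:
 * the bootstrap mailbox (over-allocated by 16 bytes so that its va/dma
 * can be rounded up with PTR_ALIGN, presumably because the hardware
 * requires 16-byte alignment) and the rx_filter command buffer.
 */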
3141 static int be_ctrl_init(struct be_adapter *adapter)
3142 {
3143         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3144         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3145         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3146         int status;
3147
3148         status = be_map_pci_bars(adapter);
3149         if (status)
3150                 goto done;
3151
3152         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3153         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3154                                                 mbox_mem_alloc->size,
3155                                                 &mbox_mem_alloc->dma,
3156                                                 GFP_KERNEL);
3157         if (!mbox_mem_alloc->va) {
3158                 status = -ENOMEM;
3159                 goto unmap_pci_bars;
3160         }
3161         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3162         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3163         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3164         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3165
3166         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3167         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3168                                         &rx_filter->dma, GFP_KERNEL);
3169         if (rx_filter->va == NULL) {
3170                 status = -ENOMEM;
3171                 goto free_mbox;
3172         }
3173         memset(rx_filter->va, 0, rx_filter->size);
3174
3175         mutex_init(&adapter->mbox_lock);
3176         spin_lock_init(&adapter->mcc_lock);
3177         spin_lock_init(&adapter->mcc_cq_lock);
3178
3179         init_completion(&adapter->flash_compl);
3180         pci_save_state(adapter->pdev);
3181         return 0;
3182
3183 free_mbox:
3184         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3185                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3186
3187 unmap_pci_bars:
3188         be_unmap_pci_bars(adapter);
3189
3190 done:
3191         return status;
3192 }
3193
3194 static void be_stats_cleanup(struct be_adapter *adapter)
3195 {
3196         struct be_dma_mem *cmd = &adapter->stats_cmd;
3197
3198         if (cmd->va)
3199                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3200                                   cmd->va, cmd->dma);
3201 }
3202
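/* Size the stats DMA buffer for the command variant the chip speaks:
 * v0 for BE2, the pport-stats command for Lancer, v1 otherwise (BE3).
 */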
3203 static int be_stats_init(struct be_adapter *adapter)
3204 {
3205         struct be_dma_mem *cmd = &adapter->stats_cmd;
3206
3207         if (adapter->generation == BE_GEN2) {
3208                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3209         } else {
3210                 if (lancer_chip(adapter))
3211                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3212                 else
3213                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3214         }
3215         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3216                                      GFP_KERNEL);
3217         if (cmd->va == NULL)
3218                 return -ENOMEM;
3219         memset(cmd->va, 0, cmd->size);
3220         return 0;
3221 }
3222
3223 static void __devexit be_remove(struct pci_dev *pdev)
3224 {
3225         struct be_adapter *adapter = pci_get_drvdata(pdev);
3226
3227         if (!adapter)
3228                 return;
3229
3230         cancel_delayed_work_sync(&adapter->work);
3231
3232         unregister_netdev(adapter->netdev);
3233
3234         be_clear(adapter);
3235
3236         be_stats_cleanup(adapter);
3237
3238         be_ctrl_cleanup(adapter);
3239
3240         be_sriov_disable(adapter);
3241
3242         be_msix_disable(adapter);
3243
3244         pci_set_drvdata(pdev, NULL);
3245         pci_release_regions(pdev);
3246         pci_disable_device(pdev);
3247
3248         free_netdev(adapter->netdev);
3249 }
3250
3251 static int be_get_config(struct be_adapter *adapter)
3252 {
3253         int status;
3254
3255         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3256                         &adapter->function_mode, &adapter->function_caps);
3257         if (status)
3258                 return status;
3259
3260         if (adapter->function_mode & FLEX10_MODE)
3261                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3262         else
3263                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3264
3265         status = be_cmd_get_cntl_attributes(adapter);
3266         if (status)
3267                 return status;
3268
3269         return 0;
3270 }
3271
3272 static int be_dev_family_check(struct be_adapter *adapter)
3273 {
3274         struct pci_dev *pdev = adapter->pdev;
3275         u32 sli_intf = 0, if_type;
3276
3277         switch (pdev->device) {
3278         case BE_DEVICE_ID1:
3279         case OC_DEVICE_ID1:
3280                 adapter->generation = BE_GEN2;
3281                 break;
3282         case BE_DEVICE_ID2:
3283         case OC_DEVICE_ID2:
3284                 adapter->generation = BE_GEN3;
3285                 break;
3286         case OC_DEVICE_ID3:
3287         case OC_DEVICE_ID4:
3288                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3289                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3290                                                 SLI_INTF_IF_TYPE_SHIFT;
3291
3292                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3293                         if_type != 0x02) {
3294                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3295                         return -EINVAL;
3296                 }
3297                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3298                                          SLI_INTF_FAMILY_SHIFT);
3299                 adapter->generation = BE_GEN3;
3300                 break;
3301         default:
3302                 adapter->generation = 0;
3303         }
3304         return 0;
3305 }
3306
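/* Poll the SLIPORT status register until the RDY bit is set, giving up
 * after SLIPORT_READY_TIMEOUT seconds.
 */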
3307 static int lancer_wait_ready(struct be_adapter *adapter)
3308 {
3309 #define SLIPORT_READY_TIMEOUT 30
3310         u32 sliport_status;
3311         int status = 0, i;
3312
3313         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3314                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3315                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3316                         break;
3317
3318                 msleep(1000);
3319         }
3320
3321         if (i == SLIPORT_READY_TIMEOUT)
3322                 status = -ETIMEDOUT;
3323
3324         return status;
3325 }
3326
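/* If the port reports an error together with "reset needed", request a
 * port reset by writing SLI_PORT_CONTROL_IP_MASK to SLIPORT_CONTROL,
 * then wait for the port to come back ready with both error bits clear.
 */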
3327 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3328 {
3329         int status;
3330         u32 sliport_status, err, reset_needed;
3331         status = lancer_wait_ready(adapter);
3332         if (!status) {
3333                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3334                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3335                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3336                 if (err && reset_needed) {
3337                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3338                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3339
3340                         /* check if the adapter has corrected the error */
3341                         status = lancer_wait_ready(adapter);
3342                         sliport_status = ioread32(adapter->db +
3343                                                         SLIPORT_STATUS_OFFSET);
3344                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3345                                                 SLIPORT_STATUS_RN_MASK);
3346                         if (status || sliport_status)
3347                                 status = -EIO;
3348                 } else if (err || reset_needed) {
3349                         status = -EIO;
3350                 }
3351         }
3352         return status;
3353 }
3354
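/* Called from the worker: if SLIPORT status shows an error and neither
 * EEH nor UE recovery is already in progress, reset the port and then
 * rebuild the function (be_clear()/be_setup()) with the netdev
 * detached.
 */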
3355 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3356 {
3357         int status;
3358         u32 sliport_status;
3359
3360         if (adapter->eeh_err || adapter->ue_detected)
3361                 return;
3362
3363         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3364
3365         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3366                 dev_err(&adapter->pdev->dev,
3367                                 "Adapter in error state. "
3368                                 "Trying to recover.\n");
3369
3370                 status = lancer_test_and_set_rdy_state(adapter);
3371                 if (status)
3372                         goto err;
3373
3374                 netif_device_detach(adapter->netdev);
3375
3376                 if (netif_running(adapter->netdev))
3377                         be_close(adapter->netdev);
3378
3379                 be_clear(adapter);
3380
3381                 adapter->fw_timeout = false;
3382
3383                 status = be_setup(adapter);
3384                 if (status)
3385                         goto err;
3386
3387                 if (netif_running(adapter->netdev)) {
3388                         status = be_open(adapter->netdev);
3389                         if (status)
3390                                 goto err;
3391                 }
3392
3393                 netif_device_attach(adapter->netdev);
3394
3395                 dev_info(&adapter->pdev->dev,
3396                                 "Adapter error recovery succeeded\n");
3397         }
3398         return;
3399 err:
3400         dev_err(&adapter->pdev->dev,
3401                         "Adapter error recovery failed\n");
3402 }
3403
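/* Periodic (1 second) housekeeping: Lancer error recovery, UE
 * detection, MCC reaping while interrupts are still off, firmware
 * stats refresh, RX EQ delay updates and replenishing of starved RX
 * queues.
 */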
3404 static void be_worker(struct work_struct *work)
3405 {
3406         struct be_adapter *adapter =
3407                 container_of(work, struct be_adapter, work.work);
3408         struct be_rx_obj *rxo;
3409         int i;
3410
3411         if (lancer_chip(adapter))
3412                 lancer_test_and_recover_fn_err(adapter);
3413
3414         be_detect_dump_ue(adapter);
3415
3416         /* When interrupts are not yet enabled, just reap any pending
3417          * MCC completions */
3418         if (!netif_running(adapter->netdev)) {
3419                 int mcc_compl, status = 0;
3420
3421                 mcc_compl = be_process_mcc(adapter, &status);
3422
3423                 if (mcc_compl) {
3424                         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3425                         be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3426                 }
3427
3428                 goto reschedule;
3429         }
3430
3431         if (!adapter->stats_cmd_sent) {
3432                 if (lancer_chip(adapter))
3433                         lancer_cmd_get_pport_stats(adapter,
3434                                                 &adapter->stats_cmd);
3435                 else
3436                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3437         }
3438
3439         for_all_rx_queues(adapter, rxo, i) {
3440                 be_rx_eqd_update(adapter, rxo);
3441
3442                 if (rxo->rx_post_starved) {
3443                         rxo->rx_post_starved = false;
3444                         be_post_rx_frags(rxo, GFP_KERNEL);
3445                 }
3446         }
3447
3448 reschedule:
3449         adapter->work_counter++;
3450         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3451 }
3452
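/* PCI probe: enable the device, detect the chip family, map BARs and
 * allocate control structures, sync with the firmware's POST state,
 * reset the function, then be_setup() and register the netdev. The
 * error labels unwind in strict reverse order of initialization.
 */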
3453 static int __devinit be_probe(struct pci_dev *pdev,
3454                         const struct pci_device_id *pdev_id)
3455 {
3456         int status = 0;
3457         struct be_adapter *adapter;
3458         struct net_device *netdev;
3459
3460         status = pci_enable_device(pdev);
3461         if (status)
3462                 goto do_none;
3463
3464         status = pci_request_regions(pdev, DRV_NAME);
3465         if (status)
3466                 goto disable_dev;
3467         pci_set_master(pdev);
3468
3469         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3470         if (netdev == NULL) {
3471                 status = -ENOMEM;
3472                 goto rel_reg;
3473         }
3474         adapter = netdev_priv(netdev);
3475         adapter->pdev = pdev;
3476         pci_set_drvdata(pdev, adapter);
3477
3478         status = be_dev_family_check(adapter);
3479         if (status)
3480                 goto free_netdev;
3481
3482         adapter->netdev = netdev;
3483         SET_NETDEV_DEV(netdev, &pdev->dev);
3484
3485         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3486         if (!status) {
3487                 netdev->features |= NETIF_F_HIGHDMA;
3488         } else {
3489                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3490                 if (status) {
3491                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3492                         goto free_netdev;
3493                 }
3494         }
3495
3496         status = be_sriov_enable(adapter);
3497         if (status)
3498                 goto free_netdev;
3499
3500         status = be_ctrl_init(adapter);
3501         if (status)
3502                 goto disable_sriov;
3503
3504         if (lancer_chip(adapter)) {
3505                 status = lancer_wait_ready(adapter);
3506                 if (!status) {
3507                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3508                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3509                         status = lancer_test_and_set_rdy_state(adapter);
3510                 }
3511                 if (status) {
3512                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3513                         goto ctrl_clean;
3514                 }
3515         }
3516
3517         /* sync up with fw's ready state */
3518         if (be_physfn(adapter)) {
3519                 status = be_cmd_POST(adapter);
3520                 if (status)
3521                         goto ctrl_clean;
3522         }
3523
3524         /* tell fw we're ready to fire cmds */
3525         status = be_cmd_fw_init(adapter);
3526         if (status)
3527                 goto ctrl_clean;
3528
3529         status = be_cmd_reset_function(adapter);
3530         if (status)
3531                 goto ctrl_clean;
3532
3533         status = be_stats_init(adapter);
3534         if (status)
3535                 goto ctrl_clean;
3536
3537         status = be_get_config(adapter);
3538         if (status)
3539                 goto stats_clean;
3540
3541         /* The INTR bit may be set in the card when probed by a kdump kernel
3542          * after a crash.
3543          */
3544         if (!lancer_chip(adapter))
3545                 be_intr_set(adapter, false);
3546
3547         be_msix_enable(adapter);
3548
3549         INIT_DELAYED_WORK(&adapter->work, be_worker);
3550         adapter->rx_fc = adapter->tx_fc = true;
3551
3552         status = be_setup(adapter);
3553         if (status)
3554                 goto msix_disable;
3555
3556         be_netdev_init(netdev);
3557         status = register_netdev(netdev);
3558         if (status)
3559                 goto unsetup;
3560
3561         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3562
3563         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3564         return 0;
3565
3566 unsetup:
3567         be_clear(adapter);
3568 msix_disable:
3569         be_msix_disable(adapter);
3570 stats_clean:
3571         be_stats_cleanup(adapter);
3572 ctrl_clean:
3573         be_ctrl_cleanup(adapter);
3574 disable_sriov:
3575         be_sriov_disable(adapter);
3576 free_netdev:
3577         free_netdev(netdev);
3578         pci_set_drvdata(pdev, NULL);
3579 rel_reg:
3580         pci_release_regions(pdev);
3581 disable_dev:
3582         pci_disable_device(pdev);
3583 do_none:
3584         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3585         return status;
3586 }
3587
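/* Legacy PM suspend: quiesce the worker, optionally arm wake-on-LAN,
 * close the interface under rtnl and tear down all HW resources so
 * that be_resume() can perform a full be_setup().
 */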
3588 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3589 {
3590         struct be_adapter *adapter = pci_get_drvdata(pdev);
3591         struct net_device *netdev = adapter->netdev;
3592
3593         cancel_delayed_work_sync(&adapter->work);
3594         if (adapter->wol)
3595                 be_setup_wol(adapter, true);
3596
3597         netif_device_detach(netdev);
3598         if (netif_running(netdev)) {
3599                 rtnl_lock();
3600                 be_close(netdev);
3601                 rtnl_unlock();
3602         }
3603         be_clear(adapter);
3604
3605         be_msix_disable(adapter);
3606         pci_save_state(pdev);
3607         pci_disable_device(pdev);
3608         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3609         return 0;
3610 }
3611
3612 static int be_resume(struct pci_dev *pdev)
3613 {
3614         int status = 0;
3615         struct be_adapter *adapter = pci_get_drvdata(pdev);
3616         struct net_device *netdev = adapter->netdev;
3617
3618         netif_device_detach(netdev);
3619
3620         status = pci_enable_device(pdev);
3621         if (status)
3622                 return status;
3623
3624         pci_set_power_state(pdev, PCI_D0);
3625         pci_restore_state(pdev);
3626
3627         be_msix_enable(adapter);
3628         /* tell fw we're ready to fire cmds */
3629         status = be_cmd_fw_init(adapter);
3630         if (status)
3631                 return status;
3632
3633         be_setup(adapter);
3634         if (netif_running(netdev)) {
3635                 rtnl_lock();
3636                 be_open(netdev);
3637                 rtnl_unlock();
3638         }
3639         netif_device_attach(netdev);
3640
3641         if (adapter->wol)
3642                 be_setup_wol(adapter, false);
3643
3644         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3645         return 0;
3646 }
3647
3648 /*
3649  * An FLR will stop BE from DMAing any data.
3650  */
3651 static void be_shutdown(struct pci_dev *pdev)
3652 {
3653         struct be_adapter *adapter = pci_get_drvdata(pdev);
3654
3655         if (!adapter)
3656                 return;
3657
3658         cancel_delayed_work_sync(&adapter->work);
3659
3660         netif_device_detach(adapter->netdev);
3661
3662         if (adapter->wol)
3663                 be_setup_wol(adapter, true);
3664
3665         be_cmd_reset_function(adapter);
3666
3667         pci_disable_device(pdev);
3668 }
3669
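/* EEH (PCI error recovery) callbacks: detach and tear down when an
 * error is detected, re-enable the device and wait for POST on slot
 * reset, and rebuild via be_setup() on resume.
 */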
3670 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3671                                 pci_channel_state_t state)
3672 {
3673         struct be_adapter *adapter = pci_get_drvdata(pdev);
3674         struct net_device *netdev = adapter->netdev;
3675
3676         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3677
3678         adapter->eeh_err = true;
3679
3680         netif_device_detach(netdev);
3681
3682         if (netif_running(netdev)) {
3683                 rtnl_lock();
3684                 be_close(netdev);
3685                 rtnl_unlock();
3686         }
3687         be_clear(adapter);
3688
3689         if (state == pci_channel_io_perm_failure)
3690                 return PCI_ERS_RESULT_DISCONNECT;
3691
3692         pci_disable_device(pdev);
3693
3694         return PCI_ERS_RESULT_NEED_RESET;
3695 }
3696
3697 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3698 {
3699         struct be_adapter *adapter = pci_get_drvdata(pdev);
3700         int status;
3701
3702         dev_info(&adapter->pdev->dev, "EEH reset\n");
3703         adapter->eeh_err = false;
3704         adapter->ue_detected = false;
3705         adapter->fw_timeout = false;
3706
3707         status = pci_enable_device(pdev);
3708         if (status)
3709                 return PCI_ERS_RESULT_DISCONNECT;
3710
3711         pci_set_master(pdev);
3712         pci_set_power_state(pdev, PCI_D0);
3713         pci_restore_state(pdev);
3714
3715         /* Check if card is ok and fw is ready */
3716         status = be_cmd_POST(adapter);
3717         if (status)
3718                 return PCI_ERS_RESULT_DISCONNECT;
3719
3720         return PCI_ERS_RESULT_RECOVERED;
3721 }
3722
3723 static void be_eeh_resume(struct pci_dev *pdev)
3724 {
3725         int status = 0;
3726         struct be_adapter *adapter = pci_get_drvdata(pdev);
3727         struct net_device *netdev = adapter->netdev;
3728
3729         dev_info(&adapter->pdev->dev, "EEH resume\n");
3730
3731         pci_save_state(pdev);
3732
3733         /* tell fw we're ready to fire cmds */
3734         status = be_cmd_fw_init(adapter);
3735         if (status)
3736                 goto err;
3737
3738         status = be_setup(adapter);
3739         if (status)
3740                 goto err;
3741
3742         if (netif_running(netdev)) {
3743                 status = be_open(netdev);
3744                 if (status)
3745                         goto err;
3746         }
3747         netif_device_attach(netdev);
3748         return;
3749 err:
3750         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3751 }
3752
3753 static struct pci_error_handlers be_eeh_handlers = {
3754         .error_detected = be_eeh_err_detected,
3755         .slot_reset = be_eeh_reset,
3756         .resume = be_eeh_resume,
3757 };
3758
3759 static struct pci_driver be_driver = {
3760         .name = DRV_NAME,
3761         .id_table = be_dev_ids,
3762         .probe = be_probe,
3763         .remove = be_remove,
3764         .suspend = be_suspend,
3765         .resume = be_resume,
3766         .shutdown = be_shutdown,
3767         .err_handler = &be_eeh_handlers
3768 };
3769
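/* rx_frag_size is validated at module load and falls back to 2048 for
 * any other value. For example (module name assumed to be be2net):
 *
 *   modprobe be2net rx_frag_size=4096
 */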
3770 static int __init be_init_module(void)
3771 {
3772         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3773             rx_frag_size != 2048) {
3774                 printk(KERN_WARNING DRV_NAME
3775                         " : Module param rx_frag_size must be 2048/4096/8192."
3776                         " Using 2048\n");
3777                 rx_frag_size = 2048;
3778         }
3779
3780         return pci_register_driver(&be_driver);
3781 }
3782 module_init(be_init_module);
3783
3784 static void __exit be_exit_module(void)
3785 {
3786         pci_unregister_driver(&be_driver);
3787 }
3788 module_exit(be_exit_module);