be2net: Program secondary UC MAC address into MAC filter
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC",
	"RDMA",
	"RXF",
	"RXIPS",
	"RXULP0",
	"RXULP1",
	"RXULP2",
	"TIM",
	"TPOST",
	"TPRE",
	"TXIPS",
	"TXULP0",
	"TXULP1",
	"UC",
	"WDMA",
	"TXULP2",
	"HOST1",
	"P0_OB_LINK",
	"P1_OB_LINK",
	"HOST_GPIO",
	"MBOX",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

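/* Allocate and zero the DMA-coherent ring backing a be_queue_info;
 * len is the number of entries and entry_size the size of each.
 */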
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

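/* Mask/unmask the adapter's host interrupt line via the membar
 * interrupt-control register; the register is written back only when
 * the enabled state actually changes.
 */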
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

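/* Queue doorbell helpers: a queue is kicked by writing its ring id and
 * a posted/popped count into the corresponding doorbell register.  For
 * the RX and TX queues, the wmb() makes the new descriptors visible in
 * memory before the doorbell write reaches the device.
 */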
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

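/* ndo_set_mac_address: program the new MAC into the filter *before*
 * deleting the old entry, so the interface is never left without a
 * programmed MAC; the pmac_id saved on entry identifies the stale
 * entry to delete afterwards.
 */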
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

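/* Fold a 16-bit HW counter into a 32-bit SW accumulator.  The low half
 * of *acc mirrors the last HW reading; a new value below it means the
 * HW counter wrapped, so one full 2^16 period is added.  For example,
 * acc = 0x0001FFF0 and val = 0x0005 yields 0x00020005.
 */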
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* this erx HW counter wraps around after 65535; the driver
		 * accumulates it into a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

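/* ndo_get_stats64: per-queue 64-bit counters are sampled inside a
 * u64_stats fetch/retry loop so they read back consistently even on
 * 32-bit hosts, then folded into the rtnl totals together with the HW
 * error counters parsed into drv_stats.
 */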
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

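/* Writer side of the TX stats seqcount: u64_stats_update_begin/end
 * bracket the counter updates so be_get_stats64() can read coherent
 * 64-bit values on 32-bit hosts.
 */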
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If the vlan priority provided by the OS is NOT in the available
	 * bmap, substitute the recommended priority
	 */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

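/* Build the WRB chain for an skb: the header WRB is reserved first,
 * followed by one fragment WRB per DMA-mapped piece (linear head and
 * page frags) and an optional dummy WRB to even out the count.  On a
 * DMA mapping failure all mappings done so far are unwound and 0 is
 * returned.
 */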
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkts that are 60 bytes
	 * or smaller.
	 * As a workaround, insert the vlan tag in SW and disable TX vlan
	 * offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

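/* vlan add/kill from the stack: update the shadow vlan_tag[] table and
 * re-program the HW vlan table; on failure the shadow entry is rolled
 * back so SW and HW state stay in sync.
 */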
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

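/* Sync the HW RX filter with netdev state.  Promiscuous mode
 * short-circuits everything; too many multicast addresses fall back to
 * ALLMULTI.  On any change in the unicast list, all secondary UC MACs
 * are deleted and re-programmed -- pmac slot 0 stays reserved for the
 * primary MAC, and overflowing the available pmac entries falls back
 * to promiscuous mode.
 */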
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

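/* Adaptive interrupt coalescing: once per second, derive a new EQ
 * delay from the observed RX packet rate (higher pps => larger delay,
 * clamped to [min_eqd, max_eqd]) and push it to the device only when
 * it differs from the current setting.
 */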
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

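/* Return the page_info for a consumed RX fragment.  One big page is
 * shared by several fragments; its DMA mapping is torn down only when
 * the fragment flagged last_page_user is consumed.
 */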
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

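/* Parse a v1 (BE3 native mode) RX completion into the generic
 * be_rx_compl_info.  Every field must be extracted from the raw
 * completion entry (compl), never from the partially filled rxcp.
 */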
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

1363 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1364 {
1365         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1366         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1367         struct be_adapter *adapter = rxo->adapter;
1368
1369         /* For checking the valid bit it is Ok to use either definition as the
1370          * valid bit is at the same position in both v0 and v1 Rx compl */
1371         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1372                 return NULL;
1373
1374         rmb();
1375         be_dws_le_to_cpu(compl, sizeof(*compl));
1376
1377         if (adapter->be3_native)
1378                 be_parse_rx_compl_v1(compl, rxcp);
1379         else
1380                 be_parse_rx_compl_v0(compl, rxcp);
1381
1382         if (rxcp->vlanf) {
1383                 /* vlanf could be wrongly set in some cards.
1384                  * ignore if vtm is not set */
1385                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1386                         rxcp->vlanf = 0;
1387
1388                 if (!lancer_chip(adapter))
1389                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1390
1391                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1392                     !adapter->vlan_tag[rxcp->vlan_tag])
1393                         rxcp->vlanf = 0;
1394         }
1395
1396         /* As the compl has been parsed, reset it; we won't touch it again */
1397         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1398
1399         queue_tail_inc(&rxo->cq);
1400         return rxcp;
1401 }
1402
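/* Allocate a compound page (__GFP_COMP) when order > 0 so that the
 * per-fragment get_page()/put_page() calls in be_post_rx_frags() below
 * operate on the page head and keep the refcounting correct.
 */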
1403 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1404 {
1405         u32 order = get_order(size);
1406
1407         if (order > 0)
1408                 gfp |= __GFP_COMP;
1409         return  alloc_pages(gfp, order);
1410 }
1411
1412 /*
1413  * Allocate a page, split it to fragments of size rx_frag_size and post as
1414  * receive buffers to BE
1415  */
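/* For example, with the default rx_frag_size of 2048 and 4K pages:
 *   big_page_size = (1 << get_order(2048)) * PAGE_SIZE = 4096
 * so each page yields two fragments; last_page_user is set on the final
 * fragment of a page so its DMA mapping is released only after all of the
 * page's fragments have been consumed.
 */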
1416 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1417 {
1418         struct be_adapter *adapter = rxo->adapter;
1419         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1420         struct be_queue_info *rxq = &rxo->q;
1421         struct page *pagep = NULL;
1422         struct be_eth_rx_d *rxd;
1423         u64 page_dmaaddr = 0, frag_dmaaddr;
1424         u32 posted, page_offset = 0;
1425
1426         page_info = &rxo->page_info_tbl[rxq->head];
1427         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1428                 if (!pagep) {
1429                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1430                         if (unlikely(!pagep)) {
1431                                 rx_stats(rxo)->rx_post_fail++;
1432                                 break;
1433                         }
1434                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1435                                                     0, adapter->big_page_size,
1436                                                     DMA_FROM_DEVICE);
1437                         page_info->page_offset = 0;
1438                 } else {
1439                         get_page(pagep);
1440                         page_info->page_offset = page_offset + rx_frag_size;
1441                 }
1442                 page_offset = page_info->page_offset;
1443                 page_info->page = pagep;
1444                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1445                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1446
1447                 rxd = queue_head_node(rxq);
1448                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1449                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1450
1451                 /* Any space left in the current big page for another frag? */
1452                 if ((page_offset + rx_frag_size + rx_frag_size) >
1453                                         adapter->big_page_size) {
1454                         pagep = NULL;
1455                         page_info->last_page_user = true;
1456                 }
1457
1458                 prev_page_info = page_info;
1459                 queue_head_inc(rxq);
1460                 page_info = &rxo->page_info_tbl[rxq->head];
1461         }
1462         if (pagep)
1463                 prev_page_info->last_page_user = true;
1464
1465         if (posted) {
1466                 atomic_add(posted, &rxq->used);
1467                 be_rxq_notify(adapter, rxq->id, posted);
1468         } else if (atomic_read(&rxq->used) == 0) {
1469                 /* Let be_worker replenish when memory is available */
1470                 rxo->rx_post_starved = true;
1471         }
1472 }
1473
1474 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1475 {
1476         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1477
1478         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1479                 return NULL;
1480
1481         rmb();
1482         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1483
1484         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1485
1486         queue_tail_inc(tx_cq);
1487         return txcp;
1488 }
1489
1490 static u16 be_tx_compl_process(struct be_adapter *adapter,
1491                 struct be_tx_obj *txo, u16 last_index)
1492 {
1493         struct be_queue_info *txq = &txo->q;
1494         struct be_eth_wrb *wrb;
1495         struct sk_buff **sent_skbs = txo->sent_skb_list;
1496         struct sk_buff *sent_skb;
1497         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1498         bool unmap_skb_hdr = true;
1499
1500         sent_skb = sent_skbs[txq->tail];
1501         BUG_ON(!sent_skb);
1502         sent_skbs[txq->tail] = NULL;
1503
1504         /* skip header wrb */
1505         queue_tail_inc(txq);
1506
1507         do {
1508                 cur_index = txq->tail;
1509                 wrb = queue_tail_node(txq);
1510                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1511                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1512                 unmap_skb_hdr = false;
1513
1514                 num_wrbs++;
1515                 queue_tail_inc(txq);
1516         } while (cur_index != last_index);
1517
1518         kfree_skb(sent_skb);
1519         return num_wrbs;
1520 }
1521
1522 /* Return the number of events in the event queue */
1523 static inline int events_get(struct be_eq_obj *eqo)
1524 {
1525         struct be_eq_entry *eqe;
1526         int num = 0;
1527
1528         do {
1529                 eqe = queue_tail_node(&eqo->q);
1530                 if (eqe->evt == 0)
1531                         break;
1532
1533                 rmb();
1534                 eqe->evt = 0;
1535                 num++;
1536                 queue_tail_inc(&eqo->q);
1537         } while (true);
1538
1539         return num;
1540 }
1541
1542 static int event_handle(struct be_eq_obj *eqo)
1543 {
1544         bool rearm = false;
1545         int num = events_get(eqo);
1546
1547         /* Deal with any spurious interrupts that come without events */
1548         if (!num)
1549                 rearm = true;
1550
1551         be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1552         if (num)
1553                 napi_schedule(&eqo->napi);
1554
1555         return num;
1556 }
1557
1558 /* Leaves the EQ in a disarmed state */
1559 static void be_eq_clean(struct be_eq_obj *eqo)
1560 {
1561         int num = events_get(eqo);
1562
1563         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1564 }
1565
1566 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1567 {
1568         struct be_rx_page_info *page_info;
1569         struct be_queue_info *rxq = &rxo->q;
1570         struct be_queue_info *rx_cq = &rxo->cq;
1571         struct be_rx_compl_info *rxcp;
1572         u16 tail;
1573
1574         /* First cleanup pending rx completions */
1575         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1576                 be_rx_compl_discard(rxo, rxcp);
1577                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1578         }
1579
1580         /* Then free posted rx buffers that were not used */
1581         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1582         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1583                 page_info = get_rx_page_info(rxo, tail);
1584                 put_page(page_info->page);
1585                 memset(page_info, 0, sizeof(*page_info));
1586         }
1587         BUG_ON(atomic_read(&rxq->used));
1588         rxq->tail = rxq->head = 0;
1589 }
1590
1591 static void be_tx_compl_clean(struct be_adapter *adapter)
1592 {
1593         struct be_tx_obj *txo;
1594         struct be_queue_info *txq;
1595         struct be_eth_tx_compl *txcp;
1596         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1597         struct sk_buff *sent_skb;
1598         bool dummy_wrb;
1599         int i, pending_txqs;
1600
1601         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1602         do {
1603                 pending_txqs = adapter->num_tx_qs;
1604
1605                 for_all_tx_queues(adapter, txo, i) {
1606                         txq = &txo->q;
1607                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1608                                 end_idx =
1609                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1610                                                       wrb_index, txcp);
1611                                 num_wrbs += be_tx_compl_process(adapter, txo,
1612                                                                 end_idx);
1613                                 cmpl++;
1614                         }
1615                         if (cmpl) {
1616                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1617                                 atomic_sub(num_wrbs, &txq->used);
1618                                 cmpl = 0;
1619                                 num_wrbs = 0;
1620                         }
1621                         if (atomic_read(&txq->used) == 0)
1622                                 pending_txqs--;
1623                 }
1624
1625                 if (pending_txqs == 0 || ++timeo > 200)
1626                         break;
1627
1628                 mdelay(1);
1629         } while (true);
1630
1631         for_all_tx_queues(adapter, txo, i) {
1632                 txq = &txo->q;
1633                 if (atomic_read(&txq->used))
1634                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1635                                 atomic_read(&txq->used));
1636
1637                 /* free posted tx for which compls will never arrive */
1638                 while (atomic_read(&txq->used)) {
1639                         sent_skb = txo->sent_skb_list[txq->tail];
1640                         end_idx = txq->tail;
1641                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1642                                                    &dummy_wrb);
1643                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1644                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1645                         atomic_sub(num_wrbs, &txq->used);
1646                 }
1647         }
1648 }
1649
1650 static void be_evt_queues_destroy(struct be_adapter *adapter)
1651 {
1652         struct be_eq_obj *eqo;
1653         int i;
1654
1655         for_all_evt_queues(adapter, eqo, i) {
1656                 be_eq_clean(eqo);
1657                 if (eqo->q.created)
1658                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1659                 be_queue_free(adapter, &eqo->q);
1660         }
1661 }
1662
1663 static int be_evt_queues_create(struct be_adapter *adapter)
1664 {
1665         struct be_queue_info *eq;
1666         struct be_eq_obj *eqo;
1667         int i, rc;
1668
1669         adapter->num_evt_qs = num_irqs(adapter);
1670
1671         for_all_evt_queues(adapter, eqo, i) {
1672                 eqo->adapter = adapter;
1673                 eqo->tx_budget = BE_TX_BUDGET;
1674                 eqo->idx = i;
1675                 eqo->max_eqd = BE_MAX_EQD;
1676                 eqo->enable_aic = true;
1677
1678                 eq = &eqo->q;
1679                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1680                                         sizeof(struct be_eq_entry));
1681                 if (rc)
1682                         return rc;
1683
1684                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1685                 if (rc)
1686                         return rc;
1687         }
1688         return 0;
1689 }
1690
1691 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1692 {
1693         struct be_queue_info *q;
1694
1695         q = &adapter->mcc_obj.q;
1696         if (q->created)
1697                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1698         be_queue_free(adapter, q);
1699
1700         q = &adapter->mcc_obj.cq;
1701         if (q->created)
1702                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1703         be_queue_free(adapter, q);
1704 }
1705
1706 /* Must be called only after TX qs are created as MCC shares TX EQ */
1707 static int be_mcc_queues_create(struct be_adapter *adapter)
1708 {
1709         struct be_queue_info *q, *cq;
1710
1711         cq = &adapter->mcc_obj.cq;
1712         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1713                         sizeof(struct be_mcc_compl)))
1714                 goto err;
1715
1716         /* Use the default EQ for MCC completions */
1717         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1718                 goto mcc_cq_free;
1719
1720         q = &adapter->mcc_obj.q;
1721         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1722                 goto mcc_cq_destroy;
1723
1724         if (be_cmd_mccq_create(adapter, q, cq))
1725                 goto mcc_q_free;
1726
1727         return 0;
1728
1729 mcc_q_free:
1730         be_queue_free(adapter, q);
1731 mcc_cq_destroy:
1732         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1733 mcc_cq_free:
1734         be_queue_free(adapter, cq);
1735 err:
1736         return -1;
1737 }
1738
1739 static void be_tx_queues_destroy(struct be_adapter *adapter)
1740 {
1741         struct be_queue_info *q;
1742         struct be_tx_obj *txo;
1743         u8 i;
1744
1745         for_all_tx_queues(adapter, txo, i) {
1746                 q = &txo->q;
1747                 if (q->created)
1748                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1749                 be_queue_free(adapter, q);
1750
1751                 q = &txo->cq;
1752                 if (q->created)
1753                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1754                 be_queue_free(adapter, q);
1755         }
1756 }
1757
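/* Multiple TX queues are used only on a BE3 PF with SR-IOV and
 * multi-channel disabled; Lancer, VFs, BE2 and multi-channel functions
 * are limited to a single TXQ.
 */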
1758 static int be_num_txqs_want(struct be_adapter *adapter)
1759 {
1760         if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1761                 lancer_chip(adapter) || !be_physfn(adapter) ||
1762                 adapter->generation == BE_GEN2)
1763                 return 1;
1764         else
1765                 return MAX_TX_QS;
1766 }
1767
1768 static int be_tx_cqs_create(struct be_adapter *adapter)
1769 {
1770         struct be_queue_info *cq, *eq;
1771         int status;
1772         struct be_tx_obj *txo;
1773         u8 i;
1774
1775         adapter->num_tx_qs = be_num_txqs_want(adapter);
1776         if (adapter->num_tx_qs != MAX_TX_QS) {
1777                 rtnl_lock();
1778                 netif_set_real_num_tx_queues(adapter->netdev,
1779                         adapter->num_tx_qs);
1780                 rtnl_unlock();
1781         }
1782
1783         for_all_tx_queues(adapter, txo, i) {
1784                 cq = &txo->cq;
1785                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1786                                         sizeof(struct be_eth_tx_compl));
1787                 if (status)
1788                         return status;
1789
1790                 /* If num_evt_qs is less than num_tx_qs, then more than
1791                  * one txq shares an eq
1792                  */
1793                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1794                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1795                 if (status)
1796                         return status;
1797         }
1798         return 0;
1799 }
1800
1801 static int be_tx_qs_create(struct be_adapter *adapter)
1802 {
1803         struct be_tx_obj *txo;
1804         int i, status;
1805
1806         for_all_tx_queues(adapter, txo, i) {
1807                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1808                                         sizeof(struct be_eth_wrb));
1809                 if (status)
1810                         return status;
1811
1812                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1813                 if (status)
1814                         return status;
1815         }
1816
1817         return 0;
1818 }
1819
1820 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1821 {
1822         struct be_queue_info *q;
1823         struct be_rx_obj *rxo;
1824         int i;
1825
1826         for_all_rx_queues(adapter, rxo, i) {
1827                 q = &rxo->cq;
1828                 if (q->created)
1829                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1830                 be_queue_free(adapter, q);
1831         }
1832 }
1833
1834 static int be_rx_cqs_create(struct be_adapter *adapter)
1835 {
1836         struct be_queue_info *eq, *cq;
1837         struct be_rx_obj *rxo;
1838         int rc, i;
1839
1840         /* We'll create one RSS ring per irq, plus the default non-RSS
1841          * RXQ (the "+ 1" below). When there's only one irq there's no
1842          * use creating RSS rings. */
1843         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1844                                 num_irqs(adapter) + 1 : 1;
1845
1846         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1847         for_all_rx_queues(adapter, rxo, i) {
1848                 rxo->adapter = adapter;
1849                 cq = &rxo->cq;
1850                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1851                                 sizeof(struct be_eth_rx_compl));
1852                 if (rc)
1853                         return rc;
1854
1855                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1856                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1857                 if (rc)
1858                         return rc;
1859         }
1860
1861         if (adapter->num_rx_qs != MAX_RX_QS)
1862                 dev_info(&adapter->pdev->dev,
1863                 "Created only %d receive queues\n", adapter->num_rx_qs);
1864
1865         return 0;
1866 }
1867
1868 static irqreturn_t be_intx(int irq, void *dev)
1869 {
1870         struct be_adapter *adapter = dev;
1871         int num_evts;
1872
1873         /* With INTx only one EQ is used */
1874         num_evts = event_handle(&adapter->eq_obj[0]);
1875         if (num_evts)
1876                 return IRQ_HANDLED;
1877         else
1878                 return IRQ_NONE;
1879 }
1880
1881 static irqreturn_t be_msix(int irq, void *dev)
1882 {
1883         struct be_eq_obj *eqo = dev;
1884
1885         event_handle(eqo);
1886         return IRQ_HANDLED;
1887 }
1888
1889 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1890 {
1891         return rxcp->tcpf && !rxcp->err;
1892 }
1893
1894 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1895                         int budget)
1896 {
1897         struct be_adapter *adapter = rxo->adapter;
1898         struct be_queue_info *rx_cq = &rxo->cq;
1899         struct be_rx_compl_info *rxcp;
1900         u32 work_done;
1901
1902         for (work_done = 0; work_done < budget; work_done++) {
1903                 rxcp = be_rx_compl_get(rxo);
1904                 if (!rxcp)
1905                         break;
1906
1907                 /* Is it a flush compl that has no data */
1908                 if (unlikely(rxcp->num_rcvd == 0))
1909                         goto loop_continue;
1910
1911                 /* Discard compl with partial DMA Lancer B0 */
1912                 if (unlikely(!rxcp->pkt_size)) {
1913                         be_rx_compl_discard(rxo, rxcp);
1914                         goto loop_continue;
1915                 }
1916
1917                 /* On BE drop pkts that arrive due to imperfect filtering in
1918                  * promiscuous mode on some SKUs
1919                  */
1920                 if (unlikely(rxcp->port != adapter->port_num &&
1921                                 !lancer_chip(adapter))) {
1922                         be_rx_compl_discard(rxo, rxcp);
1923                         goto loop_continue;
1924                 }
1925
1926                 if (do_gro(rxcp))
1927                         be_rx_compl_process_gro(rxo, napi, rxcp);
1928                 else
1929                         be_rx_compl_process(rxo, rxcp);
1930 loop_continue:
1931                 be_rx_stats_update(rxo, rxcp);
1932         }
1933
1934         if (work_done) {
1935                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1936
1937                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1938                         be_post_rx_frags(rxo, GFP_ATOMIC);
1939         }
1940
1941         return work_done;
1942 }
1943
1944 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1945                           int budget, int idx)
1946 {
1947         struct be_eth_tx_compl *txcp;
1948         int num_wrbs = 0, work_done;
1949
1950         for (work_done = 0; work_done < budget; work_done++) {
1951                 txcp = be_tx_compl_get(&txo->cq);
1952                 if (!txcp)
1953                         break;
1954                 num_wrbs += be_tx_compl_process(adapter, txo,
1955                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1956                                         wrb_index, txcp));
1957         }
1958
1959         if (work_done) {
1960                 be_cq_notify(adapter, txo->cq.id, true, work_done);
1961                 atomic_sub(num_wrbs, &txo->q.used);
1962
1963                 /* As Tx wrbs have been freed up, wake up netdev queue
1964                  * if it was stopped due to lack of tx wrbs.  */
1965                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
1966                         atomic_read(&txo->q.used) < txo->q.len / 2) {
1967                         netif_wake_subqueue(adapter->netdev, idx);
1968                 }
1969
1970                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1971                 tx_stats(txo)->tx_compl += work_done;
1972                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1973         }
1974         return (work_done < budget); /* Done */
1975 }
1976
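/* NAPI poll handler: services the TX, RX and (for the MCC EQ) MCC
 * completion queues tied to this EQ. Per the NAPI contract, a return
 * value below budget (after napi_complete()) exits polled mode and
 * re-arms the EQ interrupt; returning the full budget keeps us polling.
 */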
1977 int be_poll(struct napi_struct *napi, int budget)
1978 {
1979         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
1980         struct be_adapter *adapter = eqo->adapter;
1981         int max_work = 0, work, i;
1982         bool tx_done;
1983
1984         /* Process all TXQs serviced by this EQ */
1985         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
1986                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
1987                                         eqo->tx_budget, i);
1988                 if (!tx_done)
1989                         max_work = budget;
1990         }
1991
1992         /* This loop will iterate twice for EQ0 in which
1993          * completions of the last RXQ (default one) are also processed.
1994          * For other EQs the loop iterates only once.
1995          */
1996         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
1997                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
1998                 max_work = max(work, max_work);
1999         }
2000
2001         if (is_mcc_eqo(eqo))
2002                 be_process_mcc(adapter);
2003
2004         if (max_work < budget) {
2005                 napi_complete(napi);
2006                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2007         } else {
2008                 /* As we'll continue in polling mode, count and clear events */
2009                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2010         }
2011         return max_work;
2012 }
2013
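/* Detect an unrecoverable error (UE): Lancer reports it through the
 * SLIPORT status registers, while BE2/BE3 expose UE status words in PCI
 * config space that are masked so only unexpected bits are dumped via
 * the ue_status_low_desc/ue_status_hi_desc name tables.
 */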
2014 void be_detect_dump_ue(struct be_adapter *adapter)
2015 {
2016         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2017         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2018         u32 i;
2019
2020         if (adapter->eeh_err || adapter->ue_detected)
2021                 return;
2022
2023         if (lancer_chip(adapter)) {
2024                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2025                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2026                         sliport_err1 = ioread32(adapter->db +
2027                                         SLIPORT_ERROR1_OFFSET);
2028                         sliport_err2 = ioread32(adapter->db +
2029                                         SLIPORT_ERROR2_OFFSET);
2030                 }
2031         } else {
2032                 pci_read_config_dword(adapter->pdev,
2033                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2034                 pci_read_config_dword(adapter->pdev,
2035                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2036                 pci_read_config_dword(adapter->pdev,
2037                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2038                 pci_read_config_dword(adapter->pdev,
2039                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2040
2041                 ue_lo = (ue_lo & (~ue_lo_mask));
2042                 ue_hi = (ue_hi & (~ue_hi_mask));
2043         }
2044
2045         if (ue_lo || ue_hi ||
2046                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2047                 adapter->ue_detected = true;
2048                 adapter->eeh_err = true;
2049                 dev_err(&adapter->pdev->dev,
2050                         "Unrecoverable error in the card\n");
2051         }
2052
2053         if (ue_lo) {
2054                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2055                         if (ue_lo & 1)
2056                                 dev_err(&adapter->pdev->dev,
2057                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2058                 }
2059         }
2060         if (ue_hi) {
2061                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2062                         if (ue_hi & 1)
2063                                 dev_err(&adapter->pdev->dev,
2064                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2065                 }
2066         }
2067
2068         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2069                 dev_err(&adapter->pdev->dev,
2070                         "sliport status 0x%x\n", sliport_status);
2071                 dev_err(&adapter->pdev->dev,
2072                         "sliport error1 0x%x\n", sliport_err1);
2073                 dev_err(&adapter->pdev->dev,
2074                         "sliport error2 0x%x\n", sliport_err2);
2075         }
2076 }
2077
2078 static void be_msix_disable(struct be_adapter *adapter)
2079 {
2080         if (msix_enabled(adapter)) {
2081                 pci_disable_msix(adapter->pdev);
2082                 adapter->num_msix_vec = 0;
2083         }
2084 }
2085
2086 static uint be_num_rss_want(struct be_adapter *adapter)
2087 {
2088         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2089              adapter->num_vfs == 0 && be_physfn(adapter) &&
2090              !be_is_mc(adapter))
2091                 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2092         else
2093                 return 0;
2094 }
2095
2096 static void be_msix_enable(struct be_adapter *adapter)
2097 {
2098 #define BE_MIN_MSIX_VECTORS             1
2099         int i, status, num_vec;
2100
2101         /* If RSS queues are not used, need a vec for default RX Q */
2102         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2103         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2104
2105         for (i = 0; i < num_vec; i++)
2106                 adapter->msix_entries[i].entry = i;
2107
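        /* On failure the legacy pci_enable_msix() returns a positive count
         * of the vectors that could have been allocated; retry once with
         * that smaller count before giving up and falling back to INTx.
         */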
2108         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2109         if (status == 0) {
2110                 goto done;
2111         } else if (status >= BE_MIN_MSIX_VECTORS) {
2112                 num_vec = status;
2113                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2114                                 num_vec) == 0)
2115                         goto done;
2116         }
2117         return;
2118 done:
2119         adapter->num_msix_vec = num_vec;
2120         return;
2121 }
2122
2123 static int be_sriov_enable(struct be_adapter *adapter)
2124 {
2125         be_check_sriov_fn_type(adapter);
2126
2127 #ifdef CONFIG_PCI_IOV
2128         if (be_physfn(adapter) && num_vfs) {
2129                 int status, pos;
2130                 u16 dev_vfs;
2131
2132                 pos = pci_find_ext_capability(adapter->pdev,
2133                                                 PCI_EXT_CAP_ID_SRIOV);
2134                 pci_read_config_word(adapter->pdev,
2135                                      pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2136
2137                 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2138                 if (adapter->num_vfs != num_vfs)
2139                         dev_info(&adapter->pdev->dev,
2140                                  "Device supports %d VFs and not %d\n",
2141                                  adapter->num_vfs, num_vfs);
2142
2143                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2144                 if (status)
2145                         adapter->num_vfs = 0;
2146
2147                 if (adapter->num_vfs) {
2148                         adapter->vf_cfg = kcalloc(adapter->num_vfs,
2149                                                 sizeof(struct be_vf_cfg),
2150                                                 GFP_KERNEL);
2151                         if (!adapter->vf_cfg)
2152                                 return -ENOMEM;
2153                 }
2154         }
2155 #endif
2156         return 0;
2157 }
2158
2159 static void be_sriov_disable(struct be_adapter *adapter)
2160 {
2161 #ifdef CONFIG_PCI_IOV
2162         if (sriov_enabled(adapter)) {
2163                 pci_disable_sriov(adapter->pdev);
2164                 kfree(adapter->vf_cfg);
2165                 adapter->num_vfs = 0;
2166         }
2167 #endif
2168 }
2169
2170 static inline int be_msix_vec_get(struct be_adapter *adapter,
2171                                 struct be_eq_obj *eqo)
2172 {
2173         return adapter->msix_entries[eqo->idx].vector;
2174 }
2175
2176 static int be_msix_register(struct be_adapter *adapter)
2177 {
2178         struct net_device *netdev = adapter->netdev;
2179         struct be_eq_obj *eqo;
2180         int status, i, vec;
2181
2182         for_all_evt_queues(adapter, eqo, i) {
2183                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2184                 vec = be_msix_vec_get(adapter, eqo);
2185                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2186                 if (status)
2187                         goto err_msix;
2188         }
2189
2190         return 0;
2191 err_msix:
2192         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2193                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2194         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2195                 status);
2196         be_msix_disable(adapter);
2197         return status;
2198 }
2199
2200 static int be_irq_register(struct be_adapter *adapter)
2201 {
2202         struct net_device *netdev = adapter->netdev;
2203         int status;
2204
2205         if (msix_enabled(adapter)) {
2206                 status = be_msix_register(adapter);
2207                 if (status == 0)
2208                         goto done;
2209                 /* INTx is not supported for VF */
2210                 if (!be_physfn(adapter))
2211                         return status;
2212         }
2213
2214         /* INTx */
2215         netdev->irq = adapter->pdev->irq;
2216         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2217                         adapter);
2218         if (status) {
2219                 dev_err(&adapter->pdev->dev,
2220                         "INTx request IRQ failed - err %d\n", status);
2221                 return status;
2222         }
2223 done:
2224         adapter->isr_registered = true;
2225         return 0;
2226 }
2227
2228 static void be_irq_unregister(struct be_adapter *adapter)
2229 {
2230         struct net_device *netdev = adapter->netdev;
2231         struct be_eq_obj *eqo;
2232         int i;
2233
2234         if (!adapter->isr_registered)
2235                 return;
2236
2237         /* INTx */
2238         if (!msix_enabled(adapter)) {
2239                 free_irq(netdev->irq, adapter);
2240                 goto done;
2241         }
2242
2243         /* MSIx */
2244         for_all_evt_queues(adapter, eqo, i)
2245                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2246
2247 done:
2248         adapter->isr_registered = false;
2249 }
2250
2251 static void be_rx_qs_destroy(struct be_adapter *adapter)
2252 {
2253         struct be_queue_info *q;
2254         struct be_rx_obj *rxo;
2255         int i;
2256
2257         for_all_rx_queues(adapter, rxo, i) {
2258                 q = &rxo->q;
2259                 if (q->created) {
2260                         be_cmd_rxq_destroy(adapter, q);
2261                         /* After the rxq is invalidated, wait for a grace time
2262                          * of 1ms for all dma to end and the flush compl to
2263                          * arrive
2264                          */
2265                         mdelay(1);
2266                         be_rx_cq_clean(rxo);
2267                 }
2268                 be_queue_free(adapter, q);
2269         }
2270 }
2271
2272 static int be_close(struct net_device *netdev)
2273 {
2274         struct be_adapter *adapter = netdev_priv(netdev);
2275         struct be_eq_obj *eqo;
2276         int i;
2277
2278         be_async_mcc_disable(adapter);
2279
2280         if (!lancer_chip(adapter))
2281                 be_intr_set(adapter, false);
2282
2283         for_all_evt_queues(adapter, eqo, i) {
2284                 napi_disable(&eqo->napi);
2285                 if (msix_enabled(adapter))
2286                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2287                 else
2288                         synchronize_irq(netdev->irq);
2289                 be_eq_clean(eqo);
2290         }
2291
2292         be_irq_unregister(adapter);
2293
2294         /* Wait for all pending tx completions to arrive so that
2295          * all tx skbs are freed.
2296          */
2297         be_tx_compl_clean(adapter);
2298
2299         be_rx_qs_destroy(adapter);
2300         return 0;
2301 }
2302
2303 static int be_rx_qs_create(struct be_adapter *adapter)
2304 {
2305         struct be_rx_obj *rxo;
2306         int rc, i, j;
2307         u8 rsstable[128];
2308
2309         for_all_rx_queues(adapter, rxo, i) {
2310                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2311                                     sizeof(struct be_eth_rx_d));
2312                 if (rc)
2313                         return rc;
2314         }
2315
2316         /* The FW would like the default RXQ to be created first */
2317         rxo = default_rxo(adapter);
2318         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2319                                adapter->if_handle, false, &rxo->rss_id);
2320         if (rc)
2321                 return rc;
2322
2323         for_all_rss_queues(adapter, rxo, i) {
2324                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2325                                        rx_frag_size, adapter->if_handle,
2326                                        true, &rxo->rss_id);
2327                 if (rc)
2328                         return rc;
2329         }
2330
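        /* Populate the 128-entry RSS indirection table by striping the RSS
         * rings round-robin, e.g. with three rings of ids a, b, c the table
         * becomes a, b, c, a, b, c, ...
         */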
2331         if (be_multi_rxq(adapter)) {
2332                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2333                         for_all_rss_queues(adapter, rxo, i) {
2334                                 if ((j + i) >= 128)
2335                                         break;
2336                                 rsstable[j + i] = rxo->rss_id;
2337                         }
2338                 }
2339                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2340                 if (rc)
2341                         return rc;
2342         }
2343
2344         /* First time posting */
2345         for_all_rx_queues(adapter, rxo, i)
2346                 be_post_rx_frags(rxo, GFP_KERNEL);
2347         return 0;
2348 }
2349
2350 static int be_open(struct net_device *netdev)
2351 {
2352         struct be_adapter *adapter = netdev_priv(netdev);
2353         struct be_eq_obj *eqo;
2354         struct be_rx_obj *rxo;
2355         struct be_tx_obj *txo;
2356         u8 link_status;
2357         int status, i;
2358
2359         status = be_rx_qs_create(adapter);
2360         if (status)
2361                 goto err;
2362
2363         be_irq_register(adapter);
2364
2365         if (!lancer_chip(adapter))
2366                 be_intr_set(adapter, true);
2367
2368         for_all_rx_queues(adapter, rxo, i)
2369                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2370
2371         for_all_tx_queues(adapter, txo, i)
2372                 be_cq_notify(adapter, txo->cq.id, true, 0);
2373
2374         be_async_mcc_enable(adapter);
2375
2376         for_all_evt_queues(adapter, eqo, i) {
2377                 napi_enable(&eqo->napi);
2378                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2379         }
2380
2381         status = be_cmd_link_status_query(adapter, NULL, NULL,
2382                                           &link_status, 0);
2383         if (!status)
2384                 be_link_status_update(adapter, link_status);
2385
2386         return 0;
2387 err:
2388         be_close(adapter->netdev);
2389         return -EIO;
2390 }
2391
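/* Enable or disable Wake-on-LAN: program (or clear, by passing a zeroed
 * MAC) the magic-packet filter in firmware and arm/disarm PME generation
 * for the D3hot and D3cold states to match.
 */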
2392 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2393 {
2394         struct be_dma_mem cmd;
2395         int status = 0;
2396         u8 mac[ETH_ALEN];
2397
2398         memset(mac, 0, ETH_ALEN);
2399
2400         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2401         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2402                                     GFP_KERNEL);
2403         if (cmd.va == NULL)
2404                 return -1;
2405         memset(cmd.va, 0, cmd.size);
2406
2407         if (enable) {
2408                 status = pci_write_config_dword(adapter->pdev,
2409                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2410                 if (status) {
2411                         dev_err(&adapter->pdev->dev,
2412                                 "Could not enable Wake-on-LAN\n");
2413                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2414                                           cmd.dma);
2415                         return status;
2416                 }
2417                 status = be_cmd_enable_magic_wol(adapter,
2418                                 adapter->netdev->dev_addr, &cmd);
2419                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2420                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2421         } else {
2422                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2423                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2424                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2425         }
2426
2427         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2428         return status;
2429 }
2430
2431 /*
2432  * Generate a seed MAC address from the PF MAC Address using jhash.
2433  * MAC addresses for VFs are assigned incrementally starting from the seed.
2434  * These addresses are programmed in the ASIC by the PF and the VF driver
2435  * queries for the MAC address during its probe.
2436  */
2437 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2438 {
2439         u32 vf;
2440         int status = 0;
2441         u8 mac[ETH_ALEN];
2442         struct be_vf_cfg *vf_cfg;
2443
2444         be_vf_eth_addr_generate(adapter, mac);
2445
2446         for_all_vfs(adapter, vf_cfg, vf) {
2447                 if (lancer_chip(adapter)) {
2448                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2449                 } else {
2450                         status = be_cmd_pmac_add(adapter, mac,
2451                                                  vf_cfg->if_handle,
2452                                                  &vf_cfg->pmac_id, vf + 1);
2453                 }
2454
2455                 if (status)
2456                         dev_err(&adapter->pdev->dev,
2457                         "Mac address assignment failed for VF %d\n", vf);
2458                 else
2459                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2460
2461                 mac[5] += 1;
2462         }
2463         return status;
2464 }
2465
2466 static void be_vf_clear(struct be_adapter *adapter)
2467 {
2468         struct be_vf_cfg *vf_cfg;
2469         u32 vf;
2470
2471         for_all_vfs(adapter, vf_cfg, vf) {
2472                 if (lancer_chip(adapter))
2473                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2474                 else
2475                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2476                                         vf_cfg->pmac_id, vf + 1);
2477
2478                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2479         }
2480 }
2481
2482 static int be_clear(struct be_adapter *adapter)
2483 {
2484         int i = 1;
2485
2486         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2487                 cancel_delayed_work_sync(&adapter->work);
2488                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2489         }
2490
2491         if (sriov_enabled(adapter))
2492                 be_vf_clear(adapter);
2493
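        /* Remove the secondary UC MACs programmed into the MAC filter;
         * pmac_id[0] is the primary MAC and is freed along with the
         * interface below.
         */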
2494         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2495                 be_cmd_pmac_del(adapter, adapter->if_handle,
2496                         adapter->pmac_id[i], 0);
2497
2498         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2499
2500         be_mcc_queues_destroy(adapter);
2501         be_rx_cqs_destroy(adapter);
2502         be_tx_queues_destroy(adapter);
2503         be_evt_queues_destroy(adapter);
2504
2505         /* tell fw we're done with firing cmds */
2506         be_cmd_fw_clean(adapter);
2507
2508         be_msix_disable(adapter);
2509         kfree(adapter->pmac_id);
2510         return 0;
2511 }
2512
2513 static void be_vf_setup_init(struct be_adapter *adapter)
2514 {
2515         struct be_vf_cfg *vf_cfg;
2516         int vf;
2517
2518         for_all_vfs(adapter, vf_cfg, vf) {
2519                 vf_cfg->if_handle = -1;
2520                 vf_cfg->pmac_id = -1;
2521         }
2522 }
2523
2524 static int be_vf_setup(struct be_adapter *adapter)
2525 {
2526         struct be_vf_cfg *vf_cfg;
2527         u32 cap_flags, en_flags, vf;
2528         u16 lnk_speed;
2529         int status;
2530
2531         be_vf_setup_init(adapter);
2532
2533         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2534                                 BE_IF_FLAGS_MULTICAST;
2535         for_all_vfs(adapter, vf_cfg, vf) {
2536                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2537                                           &vf_cfg->if_handle, NULL, vf + 1);
2538                 if (status)
2539                         goto err;
2540         }
2541
2542         status = be_vf_eth_addr_config(adapter);
2543         if (status)
2544                 goto err;
2545
2546         for_all_vfs(adapter, vf_cfg, vf) {
2547                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2548                                                   NULL, vf + 1);
2549                 if (status)
2550                         goto err;
2551                 vf_cfg->tx_rate = lnk_speed * 10;
2552         }
2553         return 0;
2554 err:
2555         return status;
2556 }
2557
2558 static void be_setup_init(struct be_adapter *adapter)
2559 {
2560         adapter->vlan_prio_bmap = 0xff;
2561         adapter->link_speed = -1;
2562         adapter->if_handle = -1;
2563         adapter->be3_native = false;
2564         adapter->promiscuous = false;
2565         adapter->eq_next_idx = 0;
2566 }
2567
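/* Used on Lancer VFs: adopt the MAC provisioned in the FW MAC list. If
 * the entry is already active, query the address and reuse its pmac_id;
 * otherwise program the address with a pmac_add.
 */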
2568 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2569 {
2570         u32 pmac_id;
2571         int status;
2572         bool pmac_id_active;
2573
2574         status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2575                                                         &pmac_id, mac);
2576         if (status != 0)
2577                 goto do_none;
2578
2579         if (pmac_id_active) {
2580                 status = be_cmd_mac_addr_query(adapter, mac,
2581                                 MAC_ADDRESS_TYPE_NETWORK,
2582                                 false, adapter->if_handle, pmac_id);
2583
2584                 if (!status)
2585                         adapter->pmac_id[0] = pmac_id;
2586         } else {
2587                 status = be_cmd_pmac_add(adapter, mac,
2588                                 adapter->if_handle, &adapter->pmac_id[0], 0);
2589         }
2590 do_none:
2591         return status;
2592 }
2593
2594 static int be_setup(struct be_adapter *adapter)
2595 {
2596         struct net_device *netdev = adapter->netdev;
2597         u32 cap_flags, en_flags;
2598         u32 tx_fc, rx_fc;
2599         int status;
2600         u8 mac[ETH_ALEN];
2601
2602         be_setup_init(adapter);
2603
2604         be_cmd_req_native_mode(adapter);
2605
2606         be_msix_enable(adapter);
2607
2608         status = be_evt_queues_create(adapter);
2609         if (status)
2610                 goto err;
2611
2612         status = be_tx_cqs_create(adapter);
2613         if (status)
2614                 goto err;
2615
2616         status = be_rx_cqs_create(adapter);
2617         if (status)
2618                 goto err;
2619
2620         status = be_mcc_queues_create(adapter);
2621         if (status)
2622                 goto err;
2623
2624         memset(mac, 0, ETH_ALEN);
2625         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2626                         true /*permanent */, 0, 0);
2627         if (status)
2628                 return status;
2629         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2630         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2631
2632         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2633                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2634         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2635                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2636
2637         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2638                 cap_flags |= BE_IF_FLAGS_RSS;
2639                 en_flags |= BE_IF_FLAGS_RSS;
2640         }
2641         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2642                         netdev->dev_addr, &adapter->if_handle,
2643                         &adapter->pmac_id[0], 0);
2644         if (status != 0)
2645                 goto err;
2646
2647          /* The VF's permanent mac queried from card is incorrect.
2648           * For BEx: Query the mac configured by the PF using if_handle.
2649           * For Lancer: Get and use mac_list to obtain mac address.
2650           */
2651         if (!be_physfn(adapter)) {
2652                 if (lancer_chip(adapter))
2653                         status = be_add_mac_from_list(adapter, mac);
2654                 else
2655                         status = be_cmd_mac_addr_query(adapter, mac,
2656                                         MAC_ADDRESS_TYPE_NETWORK, false,
2657                                         adapter->if_handle, 0);
2658                 if (!status) {
2659                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2660                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2661                 }
2662         }
2663
2664         status = be_tx_qs_create(adapter);
2665         if (status)
2666                 goto err;
2667
2668         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2669
2670         status = be_vid_config(adapter, false, 0);
2671         if (status)
2672                 goto err;
2673
2674         be_set_rx_mode(adapter->netdev);
2675
2676         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2677         /* For Lancer: It is legal for this cmd to fail on VF */
2678         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2679                 goto err;
2680
2681         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2682                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2683                                         adapter->rx_fc);
2684                 /* For Lancer: It is legal for this cmd to fail on VF */
2685                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2686                         goto err;
2687         }
2688
2689         pcie_set_readrq(adapter->pdev, 4096);
2690
2691         if (sriov_enabled(adapter)) {
2692                 status = be_vf_setup(adapter);
2693                 if (status)
2694                         goto err;
2695         }
2696
2697         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2698         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2699
2700         return 0;
2701 err:
2702         be_clear(adapter);
2703         return status;
2704 }
2705
2706 #ifdef CONFIG_NET_POLL_CONTROLLER
2707 static void be_netpoll(struct net_device *netdev)
2708 {
2709         struct be_adapter *adapter = netdev_priv(netdev);
2710         struct be_eq_obj *eqo;
2711         int i;
2712
2713         for_all_evt_queues(adapter, eqo, i)
2714                 event_handle(eqo);
2715
2716         return;
2717 }
2718 #endif
2719
2720 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2721 static bool be_flash_redboot(struct be_adapter *adapter,
2722                         const u8 *p, u32 img_start, int image_size,
2723                         int hdr_size)
2724 {
2725         u32 crc_offset;
2726         u8 flashed_crc[4];
2727         int status;
2728
2729         crc_offset = hdr_size + img_start + image_size - 4;
2730
2731         p += crc_offset;
2732
2733         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2734                         (image_size - 4));
2735         if (status) {
2736                 dev_err(&adapter->pdev->dev,
2737                 "could not get crc from flash, not flashing redboot\n");
2738                 return false;
2739         }
2740
2741         /* update redboot only if crc does not match */
2742         if (!memcmp(flashed_crc, p, 4))
2743                 return false;
2744         else
2745                 return true;
2746 }
2747
2748 static bool phy_flashing_required(struct be_adapter *adapter)
2749 {
2750         int status = 0;
2751         struct be_phy_info phy_info;
2752
2753         status = be_cmd_get_phy_info(adapter, &phy_info);
2754         if (status)
2755                 return false;
2756         if ((phy_info.phy_type == TN_8022) &&
2757                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2758                 return true;
2759         }
2760         return false;
2761 }
2762
2763 static int be_flash_data(struct be_adapter *adapter,
2764                         const struct firmware *fw,
2765                         struct be_dma_mem *flash_cmd, int num_of_images)
2766
2767 {
2768         int status = 0, i, filehdr_size = 0;
2769         u32 total_bytes = 0, flash_op;
2770         int num_bytes;
2771         const u8 *p = fw->data;
2772         struct be_cmd_write_flashrom *req = flash_cmd->va;
2773         const struct flash_comp *pflashcomp;
2774         int num_comp;
2775
2776         static const struct flash_comp gen3_flash_types[10] = {
2777                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2778                         FLASH_IMAGE_MAX_SIZE_g3},
2779                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2780                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2781                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2782                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2783                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2784                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2785                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2786                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2787                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2788                         FLASH_IMAGE_MAX_SIZE_g3},
2789                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2790                         FLASH_IMAGE_MAX_SIZE_g3},
2791                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2792                         FLASH_IMAGE_MAX_SIZE_g3},
2793                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2794                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2795                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2796                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2797         };
2798         static const struct flash_comp gen2_flash_types[8] = {
2799                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2800                         FLASH_IMAGE_MAX_SIZE_g2},
2801                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2802                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2803                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2804                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2805                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2806                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2807                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2808                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2809                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2810                         FLASH_IMAGE_MAX_SIZE_g2},
2811                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2812                         FLASH_IMAGE_MAX_SIZE_g2},
2813                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2814                          FLASH_IMAGE_MAX_SIZE_g2}
2815         };
2816
2817         if (adapter->generation == BE_GEN3) {
2818                 pflashcomp = gen3_flash_types;
2819                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2820                 num_comp = ARRAY_SIZE(gen3_flash_types);
2821         } else {
2822                 pflashcomp = gen2_flash_types;
2823                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2824                 num_comp = ARRAY_SIZE(gen2_flash_types);
2825         }
2826         for (i = 0; i < num_comp; i++) {
2827                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2828                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2829                         continue;
2830                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2831                         if (!phy_flashing_required(adapter))
2832                                 continue;
2833                 }
2834                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2835                         (!be_flash_redboot(adapter, fw->data,
2836                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2837                         (num_of_images * sizeof(struct image_hdr)))))
2838                         continue;
2839                 p = fw->data;
2840                 p += filehdr_size + pflashcomp[i].offset
2841                         + (num_of_images * sizeof(struct image_hdr));
2842                 if (p + pflashcomp[i].size > fw->data + fw->size)
2843                         return -1;
2844                 total_bytes = pflashcomp[i].size;
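                /* Write the component 32KB at a time: intermediate chunks use
                 * a SAVE op that only stages data, and the final chunk uses a
                 * FLASH op that commits the staged image to flash.
                 */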
2845                 while (total_bytes) {
2846                         if (total_bytes > 32*1024)
2847                                 num_bytes = 32*1024;
2848                         else
2849                                 num_bytes = total_bytes;
2850                         total_bytes -= num_bytes;
2851                         if (!total_bytes) {
2852                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2853                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2854                                 else
2855                                         flash_op = FLASHROM_OPER_FLASH;
2856                         } else {
2857                                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2858                                         flash_op = FLASHROM_OPER_PHY_SAVE;
2859                                 else
2860                                         flash_op = FLASHROM_OPER_SAVE;
2861                         }
2862                         memcpy(req->params.data_buf, p, num_bytes);
2863                         p += num_bytes;
2864                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2865                                 pflashcomp[i].optype, flash_op, num_bytes);
2866                         if (status) {
2867                                 if ((status == ILLEGAL_IOCTL_REQ) &&
2868                                         (pflashcomp[i].optype ==
2869                                                 IMG_TYPE_PHY_FW))
2870                                         break;
2871                                 dev_err(&adapter->pdev->dev,
2872                                         "cmd to write to flash rom failed.\n");
2873                                 return -EIO;
2874                         }
2875                 }
2876         }
2877         return 0;
2878 }
2879
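/* Infer the UFI image generation from the first character of the build
 * string in the flash file header: '3' => BE3-class UFI, '2' => BE2.
 */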
2880 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2881 {
2882         if (fhdr == NULL)
2883                 return 0;
2884         if (fhdr->build[0] == '3')
2885                 return BE_GEN3;
2886         else if (fhdr->build[0] == '2')
2887                 return BE_GEN2;
2888         else
2889                 return 0;
2890 }
2891
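/* Download firmware to a Lancer chip: stream the image to the "/prg"
 * object in 32KB WRITE_OBJECT chunks, then issue a zero-length write at
 * the final offset to commit the downloaded image.
 */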
2892 static int lancer_fw_download(struct be_adapter *adapter,
2893                                 const struct firmware *fw)
2894 {
2895 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2896 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2897         struct be_dma_mem flash_cmd;
2898         const u8 *data_ptr = NULL;
2899         u8 *dest_image_ptr = NULL;
2900         size_t image_size = 0;
2901         u32 chunk_size = 0;
2902         u32 data_written = 0;
2903         u32 offset = 0;
2904         int status = 0;
2905         u8 add_status = 0;
2906
2907         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2908                 dev_err(&adapter->pdev->dev,
2909                         "FW image not properly aligned. "
2910                         "Length must be 4-byte aligned.\n");
2911                 status = -EINVAL;
2912                 goto lancer_fw_exit;
2913         }
2914
2915         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2916                                 + LANCER_FW_DOWNLOAD_CHUNK;
2917         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2918                                                 &flash_cmd.dma, GFP_KERNEL);
2919         if (!flash_cmd.va) {
2920                 status = -ENOMEM;
2921                 dev_err(&adapter->pdev->dev,
2922                         "Memory allocation failure while flashing\n");
2923                 goto lancer_fw_exit;
2924         }
2925
2926         dest_image_ptr = flash_cmd.va +
2927                                 sizeof(struct lancer_cmd_req_write_object);
2928         image_size = fw->size;
2929         data_ptr = fw->data;
2930
2931         while (image_size) {
2932                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2933
2934                 /* Copy the image chunk content. */
2935                 memcpy(dest_image_ptr, data_ptr, chunk_size);
2936
2937                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2938                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2939                                 &data_written, &add_status);
2940
2941                 if (status)
2942                         break;
2943
2944                 offset += data_written;
2945                 data_ptr += data_written;
2946                 image_size -= data_written;
2947         }
2948
2949         if (!status) {
2950                 /* Commit the FW written */
2951                 status = lancer_cmd_write_object(adapter, &flash_cmd,
2952                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2953                                         &data_written, &add_status);
2954         }
2955
2956         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2957                                 flash_cmd.dma);
2958         if (status) {
2959                 dev_err(&adapter->pdev->dev,
2960                         "Firmware load error. "
2961                         "Status code: 0x%x Additional Status: 0x%x\n",
2962                         status, add_status);
2963                 goto lancer_fw_exit;
2964         }
2965
2966         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2967 lancer_fw_exit:
2968         return status;
2969 }
2970
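/* Flash a BE2/BE3 UFI image: verify that the UFI generation matches the
 * adapter generation, then flash each image component via be_flash_data().
 * On GEN3, only images whose header reports imageid == 1 are flashed.
 */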
2971 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2972 {
2973         struct flash_file_hdr_g2 *fhdr;
2974         struct flash_file_hdr_g3 *fhdr3;
2975         struct image_hdr *img_hdr_ptr = NULL;
2976         struct be_dma_mem flash_cmd;
2977         const u8 *p;
2978         int status = 0, i = 0, num_imgs = 0;
2979
2980         p = fw->data;
2981         fhdr = (struct flash_file_hdr_g2 *) p;
2982
2983         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2984         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2985                                           &flash_cmd.dma, GFP_KERNEL);
2986         if (!flash_cmd.va) {
2987                 status = -ENOMEM;
2988                 dev_err(&adapter->pdev->dev,
2989                         "Memory allocation failure while flashing\n");
2990                 goto be_fw_exit;
2991         }
2992
2993         if ((adapter->generation == BE_GEN3) &&
2994                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2995                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2996                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2997                 for (i = 0; i < num_imgs; i++) {
2998                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2999                                         (sizeof(struct flash_file_hdr_g3) +
3000                                          i * sizeof(struct image_hdr)));
3001                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3002                                 status = be_flash_data(adapter, fw, &flash_cmd,
3003                                                         num_imgs);
3004                 }
3005         } else if ((adapter->generation == BE_GEN2) &&
3006                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3007                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3008         } else {
3009                 dev_err(&adapter->pdev->dev,
3010                         "UFI and Interface are not compatible for flashing\n");
3011                 status = -EINVAL;
3012         }
3013
3014         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3015                           flash_cmd.dma);
3016         if (status) {
3017                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3018                 goto be_fw_exit;
3019         }
3020
3021         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3022
3023 be_fw_exit:
3024         return status;
3025 }
3026
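/* Fetch the named firmware file from userspace and flash it using the
 * chip-appropriate download routine. Typically reached via
 * "ethtool -f <iface> <file>"; the interface must be up.
 */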
3027 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3028 {
3029         const struct firmware *fw;
3030         int status;
3031
3032         if (!netif_running(adapter->netdev)) {
3033                 dev_err(&adapter->pdev->dev,
3034                         "Firmware load not allowed (interface is down)\n");
3035                 return -ENETDOWN;
3036         }
3037
3038         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3039         if (status)
3040                 goto fw_exit;
3041
3042         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3043
3044         if (lancer_chip(adapter))
3045                 status = lancer_fw_download(adapter, fw);
3046         else
3047                 status = be_fw_download(adapter, fw);
3048
3049 fw_exit:
3050         release_firmware(fw);
3051         return status;
3052 }
3053
3054 static const struct net_device_ops be_netdev_ops = {
3055         .ndo_open               = be_open,
3056         .ndo_stop               = be_close,
3057         .ndo_start_xmit         = be_xmit,
3058         .ndo_set_rx_mode        = be_set_rx_mode,
3059         .ndo_set_mac_address    = be_mac_addr_set,
3060         .ndo_change_mtu         = be_change_mtu,
3061         .ndo_get_stats64        = be_get_stats64,
3062         .ndo_validate_addr      = eth_validate_addr,
3063         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3064         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3065         .ndo_set_vf_mac         = be_set_vf_mac,
3066         .ndo_set_vf_vlan        = be_set_vf_vlan,
3067         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3068         .ndo_get_vf_config      = be_get_vf_config,
3069 #ifdef CONFIG_NET_POLL_CONTROLLER
3070         .ndo_poll_controller    = be_netpoll,
3071 #endif
3072 };
3073
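/* Set up netdev features (checksum/TSO/VLAN offloads, RX hashing when
 * multiple RX queues are in use), install the netdev ops and ethtool ops,
 * and register a NAPI context for each event queue.
 */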
3074 static void be_netdev_init(struct net_device *netdev)
3075 {
3076         struct be_adapter *adapter = netdev_priv(netdev);
3077         struct be_eq_obj *eqo;
3078         int i;
3079
3080         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3081                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3082                 NETIF_F_HW_VLAN_TX;
3083         if (be_multi_rxq(adapter))
3084                 netdev->hw_features |= NETIF_F_RXHASH;
3085
3086         netdev->features |= netdev->hw_features |
3087                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3088
3089         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3090                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3091
3092         netdev->priv_flags |= IFF_UNICAST_FLT;
3093
3094         netdev->flags |= IFF_MULTICAST;
3095
3096         netif_set_gso_max_size(netdev, 65535);
3097
3098         netdev->netdev_ops = &be_netdev_ops;
3099
3100         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3101
3102         for_all_evt_queues(adapter, eqo, i)
3103                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3104 }
3105
3106 static void be_unmap_pci_bars(struct be_adapter *adapter)
3107 {
3108         if (adapter->csr)
3109                 iounmap(adapter->csr);
3110         if (adapter->db)
3111                 iounmap(adapter->db);
3112 }
3113
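/* Map the PCI BARs used by the driver: on Lancer only BAR 0 (doorbells);
 * on BE2/BE3 the CSR BAR 2 (PF only) plus the doorbell BAR (4 on BE2 and
 * BE3 PFs, 0 on BE3 VFs).
 */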
3114 static int be_map_pci_bars(struct be_adapter *adapter)
3115 {
3116         u8 __iomem *addr;
3117         int db_reg;
3118
3119         if (lancer_chip(adapter)) {
3120                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3121                         pci_resource_len(adapter->pdev, 0));
3122                 if (addr == NULL)
3123                         return -ENOMEM;
3124                 adapter->db = addr;
3125                 return 0;
3126         }
3127
3128         if (be_physfn(adapter)) {
3129                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3130                                 pci_resource_len(adapter->pdev, 2));
3131                 if (addr == NULL)
3132                         return -ENOMEM;
3133                 adapter->csr = addr;
3134         }
3135
3136         if (adapter->generation == BE_GEN2) {
3137                 db_reg = 4;
3138         } else {
3139                 if (be_physfn(adapter))
3140                         db_reg = 4;
3141                 else
3142                         db_reg = 0;
3143         }
3144         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3145                                 pci_resource_len(adapter->pdev, db_reg));
3146         if (addr == NULL)
3147                 goto pci_map_err;
3148         adapter->db = addr;
3149
3150         return 0;
3151 pci_map_err:
3152         be_unmap_pci_bars(adapter);
3153         return -ENOMEM;
3154 }
3155
3156
3157 static void be_ctrl_cleanup(struct be_adapter *adapter)
3158 {
3159         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3160
3161         be_unmap_pci_bars(adapter);
3162
3163         if (mem->va)
3164                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3165                                   mem->dma);
3166
3167         mem = &adapter->rx_filter;
3168         if (mem->va)
3169                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3170                                   mem->dma);
3171 }
3172
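/* Map PCI BARs and allocate DMA memory for the mailbox (over-allocated by
 * 16 bytes so it can be aligned to a 16-byte boundary) and the RX filter
 * command, then initialize the mailbox/MCC locks.
 */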
3173 static int be_ctrl_init(struct be_adapter *adapter)
3174 {
3175         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3176         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3177         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3178         int status;
3179
3180         status = be_map_pci_bars(adapter);
3181         if (status)
3182                 goto done;
3183
3184         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3185         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3186                                                 mbox_mem_alloc->size,
3187                                                 &mbox_mem_alloc->dma,
3188                                                 GFP_KERNEL);
3189         if (!mbox_mem_alloc->va) {
3190                 status = -ENOMEM;
3191                 goto unmap_pci_bars;
3192         }
3193         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3194         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3195         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3196         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3197
3198         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3199         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3200                                         &rx_filter->dma, GFP_KERNEL);
3201         if (rx_filter->va == NULL) {
3202                 status = -ENOMEM;
3203                 goto free_mbox;
3204         }
3205         memset(rx_filter->va, 0, rx_filter->size);
3206
3207         mutex_init(&adapter->mbox_lock);
3208         spin_lock_init(&adapter->mcc_lock);
3209         spin_lock_init(&adapter->mcc_cq_lock);
3210
3211         init_completion(&adapter->flash_compl);
3212         pci_save_state(adapter->pdev);
3213         return 0;
3214
3215 free_mbox:
3216         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3217                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3218
3219 unmap_pci_bars:
3220         be_unmap_pci_bars(adapter);
3221
3222 done:
3223         return status;
3224 }
3225
3226 static void be_stats_cleanup(struct be_adapter *adapter)
3227 {
3228         struct be_dma_mem *cmd = &adapter->stats_cmd;
3229
3230         if (cmd->va)
3231                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3232                                   cmd->va, cmd->dma);
3233 }
3234
3235 static int be_stats_init(struct be_adapter *adapter)
3236 {
3237         struct be_dma_mem *cmd = &adapter->stats_cmd;
3238
3239         if (adapter->generation == BE_GEN2) {
3240                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3241         } else {
3242                 if (lancer_chip(adapter))
3243                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3244                 else
3245                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3246         }
3247         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3248                                      GFP_KERNEL);
3249         if (cmd->va == NULL)
3250                 return -ENOMEM;
3251         memset(cmd->va, 0, cmd->size);
3252         return 0;
3253 }
3254
3255 static void __devexit be_remove(struct pci_dev *pdev)
3256 {
3257         struct be_adapter *adapter = pci_get_drvdata(pdev);
3258
3259         if (!adapter)
3260                 return;
3261
3262         unregister_netdev(adapter->netdev);
3263
3264         be_clear(adapter);
3265
3266         be_stats_cleanup(adapter);
3267
3268         be_ctrl_cleanup(adapter);
3269
3270         be_sriov_disable(adapter);
3271
3272         pci_set_drvdata(pdev, NULL);
3273         pci_release_regions(pdev);
3274         pci_disable_device(pdev);
3275
3276         free_netdev(adapter->netdev);
3277 }
3278
3279 bool be_is_wol_supported(struct be_adapter *adapter)
3280 {
3281         return (adapter->wol_cap & BE_WOL_CAP) &&
3282                 !be_is_wol_excluded(adapter);
3283 }
3284
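/* Query static configuration from FW: function mode/caps, the VLAN limit
 * (quartered in FLEX10 mode), the unicast MAC (pmac) limit, controller
 * attributes and WOL capability.
 */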
3285 static int be_get_config(struct be_adapter *adapter)
3286 {
3287         int status;
3288
3289         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3290                         &adapter->function_mode, &adapter->function_caps);
3291         if (status)
3292                 return status;
3293
3294         if (adapter->function_mode & FLEX10_MODE)
3295                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3296         else
3297                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3298
3299         if (be_physfn(adapter))
3300                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3301         else
3302                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3303
3304         /* primary mac needs 1 pmac entry */
3305         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3306                                   sizeof(u32), GFP_KERNEL);
3307         if (!adapter->pmac_id)
3308                 return -ENOMEM;
3309
3310         status = be_cmd_get_cntl_attributes(adapter);
3311         if (status)
3312                 return status;
3313
3314         status = be_cmd_get_acpi_wol_cap(adapter);
3315         if (status) {
3316                 /* in case of a failure to get WOL capabilities,
3317                  * check the exclusion list to determine WOL capability */
3318                 if (!be_is_wol_excluded(adapter))
3319                         adapter->wol_cap |= BE_WOL_CAP;
3320         }
3321
3322         if (be_is_wol_supported(adapter))
3323                 adapter->wol = true;
3324
3325         return 0;
3326 }
3327
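/* Derive the adapter generation from the PCI device ID; for IDs that use
 * the SLI interface, validate the SLI_INTF register and record the SLI
 * family.
 */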
3328 static int be_dev_family_check(struct be_adapter *adapter)
3329 {
3330         struct pci_dev *pdev = adapter->pdev;
3331         u32 sli_intf = 0, if_type;
3332
3333         switch (pdev->device) {
3334         case BE_DEVICE_ID1:
3335         case OC_DEVICE_ID1:
3336                 adapter->generation = BE_GEN2;
3337                 break;
3338         case BE_DEVICE_ID2:
3339         case OC_DEVICE_ID2:
3340         case OC_DEVICE_ID5:
3341                 adapter->generation = BE_GEN3;
3342                 break;
3343         case OC_DEVICE_ID3:
3344         case OC_DEVICE_ID4:
3345                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3346                 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3347                                                 SLI_INTF_IF_TYPE_SHIFT;
3348
3349                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3350                         if_type != 0x02) {
3351                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3352                         return -EINVAL;
3353                 }
3354                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3355                                          SLI_INTF_FAMILY_SHIFT);
3356                 adapter->generation = BE_GEN3;
3357                 break;
3358         default:
3359                 adapter->generation = 0;
3360         }
3361         return 0;
3362 }
3363
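/* Poll SLIPORT_STATUS until the ready bit is set, for up to 30 seconds. */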
3364 static int lancer_wait_ready(struct be_adapter *adapter)
3365 {
3366 #define SLIPORT_READY_TIMEOUT 30
3367         u32 sliport_status;
3368         int status = 0, i;
3369
3370         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3371                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3372                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3373                         break;
3374
3375                 msleep(1000);
3376         }
3377
3378         if (i == SLIPORT_READY_TIMEOUT)
3379                 status = -ETIMEDOUT;
3380
3381         return status;
3382 }
3383
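/* If SLIPORT_STATUS reports an error with the reset-needed flag set,
 * initiate a port reset through SLIPORT_CONTROL and wait for the port
 * to become ready again; fail if the error persists.
 */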
3384 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3385 {
3386         int status;
3387         u32 sliport_status, err, reset_needed;
3388         status = lancer_wait_ready(adapter);
3389         if (!status) {
3390                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3391                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3392                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3393                 if (err && reset_needed) {
3394                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3395                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3396
3397                         /* check adapter has corrected the error */
3398                         status = lancer_wait_ready(adapter);
3399                         sliport_status = ioread32(adapter->db +
3400                                                         SLIPORT_STATUS_OFFSET);
3401                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3402                                                 SLIPORT_STATUS_RN_MASK);
3403                         if (status || sliport_status)
3404                                 status = -1;
3405                 } else if (err || reset_needed) {
3406                         status = -1;
3407                 }
3408         }
3409         return status;
3410 }
3411
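/* Invoked from the worker: if the Lancer function has entered an error
 * state, attempt a full recovery: port reset, then be_clear() followed
 * by be_setup()/be_open().
 */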
3412 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3413 {
3414         int status;
3415         u32 sliport_status;
3416
3417         if (adapter->eeh_err || adapter->ue_detected)
3418                 return;
3419
3420         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3421
3422         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3423                 dev_err(&adapter->pdev->dev,
3424                                 "Adapter in error state. "
3425                                 "Trying to recover.\n");
3426
3427                 status = lancer_test_and_set_rdy_state(adapter);
3428                 if (status)
3429                         goto err;
3430
3431                 netif_device_detach(adapter->netdev);
3432
3433                 if (netif_running(adapter->netdev))
3434                         be_close(adapter->netdev);
3435
3436                 be_clear(adapter);
3437
3438                 adapter->fw_timeout = false;
3439
3440                 status = be_setup(adapter);
3441                 if (status)
3442                         goto err;
3443
3444                 if (netif_running(adapter->netdev)) {
3445                         status = be_open(adapter->netdev);
3446                         if (status)
3447                                 goto err;
3448                 }
3449
3450                 netif_device_attach(adapter->netdev);
3451
3452                 dev_info(&adapter->pdev->dev,
3453                                 "Adapter error recovery succeeded\n");
3454         }
3455         return;
3456 err:
3457         dev_err(&adapter->pdev->dev,
3458                         "Adapter error recovery failed\n");
3459 }
3460
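/* Periodic (1 second) housekeeping: run error detection/recovery, refresh
 * stats, replenish RX queues that have starved and adapt EQ delays.
 */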
3461 static void be_worker(struct work_struct *work)
3462 {
3463         struct be_adapter *adapter =
3464                 container_of(work, struct be_adapter, work.work);
3465         struct be_rx_obj *rxo;
3466         struct be_eq_obj *eqo;
3467         int i;
3468
3469         if (lancer_chip(adapter))
3470                 lancer_test_and_recover_fn_err(adapter);
3471
3472         be_detect_dump_ue(adapter);
3473
3474         /* when interrupts are not yet enabled, just reap any pending
3475          * mcc completions */
3476         if (!netif_running(adapter->netdev)) {
3477                 be_process_mcc(adapter);
3478                 goto reschedule;
3479         }
3480
3481         if (!adapter->stats_cmd_sent) {
3482                 if (lancer_chip(adapter))
3483                         lancer_cmd_get_pport_stats(adapter,
3484                                                 &adapter->stats_cmd);
3485                 else
3486                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3487         }
3488
3489         for_all_rx_queues(adapter, rxo, i) {
3490                 if (rxo->rx_post_starved) {
3491                         rxo->rx_post_starved = false;
3492                         be_post_rx_frags(rxo, GFP_KERNEL);
3493                 }
3494         }
3495
3496         for_all_evt_queues(adapter, eqo, i)
3497                 be_eqd_update(adapter, eqo);
3498
3499 reschedule:
3500         adapter->work_counter++;
3501         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3502 }
3503
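/* PCI probe: enable the device, map BARs and set up the mailbox, sync with
 * FW (POST, fw_init, function reset), query the configuration, create the
 * rings via be_setup() and finally register the netdev.
 */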
3504 static int __devinit be_probe(struct pci_dev *pdev,
3505                         const struct pci_device_id *pdev_id)
3506 {
3507         int status = 0;
3508         struct be_adapter *adapter;
3509         struct net_device *netdev;
3510
3511         status = pci_enable_device(pdev);
3512         if (status)
3513                 goto do_none;
3514
3515         status = pci_request_regions(pdev, DRV_NAME);
3516         if (status)
3517                 goto disable_dev;
3518         pci_set_master(pdev);
3519
3520         netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3521         if (netdev == NULL) {
3522                 status = -ENOMEM;
3523                 goto rel_reg;
3524         }
3525         adapter = netdev_priv(netdev);
3526         adapter->pdev = pdev;
3527         pci_set_drvdata(pdev, adapter);
3528
3529         status = be_dev_family_check(adapter);
3530         if (status)
3531                 goto free_netdev;
3532
3533         adapter->netdev = netdev;
3534         SET_NETDEV_DEV(netdev, &pdev->dev);
3535
3536         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3537         if (!status) {
3538                 netdev->features |= NETIF_F_HIGHDMA;
3539         } else {
3540                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3541                 if (status) {
3542                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3543                         goto free_netdev;
3544                 }
3545         }
3546
3547         status = be_sriov_enable(adapter);
3548         if (status)
3549                 goto free_netdev;
3550
3551         status = be_ctrl_init(adapter);
3552         if (status)
3553                 goto disable_sriov;
3554
3555         if (lancer_chip(adapter)) {
3556                 status = lancer_wait_ready(adapter);
3557                 if (!status) {
3558                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3559                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3560                         status = lancer_test_and_set_rdy_state(adapter);
3561                 }
3562                 if (status) {
3563                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3564                         goto ctrl_clean;
3565                 }
3566         }
3567
3568         /* sync up with fw's ready state */
3569         if (be_physfn(adapter)) {
3570                 status = be_cmd_POST(adapter);
3571                 if (status)
3572                         goto ctrl_clean;
3573         }
3574
3575         /* tell fw we're ready to fire cmds */
3576         status = be_cmd_fw_init(adapter);
3577         if (status)
3578                 goto ctrl_clean;
3579
3580         status = be_cmd_reset_function(adapter);
3581         if (status)
3582                 goto ctrl_clean;
3583
3584         /* The INTR bit may be set in the card when probed by a kdump kernel
3585          * after a crash.
3586          */
3587         if (!lancer_chip(adapter))
3588                 be_intr_set(adapter, false);
3589
3590         status = be_stats_init(adapter);
3591         if (status)
3592                 goto ctrl_clean;
3593
3594         status = be_get_config(adapter);
3595         if (status)
3596                 goto stats_clean;
3597
3598         INIT_DELAYED_WORK(&adapter->work, be_worker);
3599         adapter->rx_fc = adapter->tx_fc = true;
3600
3601         status = be_setup(adapter);
3602         if (status)
3603                 goto msix_disable;
3604
3605         be_netdev_init(netdev);
3606         status = register_netdev(netdev);
3607         if (status != 0)
3608                 goto unsetup;
3609
3610         dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3611                 adapter->port_num);
3612
3613         return 0;
3614
3615 unsetup:
3616         be_clear(adapter);
3617 msix_disable:
3618         be_msix_disable(adapter);
3619 stats_clean:
3620         be_stats_cleanup(adapter);
3621 ctrl_clean:
3622         be_ctrl_cleanup(adapter);
3623 disable_sriov:
3624         be_sriov_disable(adapter);
3625 free_netdev:
3626         free_netdev(netdev);
3627         pci_set_drvdata(pdev, NULL);
3628 rel_reg:
3629         pci_release_regions(pdev);
3630 disable_dev:
3631         pci_disable_device(pdev);
3632 do_none:
3633         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3634         return status;
3635 }
3636
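/* Legacy PM suspend: arm WOL if enabled, detach and close the interface,
 * free adapter resources and put the device into the requested low-power
 * state. be_resume() reverses these steps.
 */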
3637 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3638 {
3639         struct be_adapter *adapter = pci_get_drvdata(pdev);
3640         struct net_device *netdev =  adapter->netdev;
3641
3642         if (adapter->wol)
3643                 be_setup_wol(adapter, true);
3644
3645         netif_device_detach(netdev);
3646         if (netif_running(netdev)) {
3647                 rtnl_lock();
3648                 be_close(netdev);
3649                 rtnl_unlock();
3650         }
3651         be_clear(adapter);
3652
3653         pci_save_state(pdev);
3654         pci_disable_device(pdev);
3655         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3656         return 0;
3657 }
3658
3659 static int be_resume(struct pci_dev *pdev)
3660 {
3661         int status = 0;
3662         struct be_adapter *adapter = pci_get_drvdata(pdev);
3663         struct net_device *netdev =  adapter->netdev;
3664
3665         netif_device_detach(netdev);
3666
3667         status = pci_enable_device(pdev);
3668         if (status)
3669                 return status;
3670
3671         pci_set_power_state(pdev, PCI_D0);
3672         pci_restore_state(pdev);
3673
3674         /* tell fw we're ready to fire cmds */
3675         status = be_cmd_fw_init(adapter);
3676         if (status)
3677                 return status;
3678
3679         be_setup(adapter);
3680         if (netif_running(netdev)) {
3681                 rtnl_lock();
3682                 be_open(netdev);
3683                 rtnl_unlock();
3684         }
3685         netif_device_attach(netdev);
3686
3687         if (adapter->wol)
3688                 be_setup_wol(adapter, false);
3689
3690         return 0;
3691 }
3692
3693 /*
3694  * An FLR stops BE from DMAing any data, so reset the function at shutdown.
3695  */
3696 static void be_shutdown(struct pci_dev *pdev)
3697 {
3698         struct be_adapter *adapter = pci_get_drvdata(pdev);
3699
3700         if (!adapter)
3701                 return;
3702
3703         cancel_delayed_work_sync(&adapter->work);
3704
3705         netif_device_detach(adapter->netdev);
3706
3707         if (adapter->wol)
3708                 be_setup_wol(adapter, true);
3709
3710         be_cmd_reset_function(adapter);
3711
3712         pci_disable_device(pdev);
3713 }
3714
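/* PCI error (EEH) handlers: on error, detach the netdev and free resources;
 * on slot reset, re-enable the device and wait for FW readiness via POST;
 * on resume, rebuild the adapter and reattach the netdev.
 */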
3715 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3716                                 pci_channel_state_t state)
3717 {
3718         struct be_adapter *adapter = pci_get_drvdata(pdev);
3719         struct net_device *netdev =  adapter->netdev;
3720
3721         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3722
3723         adapter->eeh_err = true;
3724
3725         netif_device_detach(netdev);
3726
3727         if (netif_running(netdev)) {
3728                 rtnl_lock();
3729                 be_close(netdev);
3730                 rtnl_unlock();
3731         }
3732         be_clear(adapter);
3733
3734         if (state == pci_channel_io_perm_failure)
3735                 return PCI_ERS_RESULT_DISCONNECT;
3736
3737         pci_disable_device(pdev);
3738
3739         return PCI_ERS_RESULT_NEED_RESET;
3740 }
3741
3742 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3743 {
3744         struct be_adapter *adapter = pci_get_drvdata(pdev);
3745         int status;
3746
3747         dev_info(&adapter->pdev->dev, "EEH reset\n");
3748         adapter->eeh_err = false;
3749         adapter->ue_detected = false;
3750         adapter->fw_timeout = false;
3751
3752         status = pci_enable_device(pdev);
3753         if (status)
3754                 return PCI_ERS_RESULT_DISCONNECT;
3755
3756         pci_set_master(pdev);
3757         pci_set_power_state(pdev, PCI_D0);
3758         pci_restore_state(pdev);
3759
3760         /* Check if card is ok and fw is ready */
3761         status = be_cmd_POST(adapter);
3762         if (status)
3763                 return PCI_ERS_RESULT_DISCONNECT;
3764
3765         return PCI_ERS_RESULT_RECOVERED;
3766 }
3767
3768 static void be_eeh_resume(struct pci_dev *pdev)
3769 {
3770         int status = 0;
3771         struct be_adapter *adapter = pci_get_drvdata(pdev);
3772         struct net_device *netdev =  adapter->netdev;
3773
3774         dev_info(&adapter->pdev->dev, "EEH resume\n");
3775
3776         pci_save_state(pdev);
3777
3778         /* tell fw we're ready to fire cmds */
3779         status = be_cmd_fw_init(adapter);
3780         if (status)
3781                 goto err;
3782
3783         status = be_setup(adapter);
3784         if (status)
3785                 goto err;
3786
3787         if (netif_running(netdev)) {
3788                 status = be_open(netdev);
3789                 if (status)
3790                         goto err;
3791         }
3792         netif_device_attach(netdev);
3793         return;
3794 err:
3795         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3796 }
3797
3798 static struct pci_error_handlers be_eeh_handlers = {
3799         .error_detected = be_eeh_err_detected,
3800         .slot_reset = be_eeh_reset,
3801         .resume = be_eeh_resume,
3802 };
3803
3804 static struct pci_driver be_driver = {
3805         .name = DRV_NAME,
3806         .id_table = be_dev_ids,
3807         .probe = be_probe,
3808         .remove = be_remove,
3809         .suspend = be_suspend,
3810         .resume = be_resume,
3811         .shutdown = be_shutdown,
3812         .err_handler = &be_eeh_handlers
3813 };
3814
3815 static int __init be_init_module(void)
3816 {
3817         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3818             rx_frag_size != 2048) {
3819                 printk(KERN_WARNING DRV_NAME
3820                         ": Module param rx_frag_size must be 2048/4096/8192."
3821                         " Using 2048\n");
3822                 rx_frag_size = 2048;
3823         }
3824
3825         return pci_register_driver(&be_driver);
3826 }
3827 module_init(be_init_module);
3828
3829 static void __exit be_exit_module(void)
3830 {
3831         pci_unregister_driver(&be_driver);
3832 }
3833 module_exit(be_exit_module);