vlan: Don't check for vlan group before vlan_tx_tag_present.
[linux-2.6.git] / drivers / net / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
23 MODULE_DEVICE_TABLE(pci, be_dev_ids);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { 0 }
45 };
46 MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE (Unrecoverable Error) Status Low CSR: one description string per
 * bit of the low status register, in bit order. */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE (Unrecoverable Error) Status High CSR: one description string per
 * bit of the high status register, in bit order.  Fixed a missing comma
 * after "NETC" which silently concatenated it with the following
 * "Unknown", shifting every later entry and shrinking the array. */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
117
118 static inline bool be_multi_rxq(struct be_adapter *adapter)
119 {
120         return (adapter->num_rx_qs > 1);
121 }
122
123 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
124 {
125         struct be_dma_mem *mem = &q->dma_mem;
126         if (mem->va)
127                 pci_free_consistent(adapter->pdev, mem->size,
128                         mem->va, mem->dma);
129 }
130
131 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
132                 u16 len, u16 entry_size)
133 {
134         struct be_dma_mem *mem = &q->dma_mem;
135
136         memset(q, 0, sizeof(*q));
137         q->len = len;
138         q->entry_size = entry_size;
139         mem->size = len * entry_size;
140         mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
141         if (!mem->va)
142                 return -1;
143         memset(mem->va, 0, mem->size);
144         return 0;
145 }
146
147 static void be_intr_set(struct be_adapter *adapter, bool enable)
148 {
149         u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
150         u32 reg = ioread32(addr);
151         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
152
153         if (adapter->eeh_err)
154                 return;
155
156         if (!enabled && enable)
157                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158         else if (enabled && !enable)
159                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160         else
161                 return;
162
163         iowrite32(reg, addr);
164 }
165
166 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
167 {
168         u32 val = 0;
169         val |= qid & DB_RQ_RING_ID_MASK;
170         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
171
172         wmb();
173         iowrite32(val, adapter->db + DB_RQ_OFFSET);
174 }
175
176 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178         u32 val = 0;
179         val |= qid & DB_TXULP_RING_ID_MASK;
180         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
181
182         wmb();
183         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
184 }
185
/* Ring the event-queue doorbell: acknowledge num_popped events and
 * optionally re-arm the EQ and/or clear the interrupt.  No-op after an
 * EEH error since the device must not be touched then. */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* marks this doorbell write as an event-queue ack */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
203
204 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205 {
206         u32 val = 0;
207         val |= qid & DB_CQ_RING_ID_MASK;
208
209         if (adapter->eeh_err)
210                 return;
211
212         if (arm)
213                 val |= 1 << DB_CQ_REARM_SHIFT;
214         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
215         iowrite32(val, adapter->db + DB_CQ_OFFSET);
216 }
217
/* ndo_set_mac_address handler.
 *
 * Validates the new address and programs it as the interface's primary
 * MAC (pmac) via FW commands; the netdev copy is updated only if the
 * HW accepted it.  Returns 0 on success or a negative/FW error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	/* Replace the current pmac entry: delete old, then add new */
	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	/* mirror into the netdev only when HW config succeeded (or VF) */
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
245
/* Refresh netdev->stats from the HW stats command buffer.
 *
 * Aggregates per-RX-queue SW counters, the per-port RXF error counters
 * and the ERX drop counters into the standard net_device_stats fields.
 * The whole structure is rebuilt from scratch on every call.
 */
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	/* sum SW-maintained counters across all RX queues */
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/*  no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
}
301
302 void be_link_status_update(struct be_adapter *adapter, bool link_up)
303 {
304         struct net_device *netdev = adapter->netdev;
305
306         /* If link came up or went down */
307         if (adapter->link_up != link_up) {
308                 adapter->link_speed = -1;
309                 if (link_up) {
310                         netif_start_queue(netdev);
311                         netif_carrier_on(netdev);
312                         printk(KERN_INFO "%s: Link up\n", netdev->name);
313                 } else {
314                         netif_stop_queue(netdev);
315                         netif_carrier_off(netdev);
316                         printk(KERN_INFO "%s: Link down\n", netdev->name);
317                 }
318                 adapter->link_up = link_up;
319         }
320 }
321
/* Update the EQ delay on BE based on the RX frags consumed / sec
 * (adaptive interrupt coalescing).  Recomputed at most once a second;
 * the new delay is pushed to FW only when it actually changes. */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	/* frags per second over the elapsed interval */
	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	/* map frag rate to a delay value, clamped to [min_eqd, max_eqd] */
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	/* very low rates get no delay at all */
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
361
362 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363 {
364         u64 rate = bytes;
365
366         do_div(rate, ticks / HZ);
367         rate <<= 3;                     /* bytes/sec -> bits/sec */
368         do_div(rate, 1000000ul);        /* MB/Sec */
369
370         return rate;
371 }
372
/* Recompute the TX throughput estimate (be_tx_rate, in Mbit/s) from
 * the bytes transmitted since the last sample; sampled at most once
 * every two seconds. */
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
393
394 static void be_tx_stats_update(struct be_adapter *adapter,
395                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396 {
397         struct be_tx_stats *stats = tx_stats(adapter);
398         stats->be_tx_reqs++;
399         stats->be_tx_wrbs += wrb_cnt;
400         stats->be_tx_bytes += copied;
401         stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402         if (stopped)
403                 stats->be_tx_stops++;
404 }
405
406 /* Determine number of WRB entries needed to xmit data in an skb */
407 static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
408 {
409         int cnt = (skb->len > skb->data_len);
410
411         cnt += skb_shinfo(skb)->nr_frags;
412
413         /* to account for hdr wrb */
414         cnt++;
415         if (cnt & 1) {
416                 /* add a dummy to make it an even num */
417                 cnt++;
418                 *dummy = true;
419         } else
420                 *dummy = false;
421         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422         return cnt;
423 }
424
/* Fill one TX WRB with the DMA address and length of a fragment. */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
431
/* Populate the TX header WRB from skb offload state: LSO/GSO settings,
 * L4 checksum offload, VLAN tag insertion, total length and the number
 * of WRBs (wrb_cnt) that follow for this packet. */
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* HW segmentation offload (LSO); lso6 for TCPv6 */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* L4 checksum offload for non-GSO packets */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	/* HW VLAN tag insertion */
	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
463
464 static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
465                 bool unmap_single)
466 {
467         dma_addr_t dma;
468
469         be_dws_le_to_cpu(wrb, sizeof(*wrb));
470
471         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
472         if (wrb->frag_len) {
473                 if (unmap_single)
474                         pci_unmap_single(pdev, dma, wrb->frag_len,
475                                 PCI_DMA_TODEVICE);
476                 else
477                         pci_unmap_page(pdev, dma, wrb->frag_len,
478                                 PCI_DMA_TODEVICE);
479         }
480 }
481
/* DMA-map an skb and post its WRBs (header, linear part, fragments,
 * optional dummy pad) onto the TX queue.
 *
 * Returns the number of data bytes mapped, or 0 on a DMA mapping
 * failure, in which case every mapping made so far is undone and the
 * queue head is rewound so nothing is left half-posted.
 */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled in last, once the
	 * total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	/* linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	/* pad to an even WRB count as required by HW */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind to the first data WRB and unmap everything
	 * mapped so far (only the first may have been map_single) */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
550
/* ndo_start_xmit handler: map the skb into TX WRBs, record it for
 * completion processing, stop the queue if it is about to fill, and
 * ring the TX doorbell.  On mapping failure the skb is dropped.
 * Always returns NETDEV_TX_OK. */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* slot of the header WRB / sent_skb entry */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
590
591 static int be_change_mtu(struct net_device *netdev, int new_mtu)
592 {
593         struct be_adapter *adapter = netdev_priv(netdev);
594         if (new_mtu < BE_MIN_MTU ||
595                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
596                                         (ETH_HLEN + ETH_FCS_LEN))) {
597                 dev_info(&adapter->pdev->dev,
598                         "MTU must be between %d and %d bytes\n",
599                         BE_MIN_MTU,
600                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
601                 return -EINVAL;
602         }
603         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
604                         netdev->mtu, new_mtu);
605         netdev->mtu = new_mtu;
606         return 0;
607 }
608
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	/* For a VF, push only that VF's single configured vlan tag */
	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans)  {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* too many vlans: fall back to vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
643
/* ndo_vlan_rx_register: remember the stack's vlan group so RX
 * completions can deliver vlan-tagged packets through it. */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}
650
/* ndo_vlan_rx_add_vid: track the new vid and push the updated vlan
 * table to HW.  VFs only count; their HW config is done by the PF. */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	/* reconfigure HW while still within (or just past) the limit;
	 * beyond it be_vid_config switches to vlan promiscuous mode */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
663
/* ndo_vlan_rx_kill_vid: drop the vid from the group and, for the PF,
 * push the shrunken vlan table to HW. */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	/* reconfigure only when back within the HW-supported count */
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
678
/* ndo_set_multicast_list: sync promiscuous/allmulti/multicast filter
 * state from netdev flags into HW via FW commands. */
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		/* NULL mc list => mcast promiscuous in FW */
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	/* program the exact multicast list */
	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
708
/* ndo_set_vf_mac: program 'mac' as the pmac of VF 'vf' via the PF.
 * Returns 0 on success, -EPERM/-EINVAL on bad state/args, or the FW
 * status of the pmac_add command. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	/* delete the VF's current pmac entry, if it has one.
	 * NOTE(review): the del status is immediately overwritten by the
	 * pmac_add status below — looks like a best-effort delete, but
	 * a failed delete is silently ignored; confirm intent. */
	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
737
738 static int be_get_vf_config(struct net_device *netdev, int vf,
739                         struct ifla_vf_info *vi)
740 {
741         struct be_adapter *adapter = netdev_priv(netdev);
742
743         if (!adapter->sriov_enabled)
744                 return -EPERM;
745
746         if (vf >= num_vfs)
747                 return -EINVAL;
748
749         vi->vf = vf;
750         vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
751         vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
752         vi->qos = 0;
753         memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
754
755         return 0;
756 }
757
/* ndo_set_vf_vlan: set (vlan != 0) or clear (vlan == 0) the transparent
 * vlan tag of VF 'vf' and push the change to HW via be_vid_config. */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
785
786 static int be_set_vf_tx_rate(struct net_device *netdev,
787                         int vf, int rate)
788 {
789         struct be_adapter *adapter = netdev_priv(netdev);
790         int status = 0;
791
792         if (!adapter->sriov_enabled)
793                 return -EPERM;
794
795         if ((vf >= num_vfs) || (rate < 0))
796                 return -EINVAL;
797
798         if (rate > 10000)
799                 rate = 10000;
800
801         adapter->vf_cfg[vf].vf_tx_rate = rate;
802         status = be_cmd_set_qos(adapter, rate / 10, vf);
803
804         if (status)
805                 dev_info(&adapter->pdev->dev,
806                                 "tx rate %d on VF %d failed\n", rate, vf);
807         return status;
808 }
809
/* Recompute the RX throughput estimate (rx_rate, in Mbit/s) for one RX
 * object from the bytes received since the last sample; sampled at
 * most once every two seconds. */
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}
830
831 static void be_rx_stats_update(struct be_rx_obj *rxo,
832                 u32 pktsize, u16 numfrags, u8 pkt_type)
833 {
834         struct be_rx_stats *stats = &rxo->stats;
835
836         stats->rx_compl++;
837         stats->rx_frags += numfrags;
838         stats->rx_bytes += pktsize;
839         stats->rx_pkts++;
840         if (pkt_type == BE_MULTICAST_PACKET)
841                 stats->rx_mcast_pkts++;
842 }
843
844 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
845 {
846         u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
847
848         l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
849         ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
850         ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
851         if (ip_version) {
852                 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
853                 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
854         }
855         ipv6_chk = (ip_version && (tcpf || udpf));
856
857         return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
858 }
859
/* Consume the RX page-info entry at frag_idx: unmap the backing page
 * from DMA if this fragment was its last user, and decrement the count
 * of posted RX buffers.  The returned entry's page reference is now
 * owned by the caller. */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* a big page is shared by several frags; unmap only once the
	 * last user is consumed */
	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
880
/* Throwaway the data in the Rx completion: walk all fragments of the
 * completion starting at fragndx, drop each page reference and clear
 * the page-info entries. */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		/* fragment indices wrap around the ring */
		index_inc(&rxq_idx, rxq->len);
	}
}
900
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the linear part of the skb
 * (so the stack can parse headers); the remainder of the frame stays in
 * the rx pages, attached as skb frags.  Fragments that share a physical
 * page are coalesced into a single frag slot.  Ends by updating the
 * per-queue rx stats.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                /* Non-header payload of the first fragment stays in the
                 * page; the skb takes over the page reference. */
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        /* Single-fragment frame: nothing more to stitch together */
        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        /* Same page as the previous frag: drop the extra
                         * reference; the existing frag slot already holds
                         * one ref on this page. */
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
986
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the rx pages, applies the HW checksum
 * verdict and hands the frame to the stack (via the vlan path when the
 * frame carries a vlan tag).  Frees/discards resources on any failure.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                /* Must still consume the rx fragments of this completion */
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb_checksum_none_assert(skb);
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        /* NOTE(review): 0x400 is an undocumented function_mode flag here —
         * presumably a multi-channel/virtual-NIC mode bit; confirm against
         * the firmware interface definition. */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                /* Drop tagged frames when no vlans are configured */
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                /* vlan_tag is delivered big-endian by the HW */
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}
1040
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the rx page fragments directly to the napi GRO skb (no header
 * copy) and pushes the frame through napi_gro_frags()/vlan_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj =  &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        /* Attach each rx fragment; frags sharing a physical page are
         * coalesced into one frag slot.  j starts at (u16)-1 so the first
         * iteration's j++ lands on slot 0. */
        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        /* GRO path is only taken for frames the HW csum-verified (do_gro) */
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                /* NOTE(review): this early return leaves the frags already
                 * attached to the napi skb without flushing them through
                 * GRO and skips be_rx_stats_update() — verify that the napi
                 * layer reclaims the skb/pages, else this looks like a leak. */
                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
1122
/* Return the next valid rx completion in the cq, or NULL if none is
 * pending.  Advances the cq tail; does not notify/ack the cq (caller's
 * responsibility).
 */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        /* Ensure the valid bit is read before the rest of the entry */
        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}
1136
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian.  Called after a completion
 * has been fully consumed so the entry can be reused.
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
1145
1146 static inline struct page *be_alloc_pages(u32 size)
1147 {
1148         gfp_t alloc_flags = GFP_ATOMIC;
1149         u32 order = get_order(size);
1150         if (order > 0)
1151                 alloc_flags |= __GFP_COMP;
1152         return  alloc_pages(alloc_flags, order);
1153 }
1154
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Posts up to MAX_RX_POST descriptors, stopping early if the ring is
 * full (page_info->page already set) or allocation fails.  Frags carved
 * from the same big page share one DMA mapping; the frag that exhausts
 * the page is marked last_page_user so the unmap happens exactly once.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        /* Start a fresh big page and map it once */
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        /* Carve the next frag out of the current page;
                         * each frag holds its own page reference. */
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        /* Loop ended with a partially used page: last posted frag owns
         * the unmap responsibility. */
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
1217
/* Return the next valid tx completion, or NULL if none is pending.
 * Unlike the rx variant, the valid bit is cleared here (in-place),
 * after the entry is byte-swapped to host order.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        /* Ensure the valid bit is read before the rest of the entry */
        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}
1233
/* Reclaim the wrbs (and the skb) of one transmitted packet whose last
 * wrb index is last_index.  Unmaps each data fragment's DMA mapping;
 * the header wrb shares the skb-head mapping, which is unmapped with
 * the first data wrb only when the skb has linear data.
 */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                /* Unmap the skb header only once, and only if it has
                 * linear data */
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}
1265
/* Return the next posted event queue entry, or NULL if none.
 * Advances the EQ tail; the caller must zero eqe->evt when done so the
 * slot reads as empty on the next pass.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        /* Ensure the non-zero check is ordered before consuming the entry */
        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}
1278
1279 static int event_handle(struct be_adapter *adapter,
1280                         struct be_eq_obj *eq_obj)
1281 {
1282         struct be_eq_entry *eqe;
1283         u16 num = 0;
1284
1285         while ((eqe = event_get(eq_obj)) != NULL) {
1286                 eqe->evt = 0;
1287                 num++;
1288         }
1289
1290         /* Deal with any spurious interrupts that come
1291          * without events
1292          */
1293         be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1294         if (num)
1295                 napi_schedule(&eq_obj->napi);
1296
1297         return num;
1298 }
1299
1300 /* Just read and notify events without processing them.
1301  * Used at the time of destroying event queues */
1302 static void be_eq_clean(struct be_adapter *adapter,
1303                         struct be_eq_obj *eq_obj)
1304 {
1305         struct be_eq_entry *eqe;
1306         u16 num = 0;
1307
1308         while ((eqe = event_get(eq_obj)) != NULL) {
1309                 eqe->evt = 0;
1310                 num++;
1311         }
1312
1313         if (num)
1314                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1315 }
1316
/* Drain an rx queue completely: consume/discard all pending completions,
 * then free every posted-but-unused rx buffer.  Called only on the
 * teardown path, after the rxq has been destroyed in HW.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffer that were not used */
        /* Oldest posted-but-unconsumed entry sits rxq->used slots behind
         * head; "+ rxq->len" keeps the subtraction non-negative. */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}
1341
/* Drain the tx queue on teardown: poll for completions for up to 200ms,
 * then forcibly reclaim any posted wrbs whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        /* Ack consumed completions without re-arming */
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                /* Compute the index of this skb's last wrb so
                 * be_tx_compl_process() reclaims the full chain */
                index_adv(&end_idx,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}
1384
1385 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1386 {
1387         struct be_queue_info *q;
1388
1389         q = &adapter->mcc_obj.q;
1390         if (q->created)
1391                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1392         be_queue_free(adapter, q);
1393
1394         q = &adapter->mcc_obj.cq;
1395         if (q->created)
1396                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1397         be_queue_free(adapter, q);
1398 }
1399
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue first (queue creation in HW needs
 * the cq id), then the MCC WRB queue.  On any failure, unwinds in
 * reverse order via the goto chain and returns -1; returns 0 on success.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
1435
/* Tear down the tx path in dependency order: txq, then its cq, then
 * (after draining residual events) the shared tx event queue.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}
1458
/* Create the tx path bottom-up: event queue, completion queue, then the
 * tx WRB queue.  The tx EQ runs with a fixed eqd (adaptive interrupt
 * coalescing disabled).  On any failure, unwinds via the goto chain and
 * returns -1; returns 0 on success.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        /* The tx EQ is created first, so its id is the base from which
         * per-EQ ISR bit positions are computed (be_evt_bit_get) */
        adapter->base_eq_id = adapter->tx_eq.q.id;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}
1509
/* Tear down every rx queue set (rxq, cq, eq per rx object), in
 * dependency order, draining buffers and residual events along the way.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        /* Free buffers still held by the now-dead rxq */
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}
1543
/* Create one eq/cq/rxq triplet per rx object (rx EQs use adaptive
 * interrupt coalescing).  RSS is enabled on every rxq except the first
 * (default) queue; when multiple rx queues exist, the RSS indirection
 * table is programmed from the per-queue rss_ids.  Returns 0 on success;
 * on any failure tears down everything created so far and returns -1.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        /* big_page_size: the (compound) page size each rx page alloc uses */
        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                        sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;

                /* Rx Q */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;

                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
                        (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
                if (rc)
                        goto err;
        }

        if (be_multi_rxq(adapter)) {
                u8 rsstable[MAX_RSS_QS];

                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        goto err;
        }

        return 0;
err:
        /* Safe on partial setup: destroy checks q->created per queue */
        be_rx_queues_destroy(adapter);
        return -1;
}
1609
/* There are 8 evt ids per func. Returns the evt id's bit number
 * (position of this EQ's bit in the CEV ISR register, relative to the
 * first EQ created for this function).
 */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
        return eq_id - adapter->base_eq_id;
}
1615
/* Legacy (INTx) interrupt handler.  Reads the CEV interrupt status
 * register covering this function's event queues and dispatches to the
 * tx/mcc EQ and each rx EQ whose bit is set.  Returns IRQ_NONE when the
 * ISR reads zero (interrupt was not ours / shared line).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i;

        /* Each CEV ISR register covers 8 EQs; index by tx EQ id */
        isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
        if (!isr)
                return IRQ_NONE;

        if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
                event_handle(adapter, &adapter->tx_eq);

        for_all_rx_queues(adapter, rxo, i) {
                if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
                        event_handle(adapter, &rxo->rx_eq);
        }

        return IRQ_HANDLED;
}
1637
1638 static irqreturn_t be_msix_rx(int irq, void *dev)
1639 {
1640         struct be_rx_obj *rxo = dev;
1641         struct be_adapter *adapter = rxo->adapter;
1642
1643         event_handle(adapter, &rxo->rx_eq);
1644
1645         return IRQ_HANDLED;
1646 }
1647
1648 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1649 {
1650         struct be_adapter *adapter = dev;
1651
1652         event_handle(adapter, &adapter->tx_eq);
1653
1654         return IRQ_HANDLED;
1655 }
1656
1657 static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1658                         struct be_eth_rx_compl *rxcp)
1659 {
1660         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1661         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1662
1663         if (err)
1664                 rxo->stats.rxcp_err++;
1665
1666         return (tcp_frame && !err) ? true : false;
1667 }
1668
/* NAPI poll handler for one rx queue.  Consumes up to `budget`
 * completions, dispatching each to the GRO or non-GRO path, refills the
 * rx ring when it runs low, and completes napi (re-arming interrupts)
 * only when the cq was fully drained.  Returns the number of
 * completions processed.
 */
int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                if (do_gro(adapter, rxo, rxcp))
                        be_rx_compl_process_gro(adapter, rxo, rxcp);
                else
                        be_rx_compl_process(adapter, rxo, rxcp);

                /* Make the entry reusable for the next pass */
                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}
1706
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 * Always returns 1 and completes napi unconditionally since all pending
 * work is drained in a single pass.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        int tx_compl = 0, mcc_compl, status = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                tx_compl++;
        }

        mcc_compl = be_process_mcc(adapter, &status);

        napi_complete(napi);

        if (mcc_compl) {
                struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
        }

        if (tx_compl) {
                be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                        netif_wake_queue(adapter->netdev);
                }

                tx_stats(adapter)->be_tx_events++;
                tx_stats(adapter)->be_tx_compl += tx_compl;
        }

        return 1;
}
1754
1755 void be_detect_dump_ue(struct be_adapter *adapter)
1756 {
1757         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1758         u32 i;
1759
1760         pci_read_config_dword(adapter->pdev,
1761                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1762         pci_read_config_dword(adapter->pdev,
1763                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1764         pci_read_config_dword(adapter->pdev,
1765                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1766         pci_read_config_dword(adapter->pdev,
1767                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1768
1769         ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1770         ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1771
1772         if (ue_status_lo || ue_status_hi) {
1773                 adapter->ue_detected = true;
1774                 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1775         }
1776
1777         if (ue_status_lo) {
1778                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1779                         if (ue_status_lo & 1)
1780                                 dev_err(&adapter->pdev->dev,
1781                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1782                 }
1783         }
1784         if (ue_status_hi) {
1785                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1786                         if (ue_status_hi & 1)
1787                                 dev_err(&adapter->pdev->dev,
1788                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1789                 }
1790         }
1791
1792 }
1793
/* Periodic (1s) housekeeping: refresh HW stats, update tx/rx rate
 * counters, adapt the RX EQ interrupt delay, replenish starved RX
 * queues and poll once for unrecoverable errors.  Re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Issue a new stats request only when no previous one is in
	 * flight (stats_ioctl_sent is presumably cleared in the stats
	 * completion path — TODO confirm).
	 */
	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		/* retry posting RX buffers for a queue that previously
		 * failed to allocate them
		 */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}

	/* ue_detected latches, so the UE dump runs at most once */
	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1821
1822 static void be_msix_disable(struct be_adapter *adapter)
1823 {
1824         if (adapter->msix_enabled) {
1825                 pci_disable_msix(adapter->pdev);
1826                 adapter->msix_enabled = false;
1827         }
1828 }
1829
/* Number of RX queues to use: 1 + MAX_RSS_QS when multi-queue RSS is
 * usable (enabled by module param, supported by fw caps, and neither
 * SR-IOV nor the conflicting function mode is active); otherwise a
 * single default queue.
 */
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	/* 0x400 appears to be a function-mode flag that precludes
	 * multi-RXQ — TODO replace with a named constant from be.h.
	 */
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
1841
/* Try to enable MSI-X with one vector per RX queue plus one for TX/MCC.
 * If fewer vectors are available, retry with the count the first call
 * reported and shrink num_rx_qs to match.  On total failure the
 * adapter stays in INTx mode (msix_enabled remains false).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	/* entry i maps to RX queue i; the extra entry is TX/MCC */
	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors that could
		 * have been allocated; retry with exactly that many
		 */
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
1870
/* Enable SR-IOV VFs when this is a PF and the num_vfs module parameter
 * requests them.  be_check_sriov_fn_type() runs unconditionally first
 * (it presumably records the PF/VF function type — see be_cmds).
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status = pci_enable_sriov(adapter->pdev, num_vfs);

		adapter->sriov_enabled = !status;
	}
#endif
}
1883
/* Disable SR-IOV if it was enabled; no-op without CONFIG_PCI_IOV. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
1893
1894 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1895 {
1896         return adapter->msix_entries[
1897                         be_evt_bit_get(adapter, eq_id)].vector;
1898 }
1899
1900 static int be_request_irq(struct be_adapter *adapter,
1901                 struct be_eq_obj *eq_obj,
1902                 void *handler, char *desc, void *context)
1903 {
1904         struct net_device *netdev = adapter->netdev;
1905         int vec;
1906
1907         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1908         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1909         return request_irq(vec, handler, 0, eq_obj->desc, context);
1910 }
1911
1912 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1913                         void *context)
1914 {
1915         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1916         free_irq(vec, context);
1917 }
1918
/* Request the TX/MCC IRQ and one IRQ per RX queue.  On failure all
 * previously requested vectors are released and MSI-X is disabled so
 * the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* free the RX irqs acquired before the failing one (i-1 .. 0) */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
1953
/* Register interrupts: MSI-X when enabled, otherwise shared INTx.
 * A VF must not fall back to INTx, so its MSI-X failure is returned
 * as-is.  Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
1981
/* Undo be_irq_register(): free either the shared INTx line or the
 * TX/MCC and per-RX-queue MSI-X vectors.  No-op if nothing registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2006
/* ndo_stop: quiesce the adapter — stop the worker and async MCC
 * events, stop the netdev queues, disable interrupts, wait for
 * in-flight handlers, then drain pending TX completions so every
 * in-flight skb is freed.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	/* wait for any handler already running on our vector(s) */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2049
/* ndo_open: post RX buffers, enable NAPI, register/enable interrupts,
 * arm the event/completion queues, start async MCC and the worker,
 * then query link state.  For the PF also program vlan and
 * flow-control settings.  Any failure unwinds through be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		/* presumably re-programs the vlan filter table — see
		 * be_vid_config()
		 */
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2104
2105 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2106 {
2107         struct be_dma_mem cmd;
2108         int status = 0;
2109         u8 mac[ETH_ALEN];
2110
2111         memset(mac, 0, ETH_ALEN);
2112
2113         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2114         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2115         if (cmd.va == NULL)
2116                 return -1;
2117         memset(cmd.va, 0, cmd.size);
2118
2119         if (enable) {
2120                 status = pci_write_config_dword(adapter->pdev,
2121                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2122                 if (status) {
2123                         dev_err(&adapter->pdev->dev,
2124                                 "Could not enable Wake-on-lan\n");
2125                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2126                                         cmd.dma);
2127                         return status;
2128                 }
2129                 status = be_cmd_enable_magic_wol(adapter,
2130                                 adapter->netdev->dev_addr, &cmd);
2131                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2132                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2133         } else {
2134                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2135                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2136                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2137         }
2138
2139         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2140         return status;
2141 }
2142
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		/* NOTE(review): wraps at 0xff with no carry into mac[4];
		 * acceptable only while num_vfs stays small — confirm the
		 * upper bound on num_vfs.
		 */
		mac[5] += 1;
	}
	/* returns the status of the LAST pmac_add; earlier failures are
	 * only logged
	 */
	return status;
}
2171
2172 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2173 {
2174         u32 vf;
2175
2176         for (vf = 0; vf < num_vfs; vf++) {
2177                 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2178                         be_cmd_pmac_del(adapter,
2179                                         adapter->vf_cfg[vf].vf_if_handle,
2180                                         adapter->vf_cfg[vf].vf_pmac_id);
2181         }
2182 }
2183
/* Create the fw interface(s), TX/RX/MCC queues and, for the PF, the
 * per-VF interfaces and MAC addresses.  Unwinds everything it created
 * on failure.  Counterpart of be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	/* the PF gets promiscuous/error-pass capability and, when
	 * multi-RXQ is usable, RSS
	 */
	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		/* Create one fw interface per VF.  NOTE(review): mac is
		 * passed uninitialized here; presumably ignored because
		 * pmac_invalid is true — confirm in be_cmd_if_create().
		 */
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		/* VF: adopt the MAC the PF programmed for this function */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	/* -1 => speed not yet known; refreshed on link status updates */
	adapter->link_speed = -1;

	return 0;

	/* error unwind in reverse order of creation */
mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
2275
/* Tear down VF MACs (PF only), all queues and the fw interface, then
 * tell the fw no more commands will be issued.  Counterpart of
 * be_setup().  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2291
2292
/* Signature expected at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
/* Flash directory marker split across two 16-byte rows; presumably
 * matched against the on-flash directory header — TODO confirm usage.
 */
char flash_cookie[2][16] =      {"*** SE FLAS",
                                "H DIRECTORY *** "};
2296
2297 static bool be_flash_redboot(struct be_adapter *adapter,
2298                         const u8 *p, u32 img_start, int image_size,
2299                         int hdr_size)
2300 {
2301         u32 crc_offset;
2302         u8 flashed_crc[4];
2303         int status;
2304
2305         crc_offset = hdr_size + img_start + image_size - 4;
2306
2307         p += crc_offset;
2308
2309         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2310                         (image_size - 4));
2311         if (status) {
2312                 dev_err(&adapter->pdev->dev,
2313                 "could not get crc from flash, not flashing redboot\n");
2314                 return false;
2315         }
2316
2317         /*update redboot only if crc does not match*/
2318         if (!memcmp(flashed_crc, p, 4))
2319                 return false;
2320         else
2321                 return true;
2322 }
2323
2324 static int be_flash_data(struct be_adapter *adapter,
2325                         const struct firmware *fw,
2326                         struct be_dma_mem *flash_cmd, int num_of_images)
2327
2328 {
2329         int status = 0, i, filehdr_size = 0;
2330         u32 total_bytes = 0, flash_op;
2331         int num_bytes;
2332         const u8 *p = fw->data;
2333         struct be_cmd_write_flashrom *req = flash_cmd->va;
2334         struct flash_comp *pflashcomp;
2335         int num_comp;
2336
2337         struct flash_comp gen3_flash_types[9] = {
2338                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2339                         FLASH_IMAGE_MAX_SIZE_g3},
2340                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2341                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2342                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2343                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2344                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2345                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2346                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2347                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2348                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2349                         FLASH_IMAGE_MAX_SIZE_g3},
2350                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2351                         FLASH_IMAGE_MAX_SIZE_g3},
2352                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2353                         FLASH_IMAGE_MAX_SIZE_g3},
2354                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2355                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2356         };
2357         struct flash_comp gen2_flash_types[8] = {
2358                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2359                         FLASH_IMAGE_MAX_SIZE_g2},
2360                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2361                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2362                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2363                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2364                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2365                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2366                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2367                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2368                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2369                         FLASH_IMAGE_MAX_SIZE_g2},
2370                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2371                         FLASH_IMAGE_MAX_SIZE_g2},
2372                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2373                          FLASH_IMAGE_MAX_SIZE_g2}
2374         };
2375
2376         if (adapter->generation == BE_GEN3) {
2377                 pflashcomp = gen3_flash_types;
2378                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2379                 num_comp = 9;
2380         } else {
2381                 pflashcomp = gen2_flash_types;
2382                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2383                 num_comp = 8;
2384         }
2385         for (i = 0; i < num_comp; i++) {
2386                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2387                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2388                         continue;
2389                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2390                         (!be_flash_redboot(adapter, fw->data,
2391                          pflashcomp[i].offset, pflashcomp[i].size,
2392                          filehdr_size)))
2393                         continue;
2394                 p = fw->data;
2395                 p += filehdr_size + pflashcomp[i].offset
2396                         + (num_of_images * sizeof(struct image_hdr));
2397         if (p + pflashcomp[i].size > fw->data + fw->size)
2398                 return -1;
2399         total_bytes = pflashcomp[i].size;
2400                 while (total_bytes) {
2401                         if (total_bytes > 32*1024)
2402                                 num_bytes = 32*1024;
2403                         else
2404                                 num_bytes = total_bytes;
2405                         total_bytes -= num_bytes;
2406
2407                         if (!total_bytes)
2408                                 flash_op = FLASHROM_OPER_FLASH;
2409                         else
2410                                 flash_op = FLASHROM_OPER_SAVE;
2411                         memcpy(req->params.data_buf, p, num_bytes);
2412                         p += num_bytes;
2413                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2414                                 pflashcomp[i].optype, flash_op, num_bytes);
2415                         if (status) {
2416                                 dev_err(&adapter->pdev->dev,
2417                                         "cmd to write to flash rom failed.\n");
2418                                 return -1;
2419                         }
2420                         yield();
2421                 }
2422         }
2423         return 0;
2424 }
2425
2426 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2427 {
2428         if (fhdr == NULL)
2429                 return 0;
2430         if (fhdr->build[0] == '3')
2431                 return BE_GEN3;
2432         else if (fhdr->build[0] == '2')
2433                 return BE_GEN2;
2434         else
2435                 return 0;
2436 }
2437
/* ethtool flash entry point: load the UFI file named by @func via
 * request_firmware(), verify the file generation matches the chip
 * generation, and flash it.  Gen3 UFIs may carry multiple images;
 * only imageid 1 is flashed.  Returns 0 or a negative error.
 */
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	/* NOTE(review): unbounded copy — assumes the ethtool layer
	 * already limits func to ETHTOOL_FLASH_MAX_FILENAME; verify.
	 */
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* request header plus the 32KB chunk buffer used by flashing */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
2503
/* net_device callbacks; hooked up via BE_SET_NETDEV_OPS in
 * be_netdev_init().
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
2520
/* One-time netdev initialization: advertise checksum/TSO/vlan offloads
 * and GRO, set flow-control defaults, hook up ndo/ethtool ops, and add
 * one NAPI context per RX queue plus one for TX/MCC.  Carrier and the
 * TX queue stay off until be_open().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	/* stay quiescent until be_open() */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}
2557
/* Undo be_map_pci_bars(): release every ioremap'ed BAR that was set
 * up.  Safe on a partially mapped adapter (NULL fields are skipped);
 * be_map_pci_bars() relies on that for its error path. */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        /* On VFs, pcicfg aliases into the doorbell mapping (db +
         * offset), so only the PF owns a separate mapping to unmap. */
        if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
}
2567
2568 static int be_map_pci_bars(struct be_adapter *adapter)
2569 {
2570         u8 __iomem *addr;
2571         int pcicfg_reg, db_reg;
2572
2573         if (be_physfn(adapter)) {
2574                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2575                                 pci_resource_len(adapter->pdev, 2));
2576                 if (addr == NULL)
2577                         return -ENOMEM;
2578                 adapter->csr = addr;
2579         }
2580
2581         if (adapter->generation == BE_GEN2) {
2582                 pcicfg_reg = 1;
2583                 db_reg = 4;
2584         } else {
2585                 pcicfg_reg = 0;
2586                 if (be_physfn(adapter))
2587                         db_reg = 4;
2588                 else
2589                         db_reg = 0;
2590         }
2591         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2592                                 pci_resource_len(adapter->pdev, db_reg));
2593         if (addr == NULL)
2594                 goto pci_map_err;
2595         adapter->db = addr;
2596
2597         if (be_physfn(adapter)) {
2598                 addr = ioremap_nocache(
2599                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2600                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2601                 if (addr == NULL)
2602                         goto pci_map_err;
2603                 adapter->pcicfg = addr;
2604         } else
2605                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2606
2607         return 0;
2608 pci_map_err:
2609         be_unmap_pci_bars(adapter);
2610         return -ENOMEM;
2611 }
2612
2613
2614 static void be_ctrl_cleanup(struct be_adapter *adapter)
2615 {
2616         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2617
2618         be_unmap_pci_bars(adapter);
2619
2620         if (mem->va)
2621                 pci_free_consistent(adapter->pdev, mem->size,
2622                         mem->va, mem->dma);
2623
2624         mem = &adapter->mc_cmd_mem;
2625         if (mem->va)
2626                 pci_free_consistent(adapter->pdev, mem->size,
2627                         mem->va, mem->dma);
2628 }
2629
/* Map the PCI BARs and allocate the DMA memory needed to drive the
 * firmware command interface: a 16-byte-aligned mailbox for bootstrap
 * (MBOX) commands and a buffer for the multicast MAC config command.
 * Also initializes the locks serializing mailbox/MCC access.
 * Returns 0, or a negative errno after unwinding any partial setup.
 * Undone by be_ctrl_cleanup(). */
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        /* Over-allocate by 16 bytes so the mailbox proper can be
         * placed on a 16-byte boundary inside the allocation. */
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
                                mbox_mem_alloc->size, &mbox_mem_alloc->dma);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }

        /* The aligned view aliases the allocation above; only
         * mbox_mem_alloced is ever freed. */
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
        mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
                        &mc_cmd_mem->dma);
        if (mc_cmd_mem->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

        spin_lock_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
                mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}
2681
2682 static void be_stats_cleanup(struct be_adapter *adapter)
2683 {
2684         struct be_dma_mem *cmd = &adapter->stats_cmd;
2685
2686         if (cmd->va)
2687                 pci_free_consistent(adapter->pdev, cmd->size,
2688                         cmd->va, cmd->dma);
2689 }
2690
2691 static int be_stats_init(struct be_adapter *adapter)
2692 {
2693         struct be_dma_mem *cmd = &adapter->stats_cmd;
2694
2695         cmd->size = sizeof(struct be_cmd_req_get_stats);
2696         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2697         if (cmd->va == NULL)
2698                 return -1;
2699         memset(cmd->va, 0, cmd->size);
2700         return 0;
2701 }
2702
/* PCI remove callback: tear down one adapter in reverse order of
 * be_probe().  The netdev is unregistered first so no new traffic or
 * ioctls can arrive while rings and control structures are freed. */
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        /* Nothing to do if probe never attached an adapter. */
        if (!adapter)
                return;

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        be_sriov_disable(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        /* adapter lives inside the netdev allocation; free it last. */
        free_netdev(adapter->netdev);
}
2728
/* Query static configuration from firmware: fw version, port number,
 * function mode/capabilities and (for the PF only) the permanent MAC
 * address, which is copied into the netdev.  Also derives how many
 * VLANs this function may use.  Returns 0 or a command error code. */
static int be_get_config(struct be_adapter *adapter)
{
        int status;
        u8 mac[ETH_ALEN];

        status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
        if (status)
                return status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        memset(mac, 0, ETH_ALEN);

        /* Only the PF queries its permanent MAC address here. */
        if (be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

                if (status)
                        return status;

                if (!is_valid_ether_addr(mac))
                        return -EADDRNOTAVAIL;

                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }

        /* NOTE(review): 0x400 looks like a multi-channel/flex mode flag
         * in function_mode that shrinks the per-function VLAN table to
         * a quarter — confirm and replace with a named constant. */
        if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        return 0;
}
2766
/* PCI probe: bring up one adapter instance.  Enables the PCI device,
 * allocates the netdev, maps BARs / control structures, syncs with
 * firmware, allocates stats DMA memory, sets up rings and interrupts
 * and finally registers the netdev.  Error paths unwind in reverse
 * order through the labels at the bottom. */
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);

        /* Derive the chip generation from the PCI device id. */
        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }

        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* Prefer 64-bit DMA; fall back to a 32-bit mask. */
        status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        be_sriov_enable(adapter);

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        /* PF starts from a clean function state. */
        if (be_physfn(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;
        }

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        return 0;

unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        be_sriov_disable(adapter);
        free_netdev(adapter->netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
2887
/* PM suspend callback: optionally arm wake-on-lan, detach and close
 * the interface, free adapter rings and drop the device into the
 * requested PCI power state.  Reversed by be_resume(). */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        /* Snapshot negotiated flow control into tx_fc/rx_fc so setup
         * on resume can reapply it — NOTE(review): confirm be_setup()
         * consumes these fields. */
        be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
2910
2911 static int be_resume(struct pci_dev *pdev)
2912 {
2913         int status = 0;
2914         struct be_adapter *adapter = pci_get_drvdata(pdev);
2915         struct net_device *netdev =  adapter->netdev;
2916
2917         netif_device_detach(netdev);
2918
2919         status = pci_enable_device(pdev);
2920         if (status)
2921                 return status;
2922
2923         pci_set_power_state(pdev, 0);
2924         pci_restore_state(pdev);
2925
2926         /* tell fw we're ready to fire cmds */
2927         status = be_cmd_fw_init(adapter);
2928         if (status)
2929                 return status;
2930
2931         be_setup(adapter);
2932         if (netif_running(netdev)) {
2933                 rtnl_lock();
2934                 be_open(netdev);
2935                 rtnl_unlock();
2936         }
2937         netif_device_attach(netdev);
2938
2939         if (adapter->wol)
2940                 be_setup_wol(adapter, false);
2941         return 0;
2942 }
2943
2944 /*
2945  * An FLR will stop BE from DMAing any data.
2946  */
2947 static void be_shutdown(struct pci_dev *pdev)
2948 {
2949         struct be_adapter *adapter = pci_get_drvdata(pdev);
2950         struct net_device *netdev =  adapter->netdev;
2951
2952         netif_device_detach(netdev);
2953
2954         be_cmd_reset_function(adapter);
2955
2956         if (adapter->wol)
2957                 be_setup_wol(adapter, true);
2958
2959         pci_disable_device(pdev);
2960 }
2961
/* EEH (PCI error recovery) callback: a channel error was detected.
 * Flag the error, detach and close the interface, free adapter state,
 * and tell the EEH core whether a slot reset may recover the device. */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        /* Set here, cleared again in be_eeh_reset() once the slot has
         * been reset. */
        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        /* Permanent failure: no point attempting a reset. */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}
2988
/* EEH slot-reset callback: the slot was reset; re-enable the device,
 * restore config space and run POST to verify the card and firmware
 * came back.  RECOVERED lets the core proceed to be_eeh_resume();
 * DISCONNECT gives up on the device. */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        /* Clear the flag set in be_eeh_err_detected(). */
        adapter->eeh_err = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}
3012
/* EEH resume callback: the device is functional again; re-init the
 * firmware interface, rebuild adapter state and reopen the interface
 * if it was running.  The callback returns void, so failures can only
 * be logged, not reported to the EEH core. */
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3042
/* PCI error (EEH/AER) recovery callbacks wired into be_driver below. */
static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};
3048
/* PCI driver glue: binds the device ids in be_dev_ids to the
 * probe/remove/PM/shutdown/error-recovery callbacks defined above. */
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};
3059
3060 static int __init be_init_module(void)
3061 {
3062         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3063             rx_frag_size != 2048) {
3064                 printk(KERN_WARNING DRV_NAME
3065                         " : Module param rx_frag_size must be 2048/4096/8192."
3066                         " Using 2048\n");
3067                 rx_frag_size = 2048;
3068         }
3069
3070         if (num_vfs > 32) {
3071                 printk(KERN_WARNING DRV_NAME
3072                         " : Module param num_vfs must not be greater than 32."
3073                         "Using 32\n");
3074                 num_vfs = 32;
3075         }
3076
3077         return pci_register_driver(&be_driver);
3078 }
3079 module_init(be_init_module);
3080
/* Module exit point: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);