vlan: Don't check for vlan group before vlan_tx_tag_present.

drivers/net/ixgbe/ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
         board_82599 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
                 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 gcr;
        u32 gpie;
        u32 vmdctl;

#ifdef CONFIG_PCI_IOV
        /* disable iov and allow time for transactions to clear */
        pci_disable_sriov(adapter->pdev);
#endif

        /* turn off device IOV mode */
        gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
        gcr &= ~(IXGBE_GCR_EXT_SRIOV);
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* set default pool back to 0 */
        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

        /* take a breather then clean up driver data */
        msleep(100);

        kfree(adapter->vfinfo);
        adapter->vfinfo = NULL;

        adapter->num_vfs = 0;
        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
        u32 ofs;
        char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

        /* General Registers */
        {IXGBE_CTRL, "CTRL"},
        {IXGBE_STATUS, "STATUS"},
        {IXGBE_CTRL_EXT, "CTRL_EXT"},

        /* Interrupt Registers */
        {IXGBE_EICR, "EICR"},

        /* RX Registers */
        {IXGBE_SRRCTL(0), "SRRCTL"},
        {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
        {IXGBE_RDLEN(0), "RDLEN"},
        {IXGBE_RDH(0), "RDH"},
        {IXGBE_RDT(0), "RDT"},
        {IXGBE_RXDCTL(0), "RXDCTL"},
        {IXGBE_RDBAL(0), "RDBAL"},
        {IXGBE_RDBAH(0), "RDBAH"},

        /* TX Registers */
        {IXGBE_TDBAL(0), "TDBAL"},
        {IXGBE_TDBAH(0), "TDBAH"},
        {IXGBE_TDLEN(0), "TDLEN"},
        {IXGBE_TDH(0), "TDH"},
        {IXGBE_TDT(0), "TDT"},
        {IXGBE_TXDCTL(0), "TXDCTL"},

        /* List Terminator */
        {}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
        int i = 0, j = 0;
        char rname[16];
        u32 regs[64];

        switch (reginfo->ofs) {
        case IXGBE_SRRCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
                break;
        case IXGBE_DCA_RXCTRL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                break;
        case IXGBE_RDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
                break;
        case IXGBE_RDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
                break;
        case IXGBE_RDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
                break;
        case IXGBE_RXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                break;
        case IXGBE_RDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
                break;
        case IXGBE_RDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
                break;
        case IXGBE_TDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
                break;
        case IXGBE_TDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
                break;
        case IXGBE_TDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
                break;
        case IXGBE_TDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
                break;
        case IXGBE_TDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
                break;
        case IXGBE_TXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
                break;
        default:
                pr_info("%-15s %08x\n", reginfo->name,
                        IXGBE_READ_REG(hw, reginfo->ofs));
                return;
        }

        for (i = 0; i < 8; i++) {
                snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
                pr_err("%-15s", rname);
                for (j = 0; j < 8; j++)
                        pr_cont(" %08x", regs[i*8+j]);
                pr_cont("\n");
        }

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_reg_info *reginfo;
        int n = 0;
        struct ixgbe_ring *tx_ring;
        struct ixgbe_tx_buffer *tx_buffer_info;
        union ixgbe_adv_tx_desc *tx_desc;
        struct my_u0 { u64 a; u64 b; } *u0;
        struct ixgbe_ring *rx_ring;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        u32 staterr;
        int i = 0;

        if (!netif_msg_hw(adapter))
                return;

        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            "
                        "trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
                        netdev->trans_start,
                        netdev->last_rx);
        }

        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
        pr_info(" Register Name   Value\n");
        for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
             reginfo->name; reginfo++) {
                ixgbe_regdump(hw, reginfo);
        }

        /* Print TX Ring Summary */
        if (!netdev || !netif_running(netdev))
                goto exit;

        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
        pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                tx_buffer_info =
                        &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
                           n, tx_ring->next_to_use, tx_ring->next_to_clean,
                           (u64)tx_buffer_info->dma,
                           tx_buffer_info->length,
                           tx_buffer_info->next_to_watch,
                           (u64)tx_buffer_info->time_stamp);
        }

        /* Print TX Rings */
        if (!netif_msg_tx_done(adapter))
                goto rx_ring_summary;

        dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

        /* Transmit Descriptor Formats
         *
         * Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
         *   +--------------------------------------------------------------+
         *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
         */

        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("T [desc]     [address 63:0  ] "
                        "[PlPOIdStDDt Ln] [bi->dma       ] "
                        "leng  ntw timestamp        bi->skb\n");

                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
                        pr_info("T [0x%03X]    %016llX %016llX %016llX"
                                " %04X  %3X %016llX %p", i,
                                le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
                                (u64)tx_buffer_info->dma,
                                tx_buffer_info->length,
                                tx_buffer_info->next_to_watch,
                                (u64)tx_buffer_info->time_stamp,
                                tx_buffer_info->skb);
                        if (i == tx_ring->next_to_use &&
                                i == tx_ring->next_to_clean)
                                pr_cont(" NTC/U\n");
                        else if (i == tx_ring->next_to_use)
                                pr_cont(" NTU\n");
                        else if (i == tx_ring->next_to_clean)
                                pr_cont(" NTC\n");
                        else
                                pr_cont("\n");

                        if (netif_msg_pktdata(adapter) &&
                                tx_buffer_info->dma != 0)
                                print_hex_dump(KERN_INFO, "",
                                        DUMP_PREFIX_ADDRESS, 16, 1,
                                        phys_to_virt(tx_buffer_info->dma),
                                        tx_buffer_info->length, true);
                }
        }

        /* Print RX Rings Summary */
rx_ring_summary:
        dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
        pr_info("Queue [NTU] [NTC]\n");
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("%5d %5X %5X\n",
                        n, rx_ring->next_to_use, rx_ring->next_to_clean);
        }

        /* Print RX Rings */
        if (!netif_msg_rx_status(adapter))
                goto exit;

        dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

        /* Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 16 15   4 3     0
         *   +------------------------------------------------------+
         * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
         *   | Checksum   Ident  |   |           |    | Type | Type |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         */
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("R  [desc]      [ PktBuf     A0] "
                        "[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
                        "<-- Adv Rx Read format\n");
                pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
                        "[vl er S cks ln] ---------------- [bi->skb] "
                        "<-- Adv Rx Write-Back format\n");

                for (i = 0; i < rx_ring->count; i++) {
                        rx_buffer_info = &rx_ring->rx_buffer_info[i];
                        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
                        u0 = (struct my_u0 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
                        if (staterr & IXGBE_RXD_STAT_DD) {
                                /* Descriptor Done */
                                pr_info("RWB[0x%03X]     %016llX "
                                        "%016llX ---------------- %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        rx_buffer_info->skb);
                        } else {
                                pr_info("R  [0x%03X]     %016llX "
                                        "%016llX %016llX %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)rx_buffer_info->dma,
                                        rx_buffer_info->skb);

                                if (netif_msg_pktdata(adapter)) {
                                        print_hex_dump(KERN_INFO, "",
                                           DUMP_PREFIX_ADDRESS, 16, 1,
                                           phys_to_virt(rx_buffer_info->dma),
                                           rx_ring->rx_buf_len, true);

                                        if (rx_ring->rx_buf_len
                                                < IXGBE_RXBUFFER_2048)
                                                print_hex_dump(KERN_INFO, "",
                                                  DUMP_PREFIX_ADDRESS, 16, 1,
                                                  phys_to_virt(
                                                    rx_buffer_info->page_dma +
                                                    rx_buffer_info->page_offset
                                                  ),
                                                  PAGE_SIZE/2, true);
                                }
                        }

                        if (i == rx_ring->next_to_use)
                                pr_cont(" NTU\n");
                        else if (i == rx_ring->next_to_clean)
                                pr_cont(" NTC\n");
                        else
                                pr_cont("\n");

                }
        }

exit:
        return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}
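
/*
 * Worked example (illustrative only, derived from the 82598 branch above):
 * mapping Rx queue 5 (direction 0) to MSI-X vector 2 gives
 * msix_vector = 2 | IXGBE_IVAR_ALLOC_VAL (0x80) = 0x82 and
 * index = ((0 * 64 + 5) >> 2) & 0x1F = 1, so 0x82 is written into
 * bits 15:8 (byte lane 5 & 0x3 = 1) of IVAR(1).
 */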

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
        } else {
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
        }
}
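
/*
 * On 82599 the per-queue interrupt causes span two 32-bit EICS_EX
 * registers, so the 64-bit qmask above is written as low and high
 * halves; 82598 fits its queue causes in the single 32-bit EICS
 * register.
 */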

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                      struct ixgbe_tx_buffer
                                      *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
        /* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_xon_state - check the tx ring xon state
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if in xon state (currently not paused)
 */
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
                                      struct ixgbe_ring *tx_ring)
{
        u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
        if (adapter->dcb_cfg.pfc_mode_enable) {
                int tc;
                int reg_idx = tx_ring->reg_idx;
                int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82598EB:
                        tc = reg_idx >> 2;
                        txoff = IXGBE_TFCS_TXOFF0;
                        break;
                case ixgbe_mac_82599EB:
                        tc = 0;
                        txoff = IXGBE_TFCS_TXOFF;
                        if (dcb_i == 8) {
                                /* TC0, TC1 */
                                tc = reg_idx >> 5;
                                if (tc == 2) /* TC2, TC3 */
                                        tc += (reg_idx - 64) >> 4;
                                else if (tc == 3) /* TC4, TC5, TC6, TC7 */
                                        tc += 1 + ((reg_idx - 96) >> 3);
                        } else if (dcb_i == 4) {
                                /* TC0, TC1 */
                                tc = reg_idx >> 6;
                                if (tc == 1) {
                                        tc += (reg_idx - 64) >> 5;
                                        if (tc == 2) /* TC2, TC3 */
                                                tc += (reg_idx - 96) >> 4;
                                }
                        }
                        break;
                default:
                        tc = 0;
                }
                txoff <<= tc;
        }
#endif
        return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
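
/*
 * Worked example (illustrative only): on 82599 with 8 DCB traffic classes
 * (dcb_i == 8), a ring with reg_idx 70 yields tc = 70 >> 5 = 2; since
 * tc == 2, tc += (70 - 64) >> 4 = 0, so queue 70 belongs to TC2 and
 * TFCS.TXOFF is shifted left by 2 before the test.
 */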

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of eop */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            ixgbe_tx_xon_state(adapter, tx_ring)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
                e_err(drv, "Detected Tx Unit Hang\n"
                      "  Tx Queue             <%d>\n"
                      "  TDH, TDT             <%x>, <%x>\n"
                      "  next_to_use          <%x>\n"
                      "  next_to_clean        <%x>\n"
                      "tx_buffer_info[next_to_clean]\n"
                      "  time_stamp           <%lx>\n"
                      "  jiffies              <%lx>\n",
                      tx_ring->queue_index,
                      IXGBE_READ_REG(hw, tx_ring->head),
                      IXGBE_READ_REG(hw, tx_ring->tail),
                      tx_ring->next_to_use, eop,
                      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
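/*
 * Worked example (illustrative only): IXGBE_MAX_DATA_PER_TXD is
 * 1 << 14 = 16384 bytes, so TXD_USE_COUNT(60000) =
 * (60000 >> 14) + 1 = 3 + 1 = 4 descriptors for a 60000-byte chunk.
 */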

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->work_limit)) {
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;
                                unsigned int hlen = skb_headlen(skb);

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
                                /* adjust for FCoE Sequence Offload */
                                if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
                                    && (skb->protocol == htons(ETH_P_FCOE)) &&
                                    skb_is_gso(skb)) {
                                        hlen = skb_transport_offset(skb) +
                                                sizeof(struct fc_frame_header) +
                                                sizeof(struct fcoe_crc_eof);
                                        segs = DIV_ROUND_UP(skb->len - hlen,
                                                skb_shinfo(skb)->gso_size);
                                }
#endif /* IXGBE_FCOE */
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * hlen) + skb->len;
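                                /*
                                 * e.g. (illustrative) a TSO skb with
                                 * skb->len 9066, hlen 66 and gso_segs 7
                                 * accounts (7 - 1) * 66 + 9066 = 9462
                                 * on-wire bytes, since every segment
                                 * repeats the headers.
                                 */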
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
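        /* only restart the queue once enough descriptors for two
         * worst-case frames (2 * DESC_NEEDED) have been reclaimed */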
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++tx_ring->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        e_info(probe, "tx hang %d detected, resetting "
                               "adapter\n", adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if (count >= tx_ring->work_limit)
                ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring->reg_idx;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                        rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
                        rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
                }
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
                            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring->reg_idx;
        struct ixgbe_hw *hw = &adapter->hw;

        if (tx_ring->cpu != cpu) {
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                        txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                        txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                        txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                  IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
                        txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
                }
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i]->cpu = -1;
                ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i]->cpu = -1;
                ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct napi_struct *napi = &q_vector->napi;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

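        /*
         * VLAN_VID_MASK keeps only the 12-bit VLAN ID of the stripped
         * tag, so priority-tagged frames (VID 0) take the plain receive
         * path below.
         */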
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor holding the hardware status words
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
        u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

                /*
                 * 82599 errata, UDP frames with a 0 checksum can be marked as
                 * checksum errors.
                 */
                if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
                    (adapter->hw.mac.type == ixgbe_mac_82599EB))
                        return;

                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring,
                            int cleaned_count)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;
        unsigned int bufsz = rx_ring->rx_buf_len;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);

                if (!bi->page_dma &&
                    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = netdev_alloc_page(netdev);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
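                                /* the XOR toggles page_offset between 0
                                 * and PAGE_SIZE/2, so each page is handed
                                 * out as two half-page buffers in
                                 * alternation */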
                        }

                        bi->page_dma = dma_map_page(&pdev->dev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    DMA_FROM_DEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
                                                                        bufsz);
                        bi->skb = skb;

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
                }

                if (!bi->dma) {
                        bi->dma = dma_map_single(&pdev->dev,
                                                 bi->skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
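                /* the value released to hardware is the last initialized
                 * descriptor, one behind next_to_use, so step i back with
                 * wraparound before writing the tail */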
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
        return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
                IXGBE_RXDADV_RSCCNT_MASK) >>
                IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
                                                        u64 *count)
{
        unsigned int frag_list_size = 0;

        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
                *count += 1;
        }

        skb_shinfo(skb)->frag_list = skb->next;
        skb->next = NULL;
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
        return skb;
}
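
/*
 * Worked example (illustrative only): for an RSC chain of three buffers
 * A -> B -> C linked via ->next, with ->prev links from C back to A,
 * calling this on C walks back to A, clears the ->prev links, points
 * skb_shinfo(A)->frag_list at B, and folds the lengths of B and C into
 * A's len, data_len and truesize.
 */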

struct ixgbe_rsc_cb {
        dma_addr_t dma;
        bool delay_unmap;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
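/* the RSC bookkeeping above lives in skb->cb[], the 48-byte per-skb
 * scratch area, which comfortably fits this two-member struct */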

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i, rsc_count = 0;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
        int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                rmb(); /* read descriptor and rx_buffer_info after status DD */
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                        if ((len > IXGBE_RX_HDR_SIZE) ||
                            (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
                                len = IXGBE_RX_HDR_SIZE;
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
                            (!(staterr & IXGBE_RXD_STAT_EOP)) &&
                                 (!(skb->prev))) {
                                /*
                                 * When HWRSC is enabled, delay unmapping
                                 * of the first packet. It carries the
                                 * header information, HW may still
                                 * access the header after the writeback.
                                 * Only unmap it when EOP is reached
                                 */
                                IXGBE_RSC_CB(skb)->delay_unmap = true;
                                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
                        } else {
                                dma_unmap_single(&pdev->dev,
                                                 rx_buffer_info->dma,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                        }
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                if (upper_len) {
                        dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                        rsc_count = ixgbe_get_rsc_count(rx_desc);

                if (rsc_count) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
                } else {
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }

                if (staterr & IXGBE_RXD_STAT_EOP) {
                        if (skb->prev)
                                skb = ixgbe_transform_rsc_queue(skb,
                                                                &(rx_ring->rsc_count));
                        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                                if (IXGBE_RSC_CB(skb)->delay_unmap) {
                                        dma_unmap_single(&pdev->dev,
                                                         IXGBE_RSC_CB(skb)->dma,
                                                         rx_ring->rx_buf_len,
                                                         DMA_FROM_DEVICE);
                                        IXGBE_RSC_CB(skb)->dma = 0;
                                        IXGBE_RSC_CB(skb)->delay_unmap = false;
                                }
                                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
                                        rx_ring->rsc_count +=
                                                skb_shinfo(skb)->nr_frags;
                                else
                                        rx_ring->rsc_count++;
                                rx_ring->rsc_flush++;
                        }
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        } else {
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
                        rx_ring->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, rx_desc, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes)
                                goto next_desc;
                }
#endif /* IXGBE_FCOE */
                ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;

                mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
1367                         sizeof(struct fc_frame_header) -
1368                         sizeof(struct fcoe_crc_eof);
1369                 if (mss > 512)
1370                         mss &= ~511;
1371                 total_rx_bytes += ddp_bytes;
1372                 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1373         }
1374 #endif /* IXGBE_FCOE */
1375
1376         rx_ring->total_packets += total_rx_packets;
1377         rx_ring->total_bytes += total_rx_bytes;
1378         netdev->stats.rx_bytes += total_rx_bytes;
1379         netdev->stats.rx_packets += total_rx_packets;
1380
1381         return cleaned;
1382 }
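
/*
 * Worked example of the FCoE packet estimate above (illustrative, with
 * assumed numbers): if mss computes to 2112, mss &= ~511 rounds it down
 * to 2048, the nearest multiple of 512; ddp_bytes = 100000 then counts
 * as DIV_ROUND_UP(100000, 2048) = 49 packets toward total_rx_packets.
 */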
1383
1384 static int ixgbe_clean_rxonly(struct napi_struct *, int);
1385 /**
1386  * ixgbe_configure_msix - Configure MSI-X hardware
1387  * @adapter: board private structure
1388  *
1389  * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
1390  * interrupts.
1391  **/
1392 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1393 {
1394         struct ixgbe_q_vector *q_vector;
1395         int i, j, q_vectors, v_idx, r_idx;
1396         u32 mask;
1397
1398         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1399
1400         /*
1401          * Populate the IVAR table and set the ITR values to the
1402          * corresponding register.
1403          */
1404         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1405                 q_vector = adapter->q_vector[v_idx];
1406                 /* XXX for_each_set_bit(...) */
1407                 r_idx = find_first_bit(q_vector->rxr_idx,
1408                                        adapter->num_rx_queues);
1409
1410                 for (i = 0; i < q_vector->rxr_count; i++) {
1411                         j = adapter->rx_ring[r_idx]->reg_idx;
1412                         ixgbe_set_ivar(adapter, 0, j, v_idx);
1413                         r_idx = find_next_bit(q_vector->rxr_idx,
1414                                               adapter->num_rx_queues,
1415                                               r_idx + 1);
1416                 }
1417                 r_idx = find_first_bit(q_vector->txr_idx,
1418                                        adapter->num_tx_queues);
1419
1420                 for (i = 0; i < q_vector->txr_count; i++) {
1421                         j = adapter->tx_ring[r_idx]->reg_idx;
1422                         ixgbe_set_ivar(adapter, 1, j, v_idx);
1423                         r_idx = find_next_bit(q_vector->txr_idx,
1424                                               adapter->num_tx_queues,
1425                                               r_idx + 1);
1426                 }
1427
1428                 if (q_vector->txr_count && !q_vector->rxr_count)
1429                         /* tx only */
1430                         q_vector->eitr = adapter->tx_eitr_param;
1431                 else if (q_vector->rxr_count)
1432                         /* rx or mixed */
1433                         q_vector->eitr = adapter->rx_eitr_param;
1434
1435                 ixgbe_write_eitr(q_vector);
1436                 /* If Flow Director is enabled, set interrupt affinity */
1437                 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
1438                     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
1439                         /*
1440                          * Allocate the affinity_hint cpumask, assign the mask
1441                          * for this vector, and set our affinity_hint for
1442                          * this irq.
1443                          */
1444                         if (!alloc_cpumask_var(&q_vector->affinity_mask,
1445                                                GFP_KERNEL))
1446                                 return;
1447                         cpumask_set_cpu(v_idx, q_vector->affinity_mask);
1448                         irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
1449                                               q_vector->affinity_mask);
1450                 }
1451         }
1452
1453         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1454                 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1455                                v_idx);
1456         else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1457                 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1458         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1459
1460         /* set up to autoclear timer, and the vectors */
1461         mask = IXGBE_EIMS_ENABLE_MASK;
1462         if (adapter->num_vfs)
1463                 mask &= ~(IXGBE_EIMS_OTHER |
1464                           IXGBE_EIMS_MAILBOX |
1465                           IXGBE_EIMS_LSC);
1466         else
1467                 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1468         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1469 }
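
/*
 * Hedged sketch, not driver code: the "XXX for_each_set_bit(...)" note in
 * ixgbe_configure_msix() above hints that the bitmap walks could use
 * for_each_set_bit() from <linux/bitops.h>.  A hypothetical helper:
 */
#if 0
static void ixgbe_map_rx_ivars(struct ixgbe_adapter *adapter,
			       struct ixgbe_q_vector *q_vector, int v_idx)
{
	unsigned long r_idx;

	/* visit each rx queue index set in this vector's bitmap */
	for_each_set_bit(r_idx, q_vector->rxr_idx, adapter->num_rx_queues)
		ixgbe_set_ivar(adapter, 0, adapter->rx_ring[r_idx]->reg_idx,
			       v_idx);
}
#endif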
1470
1471 enum latency_range {
1472         lowest_latency = 0,
1473         low_latency = 1,
1474         bulk_latency = 2,
1475         latency_invalid = 255
1476 };
1477
1478 /**
1479  * ixgbe_update_itr - update the dynamic ITR value based on statistics
1480  * @adapter: pointer to adapter
1481  * @eitr: eitr setting (ints per sec) to give last timeslice
1482  * @itr_setting: current ITR latency class (lowest/low/bulk)
1483  * @packets: the number of packets during this measurement interval
1484  * @bytes: the number of bytes during this measurement interval
1485  *
1486  *      Stores a new ITR value based on packets and byte
1487  *      counts during the last interrupt.  The advantage of per interrupt
1488  *      computation is faster updates and more accurate ITR for the current
1489  *      traffic pattern.  Constants in this function were computed
1490  *      based on theoretical maximum wire speed and thresholds were set based
1491  *      on testing data as well as attempting to minimize response time
1492  *      while increasing bulk throughput.
1493  *      This functionality is controlled by the InterruptThrottleRate module
1494  *      parameter (see ixgbe_param.c).
1495  **/
1496 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
1497                            u32 eitr, u8 itr_setting,
1498                            int packets, int bytes)
1499 {
1500         unsigned int retval = itr_setting;
1501         u32 timepassed_us;
1502         u64 bytes_perint;
1503
1504         if (packets == 0)
1505                 goto update_itr_done;
1506
1507
1508         /* simple throttle rate management
1509          *    0-20MB/s lowest (100000 ints/s)
1510          *   20-100MB/s low   (20000 ints/s)
1511          *  100-1249MB/s bulk (8000 ints/s)
1512          */
1513         /* what was last interrupt timeslice? */
1514         timepassed_us = 1000000/eitr;
1515         bytes_perint = bytes / timepassed_us; /* bytes/usec */
1516
1517         switch (itr_setting) {
1518         case lowest_latency:
1519                 if (bytes_perint > adapter->eitr_low)
1520                         retval = low_latency;
1521                 break;
1522         case low_latency:
1523                 if (bytes_perint > adapter->eitr_high)
1524                         retval = bulk_latency;
1525                 else if (bytes_perint <= adapter->eitr_low)
1526                         retval = lowest_latency;
1527                 break;
1528         case bulk_latency:
1529                 if (bytes_perint <= adapter->eitr_high)
1530                         retval = low_latency;
1531                 break;
1532         }
1533
1534 update_itr_done:
1535         return retval;
1536 }
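
/*
 * Worked example (illustrative, with assumed numbers): at eitr = 20000
 * ints/s the last timeslice was 1000000/20000 = 50 us, so 3000 bytes in
 * that slice give bytes_perint = 3000/50 = 60 bytes/us (roughly 60 MB/s).
 * The switch above then steps the latency class up or down by comparing
 * that figure against adapter->eitr_low and adapter->eitr_high.
 */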
1537
1538 /**
1539  * ixgbe_write_eitr - write EITR register in hardware specific way
1540  * @q_vector: structure containing interrupt and ring information
1541  *
1542  * This function is made to be called by ethtool and by the driver
1543  * when it needs to update EITR registers at runtime.  Hardware
1544  * specific quirks/differences are taken care of here.
1545  */
1546 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1547 {
1548         struct ixgbe_adapter *adapter = q_vector->adapter;
1549         struct ixgbe_hw *hw = &adapter->hw;
1550         int v_idx = q_vector->v_idx;
1551         u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1552
1553         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1554                 /* must write high and low 16 bits to reset counter */
1555                 itr_reg |= (itr_reg << 16);
1556         } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1557                 /*
1558                  * 82599 can support a value of zero, so allow it for
1559                  * max interrupt rate, but there is an erratum where it
1560                  * cannot be zero with RSC
1561                  */
1562                 if (itr_reg == 8 &&
1563                     !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1564                         itr_reg = 0;
1565
1566                 /*
1567                  * set the WDIS bit to not clear the timer bits and cause an
1568                  * immediate assertion of the interrupt
1569                  */
1570                 itr_reg |= IXGBE_EITR_CNT_WDIS;
1571         }
1572         IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1573 }
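
/*
 * Illustrative note on the 82598 path above: the interval is mirrored
 * into both register halves, e.g. itr_reg = 0x00C8 becomes 0x00C800C8
 * after itr_reg |= (itr_reg << 16), which is what resets the counter.
 */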
1574
1575 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1576 {
1577         struct ixgbe_adapter *adapter = q_vector->adapter;
1578         u32 new_itr;
1579         u8 current_itr, ret_itr;
1580         int i, r_idx;
1581         struct ixgbe_ring *rx_ring, *tx_ring;
1582
1583         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1584         for (i = 0; i < q_vector->txr_count; i++) {
1585                 tx_ring = adapter->tx_ring[r_idx];
1586                 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1587                                            q_vector->tx_itr,
1588                                            tx_ring->total_packets,
1589                                            tx_ring->total_bytes);
1590                 /* if the result for this queue would decrease interrupt
1591                  * rate for this vector then use that result */
1592                 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
1593                                     q_vector->tx_itr - 1 : ret_itr);
1594                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1595                                       r_idx + 1);
1596         }
1597
1598         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1599         for (i = 0; i < q_vector->rxr_count; i++) {
1600                 rx_ring = adapter->rx_ring[r_idx];
1601                 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1602                                            q_vector->rx_itr,
1603                                            rx_ring->total_packets,
1604                                            rx_ring->total_bytes);
1605                 /* if the result for this queue would decrease interrupt
1606                  * rate for this vector then use that result */
1607                 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
1608                                     q_vector->rx_itr - 1 : ret_itr);
1609                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1610                                       r_idx + 1);
1611         }
1612
1613         current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
1614
1615         switch (current_itr) {
1616         /* counts and packets in update_itr are dependent on these numbers */
1617         case lowest_latency:
1618                 new_itr = 100000;
1619                 break;
1620         case low_latency:
1621                 new_itr = 20000; /* aka hwitr = ~200 */
1622                 break;
1623         case bulk_latency:
1624         default:
1625                 new_itr = 8000;
1626                 break;
1627         }
1628
1629         if (new_itr != q_vector->eitr) {
1630                 /* do an exponential smoothing */
1631                 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1632
1633                 /* save the algorithm value here, not the smoothed one */
1634                 q_vector->eitr = new_itr;
1635
1636                 ixgbe_write_eitr(q_vector);
1637         }
1638 }
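
/*
 * Worked example of the smoothing above (illustrative): moving from
 * eitr = 8000 toward a target of 100000 gives
 * (8000 * 90)/100 + (100000 * 10)/100 = 7200 + 10000 = 17200, so the
 * interrupt rate ramps over several intervals instead of jumping at once.
 */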
1639
1640 /**
1641  * ixgbe_check_overtemp_task - worker thread to check over temperature
1642  * @work: pointer to work_struct containing our data
1643  **/
1644 static void ixgbe_check_overtemp_task(struct work_struct *work)
1645 {
1646         struct ixgbe_adapter *adapter = container_of(work,
1647                                                      struct ixgbe_adapter,
1648                                                      check_overtemp_task);
1649         struct ixgbe_hw *hw = &adapter->hw;
1650         u32 eicr = adapter->interrupt_event;
1651
1652         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
1653                 return;
1654
1655         switch (hw->device_id) {
1656         case IXGBE_DEV_ID_82599_T3_LOM: {
1657                 u32 autoneg;
1658                 bool link_up = false;
1659
1660                 if (hw->mac.ops.check_link)
1661                         hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1662
1663                 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
1664                     (eicr & IXGBE_EICR_LSC))
1665                         /* Check if this is due to overtemp */
1666                         if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
1667                                 break;
1668                 return;
1669         }
1670         default:
1671                 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1672                         return;
1673                 break;
1674         }
1675         e_crit(drv,
1676                "Network adapter has been stopped because it has overheated. "
1677                "Restart the computer. If the problem persists, "
1678                "power off the system and replace the adapter\n");
1679         /* write to clear the interrupt */
1680         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
1681 }
1682
1683 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1684 {
1685         struct ixgbe_hw *hw = &adapter->hw;
1686
1687         if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1688             (eicr & IXGBE_EICR_GPI_SDP1)) {
1689                 e_crit(probe, "Fan has stopped, replace the adapter\n");
1690                 /* write to clear the interrupt */
1691                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1692         }
1693 }
1694
1695 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1696 {
1697         struct ixgbe_hw *hw = &adapter->hw;
1698
1699         if (eicr & IXGBE_EICR_GPI_SDP1) {
1700                 /* Clear the interrupt */
1701                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1702                 schedule_work(&adapter->multispeed_fiber_task);
1703         } else if (eicr & IXGBE_EICR_GPI_SDP2) {
1704                 /* Clear the interrupt */
1705                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1706                 schedule_work(&adapter->sfp_config_module_task);
1707         } else {
1708                 /* Interrupt isn't for us... */
1709                 return;
1710         }
1711 }
1712
1713 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1714 {
1715         struct ixgbe_hw *hw = &adapter->hw;
1716
1717         adapter->lsc_int++;
1718         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1719         adapter->link_check_timeout = jiffies;
1720         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1721                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1722                 IXGBE_WRITE_FLUSH(hw);
1723                 schedule_work(&adapter->watchdog_task);
1724         }
1725 }
1726
1727 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1728 {
1729         struct net_device *netdev = data;
1730         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1731         struct ixgbe_hw *hw = &adapter->hw;
1732         u32 eicr;
1733
1734         /*
1735          * Workaround for Silicon errata.  Use clear-by-write instead
1736          * of clear-by-read.  Reading with EICS will return the
1737          * interrupt causes without clearing, which will later be done
1738          * with the write to EICR.
1739          */
1740         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1741         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
1742
1743         if (eicr & IXGBE_EICR_LSC)
1744                 ixgbe_check_lsc(adapter);
1745
1746         if (eicr & IXGBE_EICR_MAILBOX)
1747                 ixgbe_msg_task(adapter);
1748
1749         if (hw->mac.type == ixgbe_mac_82598EB)
1750                 ixgbe_check_fan_failure(adapter, eicr);
1751
1752         if (hw->mac.type == ixgbe_mac_82599EB) {
1753                 ixgbe_check_sfp_event(adapter, eicr);
1754                 adapter->interrupt_event = eicr;
1755                 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1756                     ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1757                         schedule_work(&adapter->check_overtemp_task);
1758
1759                 /* Handle Flow Director Full threshold interrupt */
1760                 if (eicr & IXGBE_EICR_FLOW_DIR) {
1761                         int i;
1762                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1763                         /* Disable transmits before FDIR Re-initialization */
1764                         netif_tx_stop_all_queues(netdev);
1765                         for (i = 0; i < adapter->num_tx_queues; i++) {
1766                                 struct ixgbe_ring *tx_ring =
1767                                                             adapter->tx_ring[i];
1768                                 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
1769                                                        &tx_ring->reinit_state))
1770                                         schedule_work(&adapter->fdir_reinit_task);
1771                         }
1772                 }
1773         }
1774         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1775                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1776
1777         return IRQ_HANDLED;
1778 }
1779
1780 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1781                                            u64 qmask)
1782 {
1783         u32 mask;
1784
1785         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1786                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1787                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1788         } else {
1789                 mask = (qmask & 0xFFFFFFFF);
1790                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1791                 mask = (qmask >> 32);
1792                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1793         }
1794         /* skip the flush */
1795 }
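
/*
 * Illustrative note: on non-82598 parts the 64-bit qmask spans the two
 * EIMS_EX registers, so enabling only queue vector 40 (qmask = 1ULL << 40)
 * writes 0 to EIMS_EX(0) and (1 << 8) to EIMS_EX(1).
 */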
1796
1797 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1798                                             u64 qmask)
1799 {
1800         u32 mask;
1801
1802         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1803                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1804                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1805         } else {
1806                 mask = (qmask & 0xFFFFFFFF);
1807                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1808                 mask = (qmask >> 32);
1809                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1810         }
1811         /* skip the flush */
1812 }
1813
1814 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1815 {
1816         struct ixgbe_q_vector *q_vector = data;
1817         struct ixgbe_adapter  *adapter = q_vector->adapter;
1818         struct ixgbe_ring     *tx_ring;
1819         int i, r_idx;
1820
1821         if (!q_vector->txr_count)
1822                 return IRQ_HANDLED;
1823
1824         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1825         for (i = 0; i < q_vector->txr_count; i++) {
1826                 tx_ring = adapter->tx_ring[r_idx];
1827                 tx_ring->total_bytes = 0;
1828                 tx_ring->total_packets = 0;
1829                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1830                                       r_idx + 1);
1831         }
1832
1833         /* EIAM disabled interrupts (on this vector) for us */
1834         napi_schedule(&q_vector->napi);
1835
1836         return IRQ_HANDLED;
1837 }
1838
1839 /**
1840  * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1841  * @irq: unused
1842  * @data: pointer to our q_vector struct for this interrupt vector
1843  **/
1844 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1845 {
1846         struct ixgbe_q_vector *q_vector = data;
1847         struct ixgbe_adapter  *adapter = q_vector->adapter;
1848         struct ixgbe_ring  *rx_ring;
1849         int r_idx;
1850         int i;
1851
1852         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1853         for (i = 0;  i < q_vector->rxr_count; i++) {
1854                 rx_ring = adapter->rx_ring[r_idx];
1855                 rx_ring->total_bytes = 0;
1856                 rx_ring->total_packets = 0;
1857                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1858                                       r_idx + 1);
1859         }
1860
1861         if (!q_vector->rxr_count)
1862                 return IRQ_HANDLED;
1863
1864         /* no need to disable interrupts on this vector: */
1865         /* EIAM disabled interrupts (on this vector) for us */
1866         napi_schedule(&q_vector->napi);
1867
1868         return IRQ_HANDLED;
1869 }
1870
1871 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1872 {
1873         struct ixgbe_q_vector *q_vector = data;
1874         struct ixgbe_adapter  *adapter = q_vector->adapter;
1875         struct ixgbe_ring  *ring;
1876         int r_idx;
1877         int i;
1878
1879         if (!q_vector->txr_count && !q_vector->rxr_count)
1880                 return IRQ_HANDLED;
1881
1882         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1883         for (i = 0; i < q_vector->txr_count; i++) {
1884                 ring = adapter->tx_ring[r_idx];
1885                 ring->total_bytes = 0;
1886                 ring->total_packets = 0;
1887                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1888                                       r_idx + 1);
1889         }
1890
1891         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1892         for (i = 0; i < q_vector->rxr_count; i++) {
1893                 ring = adapter->rx_ring[r_idx];
1894                 ring->total_bytes = 0;
1895                 ring->total_packets = 0;
1896                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1897                                       r_idx + 1);
1898         }
1899
1900         /* EIAM disabled interrupts (on this vector) for us */
1901         napi_schedule(&q_vector->napi);
1902
1903         return IRQ_HANDLED;
1904 }
1905
1906 /**
1907  * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1908  * @napi: napi struct with our device's info in it
1909  * @budget: amount of work driver is allowed to do this pass, in packets
1910  *
1911  * This function is optimized for cleaning one queue only on a single
1912  * q_vector!!!
1913  **/
1914 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1915 {
1916         struct ixgbe_q_vector *q_vector =
1917                                container_of(napi, struct ixgbe_q_vector, napi);
1918         struct ixgbe_adapter *adapter = q_vector->adapter;
1919         struct ixgbe_ring *rx_ring = NULL;
1920         int work_done = 0;
1921         long r_idx;
1922
1923         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1924         rx_ring = adapter->rx_ring[r_idx];
1925 #ifdef CONFIG_IXGBE_DCA
1926         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1927                 ixgbe_update_rx_dca(adapter, rx_ring);
1928 #endif
1929
1930         ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1931
1932         /* If all Rx work done, exit the polling mode */
1933         if (work_done < budget) {
1934                 napi_complete(napi);
1935                 if (adapter->rx_itr_setting & 1)
1936                         ixgbe_set_itr_msix(q_vector);
1937                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1938                         ixgbe_irq_enable_queues(adapter,
1939                                                 ((u64)1 << q_vector->v_idx));
1940         }
1941
1942         return work_done;
1943 }
1944
1945 /**
1946  * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
1947  * @napi: napi struct with our device's info in it
1948  * @budget: amount of work driver is allowed to do this pass, in packets
1949  *
1950  * This function will clean more than one rx and tx queue associated with a
1951  * q_vector.
1952  **/
1953 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1954 {
1955         struct ixgbe_q_vector *q_vector =
1956                                container_of(napi, struct ixgbe_q_vector, napi);
1957         struct ixgbe_adapter *adapter = q_vector->adapter;
1958         struct ixgbe_ring *ring = NULL;
1959         int work_done = 0, i;
1960         long r_idx;
1961         bool tx_clean_complete = true;
1962
1963         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1964         for (i = 0; i < q_vector->txr_count; i++) {
1965                 ring = adapter->tx_ring[r_idx];
1966 #ifdef CONFIG_IXGBE_DCA
1967                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1968                         ixgbe_update_tx_dca(adapter, ring);
1969 #endif
1970                 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1971                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1972                                       r_idx + 1);
1973         }
1974
1975         /* attempt to distribute budget to each queue fairly, but don't allow
1976          * the budget to go below 1 because we'll exit polling */
1977         budget /= (q_vector->rxr_count ?: 1);
1978         budget = max(budget, 1);
1979         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1980         for (i = 0; i < q_vector->rxr_count; i++) {
1981                 ring = adapter->rx_ring[r_idx];
1982 #ifdef CONFIG_IXGBE_DCA
1983                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1984                         ixgbe_update_rx_dca(adapter, ring);
1985 #endif
1986                 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1987                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1988                                       r_idx + 1);
1989         }
1990
1991         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1992         ring = adapter->rx_ring[r_idx];
1993         /* If all Rx work done, exit the polling mode */
1994         if (work_done < budget) {
1995                 napi_complete(napi);
1996                 if (adapter->rx_itr_setting & 1)
1997                         ixgbe_set_itr_msix(q_vector);
1998                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1999                         ixgbe_irq_enable_queues(adapter,
2000                                                 ((u64)1 << q_vector->v_idx));
2001                 return 0;
2002         }
2003
2004         return work_done;
2005 }
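
/*
 * Worked example of the budget split above (illustrative): with
 * budget = 64 and rxr_count = 3, each rx ring is polled with 64/3 = 21
 * packets of budget; with rxr_count = 0 the "?: 1" fallback leaves the
 * budget intact, and max(budget, 1) keeps a heavily divided budget from
 * reaching 0, which would end polling immediately.
 */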
2006
2007 /**
2008  * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
2009  * @napi: napi struct with our device's info in it
2010  * @budget: amount of work driver is allowed to do this pass, in packets
2011  *
2012  * This function is optimized for cleaning one queue only on a single
2013  * q_vector!!!
2014  **/
2015 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2016 {
2017         struct ixgbe_q_vector *q_vector =
2018                                container_of(napi, struct ixgbe_q_vector, napi);
2019         struct ixgbe_adapter *adapter = q_vector->adapter;
2020         struct ixgbe_ring *tx_ring = NULL;
2021         int work_done = 0;
2022         long r_idx;
2023
2024         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2025         tx_ring = adapter->tx_ring[r_idx];
2026 #ifdef CONFIG_IXGBE_DCA
2027         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2028                 ixgbe_update_tx_dca(adapter, tx_ring);
2029 #endif
2030
2031         if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2032                 work_done = budget;
2033
2034         /* If all Tx work done, exit the polling mode */
2035         if (work_done < budget) {
2036                 napi_complete(napi);
2037                 if (adapter->tx_itr_setting & 1)
2038                         ixgbe_set_itr_msix(q_vector);
2039                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2040                         ixgbe_irq_enable_queues(adapter,
2041                                                 ((u64)1 << q_vector->v_idx));
2042         }
2043
2044         return work_done;
2045 }
2046
2047 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2048                                      int r_idx)
2049 {
2050         struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2051
2052         set_bit(r_idx, q_vector->rxr_idx);
2053         q_vector->rxr_count++;
2054 }
2055
2056 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2057                                      int t_idx)
2058 {
2059         struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2060
2061         set_bit(t_idx, q_vector->txr_idx);
2062         q_vector->txr_count++;
2063 }
2064
2065 /**
2066  * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2067  * @adapter: board private structure to initialize
2068  * @vectors: allotted vector count for descriptor rings
2069  *
2070  * This function maps descriptor rings to the queue-specific vectors
2071  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
2072  * one vector per ring/queue, but on a constrained vector budget, we
2073  * group the rings as "efficiently" as possible.  You would add new
2074  * mapping configurations in here.
2075  **/
2076 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2077                                       int vectors)
2078 {
2079         int v_start = 0;
2080         int rxr_idx = 0, txr_idx = 0;
2081         int rxr_remaining = adapter->num_rx_queues;
2082         int txr_remaining = adapter->num_tx_queues;
2083         int i, j;
2084         int rqpv, tqpv;
2085         int err = 0;
2086
2087         /* No mapping required if MSI-X is disabled. */
2088         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2089                 goto out;
2090
2091         /*
2092          * The ideal configuration...
2093          * We have enough vectors to map one per queue.
2094          */
2095         if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2096                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2097                         map_vector_to_rxq(adapter, v_start, rxr_idx);
2098
2099                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2100                         map_vector_to_txq(adapter, v_start, txr_idx);
2101
2102                 goto out;
2103         }
2104
2105         /*
2106          * If we don't have enough vectors for a 1-to-1
2107          * mapping, we'll have to group them so there are
2108          * multiple queues per vector.
2109          */
2110         /* Re-adjusting *qpv takes care of the remainder. */
2111         for (i = v_start; i < vectors; i++) {
2112                 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
2113                 for (j = 0; j < rqpv; j++) {
2114                         map_vector_to_rxq(adapter, i, rxr_idx);
2115                         rxr_idx++;
2116                         rxr_remaining--;
2117                 }
2118         }
2119         for (i = v_start; i < vectors; i++) {
2120                 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2121                 for (j = 0; j < tqpv; j++) {
2122                         map_vector_to_txq(adapter, i, txr_idx);
2123                         txr_idx++;
2124                         txr_remaining--;
2125                 }
2126         }
2127
2128 out:
2129         return err;
2130 }
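
/*
 * Worked example of the grouped mapping above (illustrative): with
 * 4 vectors and 10 Rx queues, rqpv is recomputed each pass as
 * DIV_ROUND_UP(remaining, vectors left): ceil(10/4) = 3, ceil(7/3) = 3,
 * ceil(4/2) = 2, ceil(2/1) = 2, a 3/3/2/2 spread with no queue left
 * unmapped.  The Tx loop distributes txr_remaining the same way.
 */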
2131
2132 /**
2133  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2134  * @adapter: board private structure
2135  *
2136  * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2137  * interrupts from the kernel.
2138  **/
2139 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2140 {
2141         struct net_device *netdev = adapter->netdev;
2142         irqreturn_t (*handler)(int, void *);
2143         int i, vector, q_vectors, err;
2144         int ri = 0, ti = 0;
2145
2146         /* Decrement for Other and TCP Timer vectors */
2147         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2148
2149         /* Map the Tx/Rx rings to the vectors we were allotted. */
2150         err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2151         if (err)
2152                 goto out;
2153
2154 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
2155                          (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
2156                          &ixgbe_msix_clean_many)
2157         for (vector = 0; vector < q_vectors; vector++) {
2158                 handler = SET_HANDLER(adapter->q_vector[vector]);
2159
2160                 if (handler == &ixgbe_msix_clean_rx) {
2161                         sprintf(adapter->name[vector], "%s-%s-%d",
2162                                 netdev->name, "rx", ri++);
2163                 } else if (handler == &ixgbe_msix_clean_tx) {
2164                         sprintf(adapter->name[vector], "%s-%s-%d",
2165                                 netdev->name, "tx", ti++);
2166                 } else
2167                         sprintf(adapter->name[vector], "%s-%s-%d",
2168                                 netdev->name, "TxRx", vector);
2169
2170                 err = request_irq(adapter->msix_entries[vector].vector,
2171                                   handler, 0, adapter->name[vector],
2172                                   adapter->q_vector[vector]);
2173                 if (err) {
2174                         e_err(probe, "request_irq failed for MSIX interrupt. "
2175                               "Error: %d\n", err);
2176                         goto free_queue_irqs;
2177                 }
2178         }
2179
2180         sprintf(adapter->name[vector], "%s:lsc", netdev->name);
2181         err = request_irq(adapter->msix_entries[vector].vector,
2182                           ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
2183         if (err) {
2184                 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2185                 goto free_queue_irqs;
2186         }
2187
2188         return 0;
2189
2190 free_queue_irqs:
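        /*
         * vector holds the index of the request that failed; --vector
         * steps down in lock-step with i so each previously requested
         * queue IRQ is freed exactly once.
         */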
2191         for (i = vector - 1; i >= 0; i--)
2192                 free_irq(adapter->msix_entries[--vector].vector,
2193                          adapter->q_vector[i]);
2194         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2195         pci_disable_msix(adapter->pdev);
2196         kfree(adapter->msix_entries);
2197         adapter->msix_entries = NULL;
2198 out:
2199         return err;
2200 }
2201
2202 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2203 {
2204         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2205         u8 current_itr;
2206         u32 new_itr = q_vector->eitr;
2207         struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2208         struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2209
2210         q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2211                                             q_vector->tx_itr,
2212                                             tx_ring->total_packets,
2213                                             tx_ring->total_bytes);
2214         q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
2215                                             q_vector->rx_itr,
2216                                             rx_ring->total_packets,
2217                                             rx_ring->total_bytes);
2218
2219         current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
2220
2221         switch (current_itr) {
2222         /* counts and packets in update_itr are dependent on these numbers */
2223         case lowest_latency:
2224                 new_itr = 100000;
2225                 break;
2226         case low_latency:
2227                 new_itr = 20000; /* aka hwitr = ~200 */
2228                 break;
2229         case bulk_latency:
2230                 new_itr = 8000;
2231                 break;
2232         default:
2233                 break;
2234         }
2235
2236         if (new_itr != q_vector->eitr) {
2237                 /* do an exponential smoothing */
2238                 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
2239
2240                 /* save the algorithm value here, not the smoothed one */
2241                 q_vector->eitr = new_itr;
2242
2243                 ixgbe_write_eitr(q_vector);
2244         }
2245 }
2246
2247 /**
2248  * ixgbe_irq_enable - Enable default interrupt generation settings
2249  * @adapter: board private structure
2250  **/
2251 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2252                                     bool flush)
2253 {
2254         u32 mask;
2255
2256         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2257         if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2258                 mask |= IXGBE_EIMS_GPI_SDP0;
2259         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2260                 mask |= IXGBE_EIMS_GPI_SDP1;
2261         if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2262                 mask |= IXGBE_EIMS_ECC;
2263                 mask |= IXGBE_EIMS_GPI_SDP1;
2264                 mask |= IXGBE_EIMS_GPI_SDP2;
2265                 if (adapter->num_vfs)
2266                         mask |= IXGBE_EIMS_MAILBOX;
2267         }
2268         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2269             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2270                 mask |= IXGBE_EIMS_FLOW_DIR;
2271
2272         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2273         if (queues)
2274                 ixgbe_irq_enable_queues(adapter, ~0);
2275         if (flush)
2276                 IXGBE_WRITE_FLUSH(&adapter->hw);
2277
2278         if (adapter->num_vfs > 32) {
2279                 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2280                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2281         }
2282 }
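
/*
 * Worked example of the EITRSEL write above (illustrative): with
 * adapter->num_vfs = 40, eitrsel = (1 << (40 - 32)) - 1 = 0xFF, i.e. one
 * bit for each of the eight VFs beyond the first 32.
 */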
2283
2284 /**
2285  * ixgbe_intr - legacy mode Interrupt Handler
2286  * @irq: interrupt number
2287  * @data: pointer to a network interface device structure
2288  **/
2289 static irqreturn_t ixgbe_intr(int irq, void *data)
2290 {
2291         struct net_device *netdev = data;
2292         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2293         struct ixgbe_hw *hw = &adapter->hw;
2294         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2295         u32 eicr;
2296
2297         /*
2298          * Workaround for silicon errata on 82598.  Mask the interrupts
2299          * before the read of EICR.
2300          */
2301         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2302
2303         /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
2304          * therefore no explicit interrupt disable is necessary */
2305         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2306         if (!eicr) {
2307                 /*
2308                  * shared interrupt alert!
2309                  * make sure interrupts are enabled because the read will
2310                  * have disabled interrupts due to EIAM.  Finish the
2311                  * workaround of silicon errata on 82598: unmask the
2312                  * interrupt that we masked before the EICR read.
2313                  */
2314                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2315                         ixgbe_irq_enable(adapter, true, true);
2316                 return IRQ_NONE;        /* Not our interrupt */
2317         }
2318
2319         if (eicr & IXGBE_EICR_LSC)
2320                 ixgbe_check_lsc(adapter);
2321
2322         if (hw->mac.type == ixgbe_mac_82599EB)
2323                 ixgbe_check_sfp_event(adapter, eicr);
2324
2325         ixgbe_check_fan_failure(adapter, eicr);
2326         if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2327             ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2328                 schedule_work(&adapter->check_overtemp_task);
2329
2330         if (napi_schedule_prep(&(q_vector->napi))) {
2331                 adapter->tx_ring[0]->total_packets = 0;
2332                 adapter->tx_ring[0]->total_bytes = 0;
2333                 adapter->rx_ring[0]->total_packets = 0;
2334                 adapter->rx_ring[0]->total_bytes = 0;
2335                 /* would disable interrupts here but EIAM disabled it */
2336                 __napi_schedule(&(q_vector->napi));
2337         }
2338
2339         /*
2340          * re-enable link (maybe) and non-queue interrupts, no flush.
2341          * ixgbe_poll will re-enable the queue interrupts
2342          */
2343
2344         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2345                 ixgbe_irq_enable(adapter, false, false);
2346
2347         return IRQ_HANDLED;
2348 }
2349
2350 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2351 {
2352         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2353
2354         for (i = 0; i < q_vectors; i++) {
2355                 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
2356                 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2357                 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2358                 q_vector->rxr_count = 0;
2359                 q_vector->txr_count = 0;
2360         }
2361 }
2362
2363 /**
2364  * ixgbe_request_irq - initialize interrupts
2365  * @adapter: board private structure
2366  *
2367  * Attempts to configure interrupts using the best available
2368  * capabilities of the hardware and kernel.
2369  **/
2370 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2371 {
2372         struct net_device *netdev = adapter->netdev;
2373         int err;
2374
2375         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2376                 err = ixgbe_request_msix_irqs(adapter);
2377         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2378                 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2379                                   netdev->name, netdev);
2380         } else {
2381                 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2382                                   netdev->name, netdev);
2383         }
2384
2385         if (err)
2386                 e_err(probe, "request_irq failed, Error %d\n", err);
2387
2388         return err;
2389 }
2390
2391 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2392 {
2393         struct net_device *netdev = adapter->netdev;
2394
2395         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2396                 int i, q_vectors;
2397
2398                 q_vectors = adapter->num_msix_vectors;
2399
2400                 i = q_vectors - 1;
2401                 free_irq(adapter->msix_entries[i].vector, netdev);
2402
2403                 i--;
2404                 for (; i >= 0; i--) {
2405                         free_irq(adapter->msix_entries[i].vector,
2406                                  adapter->q_vector[i]);
2407                 }
2408
2409                 ixgbe_reset_q_vectors(adapter);
2410         } else {
2411                 free_irq(adapter->pdev->irq, netdev);
2412         }
2413 }
2414
2415 /**
2416  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2417  * @adapter: board private structure
2418  **/
2419 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2420 {
2421         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2422                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2423         } else {
2424                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2425                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2426                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2427                 if (adapter->num_vfs > 32)
2428                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2429         }
2430         IXGBE_WRITE_FLUSH(&adapter->hw);
2431         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2432                 int i;
2433                 for (i = 0; i < adapter->num_msix_vectors; i++)
2434                         synchronize_irq(adapter->msix_entries[i].vector);
2435         } else {
2436                 synchronize_irq(adapter->pdev->irq);
2437         }
2438 }
2439
2440 /**
2441  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2442  * @adapter: board private structure
2443  **/
2444 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2445 {
2446         struct ixgbe_hw *hw = &adapter->hw;
2447
2448         IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
2449                         EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
2450
2451         ixgbe_set_ivar(adapter, 0, 0, 0);
2452         ixgbe_set_ivar(adapter, 1, 0, 0);
2453
2454         map_vector_to_rxq(adapter, 0, 0);
2455         map_vector_to_txq(adapter, 0, 0);
2456
2457         e_info(hw, "Legacy interrupt IVAR setup done\n");
2458 }
2459
2460 /**
2461  * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2462  * @adapter: board private structure
2463  * @ring: structure containing ring specific data
2464  *
2465  * Configure the Tx descriptor ring after a reset.
2466  **/
2467 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2468                              struct ixgbe_ring *ring)
2469 {
2470         struct ixgbe_hw *hw = &adapter->hw;
2471         u64 tdba = ring->dma;
2472         int wait_loop = 10;
2473         u32 txdctl;
2474         u16 reg_idx = ring->reg_idx;
2475
2476         /* disable queue to avoid issues while updating state */
2477         txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2478         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2479                         txdctl & ~IXGBE_TXDCTL_ENABLE);
2480         IXGBE_WRITE_FLUSH(hw);
2481
2482         IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2483                         (tdba & DMA_BIT_MASK(32)));
2484         IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2485         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2486                         ring->count * sizeof(union ixgbe_adv_tx_desc));
2487         IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2488         IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2489         ring->head = IXGBE_TDH(reg_idx);
2490         ring->tail = IXGBE_TDT(reg_idx);
2491
2492         /* configure fetching thresholds */
2493         if (adapter->rx_itr_setting == 0) {
2494                 /* cannot set wthresh when itr==0 */
2495                 txdctl &= ~0x007F0000;
2496         } else {
2497                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2498                 txdctl |= (8 << 16);
2499         }
2500         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2501                 /* PThresh workaround for Tx hang with DFP enabled. */
2502                 txdctl |= 32;
2503         }
2504
2505         /* reinitialize flowdirector state */
2506         set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
2507
2508         /* enable queue */
2509         txdctl |= IXGBE_TXDCTL_ENABLE;
2510         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2511
2512         /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2513         if (hw->mac.type == ixgbe_mac_82598EB &&
2514             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2515                 return;
2516
2517         /* poll to verify queue is enabled */
2518         do {
2519                 msleep(1);
2520                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2521         } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2522         if (!wait_loop)
2523                 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2524 }
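
/*
 * Illustrative note on the WTHRESH handling above: the field occupies
 * TXDCTL bits 22:16, so txdctl &= ~0x007F0000 clears it and
 * txdctl |= (8 << 16) asks for write-back only after 8 descriptors
 * accumulate; when itr == 0 it is left at 0 so completions are written
 * back immediately.
 */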
2525
2526 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2527 {
2528         struct ixgbe_hw *hw = &adapter->hw;
2529         u32 rttdcs;
2530         u32 mask;
2531
2532         if (hw->mac.type == ixgbe_mac_82598EB)
2533                 return;
2534
2535         /* disable the arbiter while setting MTQC */
2536         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2537         rttdcs |= IXGBE_RTTDCS_ARBDIS;
2538         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2539
2540         /* set transmit pool layout */
2541         mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2542         switch (adapter->flags & mask) {
2543
2544         case (IXGBE_FLAG_SRIOV_ENABLED):
2545                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2546                                 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2547                 break;
2548
2549         case (IXGBE_FLAG_DCB_ENABLED):
2550                 /* We enable 8 traffic classes, DCB only */
2551                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2552                               (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2553                 break;
2554
2555         default:
2556                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2557                 break;
2558         }
2559
2560         /* re-enable the arbiter */
2561         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2562         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2563 }
2564
2565 /**
2566  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2567  * @adapter: board private structure
2568  *
2569  * Configure the Tx unit of the MAC after a reset.
2570  **/
2571 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2572 {
2573         struct ixgbe_hw *hw = &adapter->hw;
2574         u32 dmatxctl;
2575         u32 i;
2576
2577         ixgbe_setup_mtqc(adapter);
2578
2579         if (hw->mac.type != ixgbe_mac_82598EB) {
2580                 /* DMATXCTL.EN must be set before Tx queues are enabled */
2581                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2582                 dmatxctl |= IXGBE_DMATXCTL_TE;
2583                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2584         }
2585
2586         /* Setup the HW Tx Head and Tail descriptor pointers */
2587         for (i = 0; i < adapter->num_tx_queues; i++)
2588                 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2589 }
2590
2591 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2592
2593 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2594                                    struct ixgbe_ring *rx_ring)
2595 {
2596         u32 srrctl;
2597         int index;
2598         struct ixgbe_ring_feature *feature = adapter->ring_feature;
2599
2600         index = rx_ring->reg_idx;
2601         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2602                 unsigned long mask;
2603                 mask = (unsigned long) feature[RING_F_RSS].mask;
2604                 index = index & mask;
2605         }
2606         srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
2607
2608         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2609         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2610         if (adapter->num_vfs)
2611                 srrctl |= IXGBE_SRRCTL_DROP_EN;
2612
2613         srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2614                   IXGBE_SRRCTL_BSIZEHDR_MASK;
2615
2616         if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2617 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2618                 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2619 #else
2620                 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2621 #endif
2622                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2623         } else {
2624                 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2625                           IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2626                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2627         }
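             /*
              * Example: with a 2KB rx_buf_len and packet split disabled,
              * ALIGN(2048, 1024) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT programs a
              * packet buffer size of 2, i.e. the field is expressed in
              * 1KB units (values here are illustrative).
              */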
2628
2629         IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2630 }
2631
2632 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2633 {
2634         struct ixgbe_hw *hw = &adapter->hw;
2635         static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2636                           0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2637                           0x6A3E67EA, 0x14364D17, 0x3BED200D};
2638         u32 mrqc = 0, reta = 0;
2639         u32 rxcsum;
2640         int i, j;
2641         int mask;
2642
2643         /* Fill out hash function seeds */
2644         for (i = 0; i < 10; i++)
2645                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2646
2647         /* Fill out redirection table */
2648         for (i = 0, j = 0; i < 128; i++, j++) {
2649                 if (j == adapter->ring_feature[RING_F_RSS].indices)
2650                         j = 0;
2651                 /* reta = 4-byte sliding window; each byte is j * 0x11,
2652                  * i.e. 0x00, 0x11, ... up to (indices - 1) * 0x11 */
2653                 reta = (reta << 8) | (j * 0x11);
2654                 if ((i & 3) == 3)
2655                         IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2656         }
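             /*
              * Example: with 4 RSS indices the bytes cycle 00 11 22 33, and
              * every fourth byte flushes the accumulated 32-bit window into
              * RETA(i >> 2), filling all 32 registers of the 128-entry table.
              */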
2657
2658         /* Disable checksum indication in the descriptor; this enables the RSS hash */
2659         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2660         rxcsum |= IXGBE_RXCSUM_PCSD;
2661         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2662
2663         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2664                 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2665         else
2666                 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2667 #ifdef CONFIG_IXGBE_DCB
2668                                          | IXGBE_FLAG_DCB_ENABLED
2669 #endif
2670                                          | IXGBE_FLAG_SRIOV_ENABLED
2671                                         );
2672
2673         switch (mask) {
2674         case (IXGBE_FLAG_RSS_ENABLED):
2675                 mrqc = IXGBE_MRQC_RSSEN;
2676                 break;
2677         case (IXGBE_FLAG_SRIOV_ENABLED):
2678                 mrqc = IXGBE_MRQC_VMDQEN;
2679                 break;
2680 #ifdef CONFIG_IXGBE_DCB
2681         case (IXGBE_FLAG_DCB_ENABLED):
2682                 mrqc = IXGBE_MRQC_RT8TCEN;
2683                 break;
2684 #endif /* CONFIG_IXGBE_DCB */
2685         default:
2686                 break;
2687         }
2688
2689         /* Perform hash on these packet types */
2690         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2691               | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2692               | IXGBE_MRQC_RSS_FIELD_IPV6
2693               | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2694
2695         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2696 }
2697
2698 /**
2699  * ixgbe_configure_rscctl - enable RSC for the indicated ring
2700  * @adapter:    address of board private structure
2701  * @ring:       structure containing ring specific data
2702  **/
2703 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2704                                    struct ixgbe_ring *ring)
2705 {
2706         struct ixgbe_hw *hw = &adapter->hw;
2707         u32 rscctrl;
2708         int rx_buf_len;
2709         u16 reg_idx = ring->reg_idx;
2710
2711         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
2712                 return;
2713
2714         rx_buf_len = ring->rx_buf_len;
2715         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2716         rscctrl |= IXGBE_RSCCTL_RSCEN;
2717         /*
2718          * we must limit the number of descriptors so that the
2719          * total size of max desc * buf_len is not greater
2720          * than 65535
2721          */
2722         if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2723 #if (MAX_SKB_FRAGS > 16)
2724                 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2725 #elif (MAX_SKB_FRAGS > 8)
2726                 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2727 #elif (MAX_SKB_FRAGS > 4)
2728                 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2729 #else
2730                 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2731 #endif
2732         } else {
2733                 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2734                         rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2735                 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2736                         rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2737                 else
2738                         rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2739         }
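             /*
              * Example: a 2KB buffer lands in the "< 4096" bucket, so
              * MAXDESC_16 caps a coalesced receive at 16 * 2048 = 32KB,
              * comfortably below the 65535 byte limit noted above.
              */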
2740         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2741 }
2742
2743 /**
2744  *  ixgbe_set_uta - Set unicast filter table address
2745  *  @adapter: board private structure
2746  *
2747  *  The unicast table address is a register array of 32-bit registers.
2748  *  The table is meant to be used in a way similar to how the MTA is used;
2749  *  however, due to certain limitations in the hardware it is necessary to
2750  *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2751  *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
2752  **/
2753 static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2754 {
2755         struct ixgbe_hw *hw = &adapter->hw;
2756         int i;
2757
2758         /* The UTA table only exists on 82599 hardware and newer */
2759         if (hw->mac.type < ixgbe_mac_82599EB)
2760                 return;
2761
2762         /* we only need to do this if VMDq is enabled */
2763         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2764                 return;
2765
2766         for (i = 0; i < 128; i++)
2767                 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2768 }
2769
2770 #define IXGBE_MAX_RX_DESC_POLL 10
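     /* each poll below sleeps ~1ms, so the worst-case wait is ~10ms */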
2771 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2772                                        struct ixgbe_ring *ring)
2773 {
2774         struct ixgbe_hw *hw = &adapter->hw;
2775         int reg_idx = ring->reg_idx;
2776         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2777         u32 rxdctl;
2778
2779         /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2780         if (hw->mac.type == ixgbe_mac_82598EB &&
2781             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2782                 return;
2783
2784         do {
2785                 msleep(1);
2786                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2787         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
2788
2789         if (!wait_loop) {
2790                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
2791                       "the polling period\n", reg_idx);
2792         }
2793 }
2794
2795 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2796                              struct ixgbe_ring *ring)
2797 {
2798         struct ixgbe_hw *hw = &adapter->hw;
2799         u64 rdba = ring->dma;
2800         u32 rxdctl;
2801         u16 reg_idx = ring->reg_idx;
2802
2803         /* disable queue to avoid issues while updating state */
2804         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2805         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
2806                         rxdctl & ~IXGBE_RXDCTL_ENABLE);
2807         IXGBE_WRITE_FLUSH(hw);
2808
2809         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2810         IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
2811         IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
2812                         ring->count * sizeof(union ixgbe_adv_rx_desc));
2813         IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2814         IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2815         ring->head = IXGBE_RDH(reg_idx);
2816         ring->tail = IXGBE_RDT(reg_idx);
2817
2818         ixgbe_configure_srrctl(adapter, ring);
2819         ixgbe_configure_rscctl(adapter, ring);
2820
2821         if (hw->mac.type == ixgbe_mac_82598EB) {
2822                 /*
2823                  * enable cache line friendly hardware writes:
2824                  * PTHRESH=32 descriptors (half the internal cache),
2825                  * this also removes ugly rx_no_buffer_count increment
2826                  * HTHRESH=4 descriptors (to minimize latency on fetch)
2827                  * WTHRESH=8 burst writeback up to two cache lines
2828                  */
2829                 rxdctl &= ~0x3FFFFF;
2830                 rxdctl |=  0x080420;
2831         }
2832
2833         /* enable receive descriptor ring */
2834         rxdctl |= IXGBE_RXDCTL_ENABLE;
2835         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2836
2837         ixgbe_rx_desc_queue_enable(adapter, ring);
2838         ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
2839 }
2840
2841 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2842 {
2843         struct ixgbe_hw *hw = &adapter->hw;
2844         int p;
2845
2846         /* PSRTYPE must be initialized in non-82598 adapters */
2847         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2848                       IXGBE_PSRTYPE_UDPHDR |
2849                       IXGBE_PSRTYPE_IPV4HDR |
2850                       IXGBE_PSRTYPE_L2HDR |
2851                       IXGBE_PSRTYPE_IPV6HDR;
2852
2853         if (hw->mac.type == ixgbe_mac_82598EB)
2854                 return;
2855
2856         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2857                 psrtype |= (adapter->num_rx_queues_per_pool << 29);
2858
2859         for (p = 0; p < adapter->num_rx_pools; p++)
2860                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
2861                                 psrtype);
2862 }
2863
2864 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2865 {
2866         struct ixgbe_hw *hw = &adapter->hw;
2867         u32 gcr_ext;
2868         u32 vt_reg_bits;
2869         u32 reg_offset, vf_shift;
2870         u32 vmdctl;
2871
2872         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2873                 return;
2874
2875         vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2876         vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2877         vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2878         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2879
2880         vf_shift = adapter->num_vfs % 32;
2881         reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
2882
2883         /* Enable only the PF's pool for Tx/Rx */
2884         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2885         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2886         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2887         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
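             /*
              * Example: with 8 VFs the PF owns pool 8, so vf_shift is 8 and
              * reg_offset is 0; VFRE(0)/VFTE(0) are written with 0x100 and
              * the upper registers are cleared (illustrative values).
              */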
2888         IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2889
2890         /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2891         hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2892
2893         /*
2894          * Set up VF register offsets for selected VT Mode,
2895          * i.e. 32 or 64 VFs for SR-IOV
2896          */
2897         gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2898         gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2899         gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2900         IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2901
2902         /* enable Tx loopback for VF/PF communication */
2903         IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2904 }
2905
2906 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2907 {
2908         struct ixgbe_hw *hw = &adapter->hw;
2909         struct net_device *netdev = adapter->netdev;
2910         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2911         int rx_buf_len;
2912         struct ixgbe_ring *rx_ring;
2913         int i;
2914         u32 mhadd, hlreg0;
2915
2916         /* Decide whether to use packet split mode or not */
2917         /* Do not use packet split if we're in SR-IOV Mode */
2918         if (!adapter->num_vfs)
2919                 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2920
2921         /* Set the RX buffer length according to the mode */
2922         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2923                 rx_buf_len = IXGBE_RX_HDR_SIZE;
2924         } else {
2925                 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2926                     (netdev->mtu <= ETH_DATA_LEN))
2927                         rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2928                 else
2929                         rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2930         }
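             /*
              * Example: at the default 1500 byte MTU with RSC off, the buffer
              * stays at MAXIMUM_ETHERNET_VLAN_SIZE; at a 9000 byte MTU,
              * max_frame is 9018 and ALIGN(9018 + VLAN_HLEN, 1024) rounds
              * the buffer up to 9216 bytes (illustrative values).
              */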
2931
2932 #ifdef IXGBE_FCOE
2933         /* adjust max frame to be able to do baby jumbo for FCoE */
2934         if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2935             (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2936                 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2937
2938 #endif /* IXGBE_FCOE */
2939         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2940         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2941                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2942                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2943
2944                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2945         }
2946
2947         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2948         /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2949         hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2950         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2951
2952         /*
2953          * Setup the HW Rx Head and Tail Descriptor Pointers and
2954          * the Base and Length of the Rx Descriptor Ring
2955          */
2956         for (i = 0; i < adapter->num_rx_queues; i++) {
2957                 rx_ring = adapter->rx_ring[i];
2958                 rx_ring->rx_buf_len = rx_buf_len;
2959
2960                 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2961                         rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
2962                 else
2963                         rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2964
2965 #ifdef IXGBE_FCOE
2966                 if (netdev->features & NETIF_F_FCOE_MTU) {
2967                         struct ixgbe_ring_feature *f;
2968                         f = &adapter->ring_feature[RING_F_FCOE];
2969                         if ((i >= f->mask) && (i < f->mask + f->indices)) {
2970                                 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2971                                 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2972                                         rx_ring->rx_buf_len =
2973                                                 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2974                         }
2975                 }
2976 #endif /* IXGBE_FCOE */
2977         }
2978
2979 }
2980
2981 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2982 {
2983         struct ixgbe_hw *hw = &adapter->hw;
2984         u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2985
2986         switch (hw->mac.type) {
2987         case ixgbe_mac_82598EB:
2988                 /*
2989                  * For VMDq support of different descriptor types or
2990                  * buffer sizes through the use of multiple SRRCTL
2991                  * registers, RDRXCTL.MVMEN must be set to 1
2992                  *
2993                  * also, the manual doesn't mention it clearly but DCA hints
2994                  * will only use queue 0's tags unless this bit is set.  Side
2995                  * effects of setting this bit are only that SRRCTL must be
2996                  * fully programmed [0..15]
2997                  */
2998                 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2999                 break;
3000         case ixgbe_mac_82599EB:
3001                 /* Disable RSC for ACK packets */
3002                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3003                    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3004                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3005                 /* hardware requires some bits to be set by default */
3006                 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3007                 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3008                 break;
3009         default:
3010                 /* We should do nothing since we don't know this hardware */
3011                 return;
3012         }
3013
3014         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3015 }
3016
3017 /**
3018  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3019  * @adapter: board private structure
3020  *
3021  * Configure the Rx unit of the MAC after a reset.
3022  **/
3023 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3024 {
3025         struct ixgbe_hw *hw = &adapter->hw;
3026         int i;
3027         u32 rxctrl;
3028
3029         /* disable receives while setting up the descriptors */
3030         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3031         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3032
3033         ixgbe_setup_psrtype(adapter);
3034         ixgbe_setup_rdrxctl(adapter);
3035
3036         /* Program registers for the distribution of queues */
3037         ixgbe_setup_mrqc(adapter);
3038
3039         ixgbe_set_uta(adapter);
3040
3041         /* set_rx_buffer_len must be called before ring initialization */
3042         ixgbe_set_rx_buffer_len(adapter);
3043
3044         /*
3045          * Setup the HW Rx Head and Tail Descriptor Pointers and
3046          * the Base and Length of the Rx Descriptor Ring
3047          */
3048         for (i = 0; i < adapter->num_rx_queues; i++)
3049                 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3050
3051         /* disable drop enable for 82598 parts */
3052         if (hw->mac.type == ixgbe_mac_82598EB)
3053                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3054
3055         /* enable all receives */
3056         rxctrl |= IXGBE_RXCTRL_RXEN;
3057         hw->mac.ops.enable_rx_dma(hw, rxctrl);
3058 }
3059
3060 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3061 {
3062         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3063         struct ixgbe_hw *hw = &adapter->hw;
3064         int pool_ndx = adapter->num_vfs;
3065
3066         /* add VID to filter table */
3067         hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
3068 }
3069
3070 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3071 {
3072         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3073         struct ixgbe_hw *hw = &adapter->hw;
3074         int pool_ndx = adapter->num_vfs;
3075
3076         if (!test_bit(__IXGBE_DOWN, &adapter->state))
3077                 ixgbe_irq_disable(adapter);
3078
3079         vlan_group_set_device(adapter->vlgrp, vid, NULL);
3080
3081         if (!test_bit(__IXGBE_DOWN, &adapter->state))
3082                 ixgbe_irq_enable(adapter, true, true);
3083
3084         /* remove VID from filter table */
3085         hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
3086 }
3087
3088 /**
3089  * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3090  * @adapter: driver data
3091  */
3092 static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3093 {
3094         struct ixgbe_hw *hw = &adapter->hw;
3095         u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3096         int i, j;
3097
3098         switch (hw->mac.type) {
3099         case ixgbe_mac_82598EB:
3100                 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
3101 #ifdef CONFIG_IXGBE_DCB
3102                 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3103                         vlnctrl &= ~IXGBE_VLNCTRL_VME;
3104 #endif
3105                 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3106                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3107                 break;
3108         case ixgbe_mac_82599EB:
3109                 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
3110                 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3111                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3112 #ifdef CONFIG_IXGBE_DCB
3113                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
3114                         break;
3115 #endif
3116                 for (i = 0; i < adapter->num_rx_queues; i++) {
3117                         j = adapter->rx_ring[i]->reg_idx;
3118                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3119                         vlnctrl &= ~IXGBE_RXDCTL_VME;
3120                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3121                 }
3122                 break;
3123         default:
3124                 break;
3125         }
3126 }
3127
3128 /**
3129  * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3130  * @adapter: driver data
3131  */
3132 static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3133 {
3134         struct ixgbe_hw *hw = &adapter->hw;
3135         u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3136         int i, j;
3137
3138         switch (hw->mac.type) {
3139         case ixgbe_mac_82598EB:
3140                 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
3141                 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3142                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3143                 break;
3144         case ixgbe_mac_82599EB:
3145                 vlnctrl |= IXGBE_VLNCTRL_VFE;
3146                 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3147                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3148                 for (i = 0; i < adapter->num_rx_queues; i++) {
3149                         j = adapter->rx_ring[i]->reg_idx;
3150                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3151                         vlnctrl |= IXGBE_RXDCTL_VME;
3152                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3153                 }
3154                 break;
3155         default:
3156                 break;
3157         }
3158 }
3159
3160 static void ixgbe_vlan_rx_register(struct net_device *netdev,
3161                                    struct vlan_group *grp)
3162 {
3163         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3164
3165         if (!test_bit(__IXGBE_DOWN, &adapter->state))
3166                 ixgbe_irq_disable(adapter);
3167         adapter->vlgrp = grp;
3168
3169         /*
3170          * For a DCB driver, always enable VLAN tag stripping so we can
3171          * still receive traffic from a DCB-enabled host even if we're
3172          * not in DCB mode.
3173          */
3174         ixgbe_vlan_filter_enable(adapter);
3175
3176         ixgbe_vlan_rx_add_vid(netdev, 0);
3177
3178         if (!test_bit(__IXGBE_DOWN, &adapter->state))
3179                 ixgbe_irq_enable(adapter, true, true);
3180 }
3181
3182 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3183 {
3184         ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
3185
3186         if (adapter->vlgrp) {
3187                 u16 vid;
3188                 for (vid = 0; vid < VLAN_N_VID; vid++) {
3189                         if (!vlan_group_get_device(adapter->vlgrp, vid))
3190                                 continue;
3191                         ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
3192                 }
3193         }
3194 }
3195
3196 /**
3197  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3198  * @netdev: network interface device structure
3199  *
3200  * Writes unicast address list to the RAR table.
3201  * Returns: -ENOMEM on failure/insufficient address space
3202  *                0 on no addresses written
3203  *                X on writing X addresses to the RAR table
3204  **/
3205 static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3206 {
3207         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3208         struct ixgbe_hw *hw = &adapter->hw;
3209         unsigned int vfn = adapter->num_vfs;
3210         unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3211         int count = 0;
3212
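             /*
              * Example: with 128 RAR entries and 7 VFs, entry 0 holds the PF
              * MAC and the top seven entries are reserved for VF MACs,
              * leaving 120 filters that are consumed from the top of the
              * table downward (illustrative values).
              */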
3213         /* return ENOMEM indicating insufficient memory for addresses */
3214         if (netdev_uc_count(netdev) > rar_entries)
3215                 return -ENOMEM;
3216
3217         if (!netdev_uc_empty(netdev) && rar_entries) {
3218                 struct netdev_hw_addr *ha;
3219                 /* return error if we do not support writing to RAR table */
3220                 if (!hw->mac.ops.set_rar)
3221                         return -ENOMEM;
3222
3223                 netdev_for_each_uc_addr(ha, netdev) {
3224                         if (!rar_entries)
3225                                 break;
3226                         hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3227                                             vfn, IXGBE_RAH_AV);
3228                         count++;
3229                 }
3230         }
3231         /* clear any unused entries that may still hold stale addresses */
3232         for (; rar_entries > 0 ; rar_entries--)
3233                 hw->mac.ops.clear_rar(hw, rar_entries);
3234
3235         return count;
3236 }
3237
3238 /**
3239  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
3240  * @netdev: network interface device structure
3241  *
3242  * The set_rx_method entry point is called whenever the unicast/multicast
3243  * address list or the network interface flags are updated.  This routine is
3244  * responsible for configuring the hardware for proper unicast, multicast and
3245  * promiscuous mode.
3246  **/
3247 void ixgbe_set_rx_mode(struct net_device *netdev)
3248 {
3249         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3250         struct ixgbe_hw *hw = &adapter->hw;
3251         u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3252         int count;
3253
3254         /* Check for Promiscuous and All Multicast modes */
3255
3256         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3257
3258         /* set all bits that we expect to always be set */
3259         fctrl |= IXGBE_FCTRL_BAM;
3260         fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3261         fctrl |= IXGBE_FCTRL_PMCF;
3262
3263         /* clear the bits we are changing the status of */
3264         fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3265
3266         if (netdev->flags & IFF_PROMISC) {
3267                 hw->addr_ctrl.user_set_promisc = true;
3268                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3269                 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3270                 /* don't hardware filter vlans in promisc mode */
3271                 ixgbe_vlan_filter_disable(adapter);
3272         } else {
3273                 if (netdev->flags & IFF_ALLMULTI) {
3274                         fctrl |= IXGBE_FCTRL_MPE;
3275                         vmolr |= IXGBE_VMOLR_MPE;
3276                 } else {
3277                         /*
3278                          * Write addresses to the MTA; if the attempt fails
3279                          * then we should just turn on promiscuous mode so
3280                          * that we can at least receive multicast traffic
3281                          */
3282                         hw->mac.ops.update_mc_addr_list(hw, netdev);
3283                         vmolr |= IXGBE_VMOLR_ROMPE;
3284                 }
3285                 ixgbe_vlan_filter_enable(adapter);
3286                 hw->addr_ctrl.user_set_promisc = false;
3287                 /*
3288                  * Write addresses to available RAR registers; if there is
3289                  * not sufficient space to store all the addresses then
3290                  * enable unicast promiscuous mode
3291                  */
3292                 count = ixgbe_write_uc_addr_list(netdev);
3293                 if (count < 0) {
3294                         fctrl |= IXGBE_FCTRL_UPE;
3295                         vmolr |= IXGBE_VMOLR_ROPE;
3296                 }
3297         }
3298
3299         if (adapter->num_vfs) {
3300                 ixgbe_restore_vf_multicasts(adapter);
3301                 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3302                          ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3303                            IXGBE_VMOLR_ROPE);
3304                 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3305         }
3306
3307         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3308 }
3309
3310 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3311 {
3312         int q_idx;
3313         struct ixgbe_q_vector *q_vector;
3314         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3315
3316         /* legacy and MSI only use one vector */
3317         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3318                 q_vectors = 1;
3319
3320         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
3321                 struct napi_struct *napi;
3322                 q_vector = adapter->q_vector[q_idx];
3323                 napi = &q_vector->napi;
3324                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3325                         if (!q_vector->rxr_count || !q_vector->txr_count) {
3326                                 if (q_vector->txr_count == 1)
3327                                         napi->poll = &ixgbe_clean_txonly;
3328                                 else if (q_vector->rxr_count == 1)
3329                                         napi->poll = &ixgbe_clean_rxonly;
3330                         }
3331                 }
3332
3333                 napi_enable(napi);
3334         }
3335 }
3336
3337 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3338 {
3339         int q_idx;
3340         struct ixgbe_q_vector *q_vector;
3341         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3342
3343         /* legacy and MSI only use one vector */
3344         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3345                 q_vectors = 1;
3346
3347         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
3348                 q_vector = adapter->q_vector[q_idx];
3349                 napi_disable(&q_vector->napi);
3350         }
3351 }
3352
3353 #ifdef CONFIG_IXGBE_DCB
3354 /*
3355  * ixgbe_configure_dcb - Configure DCB hardware
3356  * @adapter: ixgbe adapter struct
3357  *
3358  * This is called by the driver on open to configure the DCB hardware.
3359  * This is also called by the gennetlink interface when reconfiguring
3360  * the DCB state.
3361  */
3362 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3363 {
3364         struct ixgbe_hw *hw = &adapter->hw;
3365         u32 txdctl;
3366         int i, j;
3367
3368         if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3369                 if (hw->mac.type == ixgbe_mac_82598EB)
3370                         netif_set_gso_max_size(adapter->netdev, 65536);
3371                 return;
3372         }
3373
3374         if (hw->mac.type == ixgbe_mac_82598EB)
3375                 netif_set_gso_max_size(adapter->netdev, 32768);
3376
3377         ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
3378         ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
3379
3380         /* reconfigure the hardware */
3381         ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3382
3383         for (i = 0; i < adapter->num_tx_queues; i++) {
3384                 j = adapter->tx_ring[i]->reg_idx;
3385                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3386                 /* PThresh workaround for Tx hang with DFP enabled. */
3387                 txdctl |= 32;
3388                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3389         }
3390         /* Enable VLAN tag insert/strip */
3391         ixgbe_vlan_filter_enable(adapter);
3392
3393         hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3394 }
3395
3396 #endif
3397 static void ixgbe_configure(struct ixgbe_adapter *adapter)
3398 {
3399         struct net_device *netdev = adapter->netdev;
3400         struct ixgbe_hw *hw = &adapter->hw;
3401         int i;
3402
3403         ixgbe_set_rx_mode(netdev);
3404
3405         ixgbe_restore_vlan(adapter);
3406 #ifdef CONFIG_IXGBE_DCB
3407         ixgbe_configure_dcb(adapter);
3408 #endif
3409
3410 #ifdef IXGBE_FCOE
3411         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3412                 ixgbe_configure_fcoe(adapter);
3413
3414 #endif /* IXGBE_FCOE */
3415         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3416                 for (i = 0; i < adapter->num_tx_queues; i++)
3417                         adapter->tx_ring[i]->atr_sample_rate =
3418                                                        adapter->atr_sample_rate;
3419                 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3420         } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3421                 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3422         }
3423         ixgbe_configure_virtualization(adapter);
3424
3425         ixgbe_configure_tx(adapter);
3426         ixgbe_configure_rx(adapter);
3427 }
3428
3429 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3430 {
3431         switch (hw->phy.type) {
3432         case ixgbe_phy_sfp_avago:
3433         case ixgbe_phy_sfp_ftl:
3434         case ixgbe_phy_sfp_intel:
3435         case ixgbe_phy_sfp_unknown:
3436         case ixgbe_phy_sfp_passive_tyco:
3437         case ixgbe_phy_sfp_passive_unknown:
3438         case ixgbe_phy_sfp_active_unknown:
3439         case ixgbe_phy_sfp_ftl_active:
3440                 return true;
3441         default:
3442                 return false;
3443         }
3444 }
3445
3446 /**
3447  * ixgbe_sfp_link_config - set up SFP+ link
3448  * @adapter: pointer to private adapter struct
3449  **/
3450 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3451 {
3452         struct ixgbe_hw *hw = &adapter->hw;
3453
3454         if (hw->phy.multispeed_fiber) {
3455                 /*
3456                  * In multispeed fiber setups, the device may not have
3457                  * had a physical connection when the driver loaded.
3458                  * If that's the case, the initial link configuration
3459                  * couldn't get the MAC into 10G or 1G mode, so we'll
3460                  * never have a link status change interrupt fire.
3461                  * We need to try and force an autonegotiation
3462                  * session, then bring up link.
3463                  */
3464                 hw->mac.ops.setup_sfp(hw);
3465                 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3466                         schedule_work(&adapter->multispeed_fiber_task);
3467         } else {
3468                 /*
3469                  * Direct Attach Cu and non-multispeed fiber modules
3470                  * still need to be configured properly prior to
3471                  * attempting link.
3472                  */
3473                 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3474                         schedule_work(&adapter->sfp_config_module_task);
3475         }
3476 }
3477
3478 /**
3479  * ixgbe_non_sfp_link_config - set up non-SFP+ link
3480  * @hw: pointer to private hardware struct
3481  *
3482  * Returns 0 on success, negative on failure
3483  **/
3484 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3485 {
3486         u32 autoneg;
3487         bool negotiation, link_up = false;
3488         u32 ret = IXGBE_ERR_LINK_SETUP;
3489
3490         if (hw->mac.ops.check_link)
3491                 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3492
3493         if (ret)
3494                 goto link_cfg_out;
3495
3496         if (hw->mac.ops.get_link_capabilities)
3497                 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3498                                                         &negotiation);
3499         if (ret)
3500                 goto link_cfg_out;
3501
3502         if (hw->mac.ops.setup_link)
3503                 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
3504 link_cfg_out:
3505         return ret;
3506 }
3507
3508 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3509 {
3510         struct ixgbe_hw *hw = &adapter->hw;
3511         u32 gpie = 0;
3512
3513         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3514                 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3515                        IXGBE_GPIE_OCD;
3516                 gpie |= IXGBE_GPIE_EIAME;
3517                 /*
3518                  * use EIAM to auto-mask when MSI-X interrupt is asserted
3519                  * this saves a register write for every interrupt
3520                  */
3521                 switch (hw->mac.type) {
3522                 case ixgbe_mac_82598EB:
3523                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3524                         break;
3525                 default:
3526                 case ixgbe_mac_82599EB:
3527                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3528                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3529                         break;
3530                 }
3531         } else {
3532                 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3533                  * specifically only auto mask tx and rx interrupts */
3534                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3535         }
3536
3537         /* XXX: to interrupt immediately for EICS writes, enable this */
3538         /* gpie |= IXGBE_GPIE_EIMEN; */
3539
3540         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3541                 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3542                 gpie |= IXGBE_GPIE_VTMODE_64;
3543         }
3544
3545         /* Enable fan failure interrupt */
3546         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3547                 gpie |= IXGBE_SDP1_GPIEN;
3548
3549         if (hw->mac.type == ixgbe_mac_82599EB) {
3550                 gpie |= IXGBE_SDP1_GPIEN;
3551                 gpie |= IXGBE_SDP2_GPIEN;
3552         }
3553         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3554 }
3555
3556 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3557 {
3558         struct ixgbe_hw *hw = &adapter->hw;
3559         int err;
3560         u32 ctrl_ext;
3561
3562         ixgbe_get_hw_control(adapter);
3563         ixgbe_setup_gpie(adapter);
3564
3565         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3566                 ixgbe_configure_msix(adapter);
3567         else
3568                 ixgbe_configure_msi_and_legacy(adapter);
3569
3570         /* enable the optics */
3571         if (hw->phy.multispeed_fiber)
3572                 hw->mac.ops.enable_tx_laser(hw);
3573
3574         clear_bit(__IXGBE_DOWN, &adapter->state);
3575         ixgbe_napi_enable_all(adapter);
3576
3577         /* clear any pending interrupts, may auto mask */
3578         IXGBE_READ_REG(hw, IXGBE_EICR);
3579         ixgbe_irq_enable(adapter, true, true);
3580
3581         /*
3582          * If this adapter has a fan, check to see if we had a failure
3583          * before we enabled the interrupt.
3584          */
3585         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3586                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3587                 if (esdp & IXGBE_ESDP_SDP1)
3588                         e_crit(drv, "Fan has stopped, replace the adapter\n");
3589         }
3590
3591         /*
3592          * For hot-pluggable SFP+ devices, a new SFP+ module may have
3593          * arrived before interrupts were enabled but after probe.  Such
3594          * devices wouldn't have their type identified yet. We need to
3595          * kick off the SFP+ module setup first, then try to bring up link.
3596          * If we're not hot-pluggable SFP+, we just need to configure link
3597          * and bring it up.
3598          */
3599         if (hw->phy.type == ixgbe_phy_unknown) {
3600                 err = hw->phy.ops.identify(hw);
3601                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3602                         /*
3603                          * Take the device down and schedule the sfp tasklet
3604                          * which will unregister_netdev and log it.
3605                          */
3606                         ixgbe_down(adapter);
3607                         schedule_work(&adapter->sfp_config_module_task);
3608                         return err;
3609                 }
3610         }
3611
3612         if (ixgbe_is_sfp(hw)) {
3613                 ixgbe_sfp_link_config(adapter);
3614         } else {
3615                 err = ixgbe_non_sfp_link_config(hw);
3616                 if (err)
3617                         e_err(probe, "link_config FAILED %d\n", err);
3618         }
3619
3620         /* enable transmits */
3621         netif_tx_start_all_queues(adapter->netdev);
3622
3623         /* bring the link up in the watchdog, this could race with our first
3624          * link up interrupt but shouldn't be a problem */
3625         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3626         adapter->link_check_timeout = jiffies;
3627         mod_timer(&adapter->watchdog_timer, jiffies);
3628
3629         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3630         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3631         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3632         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3633
3634         return 0;
3635 }
3636
3637 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3638 {
3639         WARN_ON(in_interrupt());
3640         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3641                 msleep(1);
3642         ixgbe_down(adapter);
3643         /*
3644          * If SR-IOV enabled then wait a bit before bringing the adapter
3645          * back up to give the VFs time to respond to the reset.  The
3646          * two second wait is based upon the watchdog timer cycle in
3647          * the VF driver.
3648          */
3649         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3650                 msleep(2000);
3651         ixgbe_up(adapter);
3652         clear_bit(__IXGBE_RESETTING, &adapter->state);
3653 }
3654
3655 int ixgbe_up(struct ixgbe_adapter *adapter)
3656 {
3657         /* hardware has been reset, we need to reload some things */
3658         ixgbe_configure(adapter);
3659
3660         return ixgbe_up_complete(adapter);
3661 }
3662
3663 void ixgbe_reset(struct ixgbe_adapter *adapter)
3664 {
3665         struct ixgbe_hw *hw = &adapter->hw;
3666         int err;
3667
3668         err = hw->mac.ops.init_hw(hw);
3669         switch (err) {
3670         case 0:
3671         case IXGBE_ERR_SFP_NOT_PRESENT:
3672                 break;
3673         case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3674                 e_dev_err("master disable timed out\n");
3675                 break;
3676         case IXGBE_ERR_EEPROM_VERSION:
3677                 /* We are running on a pre-production device, log a warning */
3678                 e_dev_warn("This device is a pre-production adapter/LOM. "
3679                            "Please be aware there may be issues associated with "
3680                            "your hardware.  If you are experiencing problems "
3681                            "please contact your Intel or hardware "
3682                            "representative who provided you with this "
3683                            "hardware.\n");
3684                 break;
3685         default:
3686                 e_dev_err("Hardware Error: %d\n", err);
3687         }
3688
3689         /* reprogram the RAR[0] in case user changed it. */
3690         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3691                             IXGBE_RAH_AV);
3692 }
3693
3694 /**
3695  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3696  * @adapter: board private structure
3697  * @rx_ring: ring to free buffers from
3698  **/
3699 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3700                                 struct ixgbe_ring *rx_ring)
3701 {
3702         struct pci_dev *pdev = adapter->pdev;
3703         unsigned long size;
3704         unsigned int i;
3705
3706         /* ring already cleared, nothing to do */
3707         if (!rx_ring->rx_buffer_info)
3708                 return;
3709
3710         /* Free all the Rx ring sk_buffs */
3711         for (i = 0; i < rx_ring->count; i++) {
3712                 struct ixgbe_rx_buffer *rx_buffer_info;
3713
3714                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3715                 if (rx_buffer_info->dma) {
3716                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3717                                          rx_ring->rx_buf_len,
3718                                          DMA_FROM_DEVICE);
3719                         rx_buffer_info->dma = 0;
3720                 }
3721                 if (rx_buffer_info->skb) {
3722                         struct sk_buff *skb = rx_buffer_info->skb;
3723                         rx_buffer_info->skb = NULL;
3724                         do {
3725                                 struct sk_buff *this = skb;
3726                                 if (IXGBE_RSC_CB(this)->delay_unmap) {
3727                                         dma_unmap_single(&pdev->dev,
3728                                                          IXGBE_RSC_CB(this)->dma,
3729                                                          rx_ring->rx_buf_len,
3730                                                          DMA_FROM_DEVICE);
3731                                         IXGBE_RSC_CB(this)->dma = 0;
3732                                         IXGBE_RSC_CB(skb)->delay_unmap = false;
3733                                 }
3734                                 skb = skb->prev;
3735                                 dev_kfree_skb(this);
3736                         } while (skb);
3737                 }
3738                 if (!rx_buffer_info->page)
3739                         continue;
3740                 if (rx_buffer_info->page_dma) {
3741                         dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
3742                                        PAGE_SIZE / 2, DMA_FROM_DEVICE);
3743                         rx_buffer_info->page_dma = 0;
3744                 }
3745                 put_page(rx_buffer_info->page);
3746                 rx_buffer_info->page = NULL;
3747                 rx_buffer_info->page_offset = 0;
3748         }
3749
3750         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3751         memset(rx_ring->rx_buffer_info, 0, size);
3752
3753         /* Zero out the descriptor ring */
3754         memset(rx_ring->desc, 0, rx_ring->size);
3755
3756         rx_ring->next_to_clean = 0;
3757         rx_ring->next_to_use = 0;
3758
3759         if (rx_ring->head)
3760                 writel(0, adapter->hw.hw_addr + rx_ring->head);
3761         if (rx_ring->tail)
3762                 writel(0, adapter->hw.hw_addr + rx_ring->tail);
3763 }
3764
3765 /**
3766  * ixgbe_clean_tx_ring - Free Tx Buffers
3767  * @adapter: board private structure
3768  * @tx_ring: ring to be cleaned
3769  **/
3770 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3771                                 struct ixgbe_ring *tx_ring)
3772 {
3773         struct ixgbe_tx_buffer *tx_buffer_info;
3774         unsigned long size;
3775         unsigned int i;
3776
3777         /* ring already cleared, nothing to do */
3778         if (!tx_ring->tx_buffer_info)
3779                 return;
3780
3781         /* Free all the Tx ring sk_buffs */
3782         for (i = 0; i < tx_ring->count; i++) {
3783                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3784                 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3785         }
3786
3787         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3788         memset(tx_ring->tx_buffer_info, 0, size);
3789
3790         /* Zero out the descriptor ring */
3791         memset(tx_ring->desc, 0, tx_ring->size);
3792
3793         tx_ring->next_to_use = 0;
3794         tx_ring->next_to_clean = 0;
3795
3796         if (tx_ring->head)
3797                 writel(0, adapter->hw.hw_addr + tx_ring->head);
3798         if (tx_ring->tail)
3799                 writel(0, adapter->hw.hw_addr + tx_ring->tail);
3800 }
3801
3802 /**
3803  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
3804  * @adapter: board private structure
3805  **/
3806 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3807 {
3808         int i;
3809
3810         for (i = 0; i < adapter->num_rx_queues; i++)
3811                 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
3812 }
3813
3814 /**
3815  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
3816  * @adapter: board private structure
3817  **/
3818 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3819 {
3820         int i;
3821
3822         for (i = 0; i < adapter->num_tx_queues; i++)
3823                 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
3824 }
3825
3826 void ixgbe_down(struct ixgbe_adapter *adapter)
3827 {
3828         struct net_device *netdev = adapter->netdev;
3829         struct ixgbe_hw *hw = &adapter->hw;
3830         u32 rxctrl;
3831         u32 txdctl;
3832         int i, j;
3833         int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3834
3835         /* signal that we are down to the interrupt handler */
3836         set_bit(__IXGBE_DOWN, &adapter->state);
3837
3838         /* disable receive for all VFs and wait one second */
3839         if (adapter->num_vfs) {
3840                 /* ping all the active vfs to let them know we are going down */
3841                 ixgbe_ping_all_vfs(adapter);
3842
3843                 /* Disable all VFTE/VFRE TX/RX */
3844                 ixgbe_disable_tx_rx(adapter);
3845
3846                 /* Mark all the VFs as inactive */
3847                 for (i = 0 ; i < adapter->num_vfs; i++)
3848                         adapter->vfinfo[i].clear_to_send = 0;
3849         }
3850
3851         /* disable receives */
3852         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3853         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3854
3855         IXGBE_WRITE_FLUSH(hw);
3856         msleep(10);
3857
3858         netif_tx_stop_all_queues(netdev);
3859
3860         clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3861         del_timer_sync(&adapter->sfp_timer);
3862         del_timer_sync(&adapter->watchdog_timer);
3863         cancel_work_sync(&adapter->watchdog_task);
3864
3865         netif_carrier_off(netdev);
3866         netif_tx_disable(netdev);
3867
3868         ixgbe_irq_disable(adapter);
3869
3870         ixgbe_napi_disable_all(adapter);
3871
3872         /* Cleanup the affinity_hint CPU mask memory and callback */
3873         for (i = 0; i < num_q_vectors; i++) {
3874                 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
3875                 /* clear the affinity_mask in the IRQ descriptor */
3876                 irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
3877                 /* release the CPU mask memory */
3878                 free_cpumask_var(q_vector->affinity_mask);
3879         }
3880
3881         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3882             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3883                 cancel_work_sync(&adapter->fdir_reinit_task);
3884
3885         if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3886                 cancel_work_sync(&adapter->check_overtemp_task);
3887
3888         /* disable transmits in the hardware now that interrupts are off */
3889         for (i = 0; i < adapter->num_tx_queues; i++) {
3890                 j = adapter->tx_ring[i]->reg_idx;
3891                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3892                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3893                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3894         }
3895         /* Disable the Tx DMA engine on 82599 */
3896         if (hw->mac.type == ixgbe_mac_82599EB)
3897                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3898                                 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3899                                  ~IXGBE_DMATXCTL_TE));
3900
3901         /* power down the optics */
3902         if (hw->phy.multispeed_fiber)
3903                 hw->mac.ops.disable_tx_laser(hw);
3904
3905         /* clear n-tuple filters that are cached */
3906         ethtool_ntuple_flush(netdev);
3907
3908         if (!pci_channel_offline(adapter->pdev))
3909                 ixgbe_reset(adapter);
3910         ixgbe_clean_all_tx_rings(adapter);
3911         ixgbe_clean_all_rx_rings(adapter);
3912
3913 #ifdef CONFIG_IXGBE_DCA
3914         /* since we reset the hardware, DCA settings were cleared */
3915         ixgbe_setup_dca(adapter);
3916 #endif
3917 }
3918
3919 /**
3920  * ixgbe_poll - NAPI Rx polling callback
3921  * @napi: structure for representing this polling device
3922  * @budget: how many packets driver is allowed to clean
3923  *
3924  * This function is used for legacy and MSI, NAPI mode
3925  **/
3926 static int ixgbe_poll(struct napi_struct *napi, int budget)
3927 {
3928         struct ixgbe_q_vector *q_vector =
3929                                 container_of(napi, struct ixgbe_q_vector, napi);
3930         struct ixgbe_adapter *adapter = q_vector->adapter;
3931         int tx_clean_complete, work_done = 0;
3932
3933 #ifdef CONFIG_IXGBE_DCA
3934         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3935                 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
3936                 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3937         }
3938 #endif
3939
3940         tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3941         ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
3942
3943         if (!tx_clean_complete)
3944                 work_done = budget;
3945
3946         /* If budget not fully consumed, exit the polling mode */
3947         if (work_done < budget) {
3948                 napi_complete(napi);
3949                 if (adapter->rx_itr_setting & 1)
3950                         ixgbe_set_itr(adapter);
3951                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3952                         ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
3953         }
3954         return work_done;
3955 }
3956
3957 /**
3958  * ixgbe_tx_timeout - Respond to a Tx Hang
3959  * @netdev: network interface device structure
3960  **/
3961 static void ixgbe_tx_timeout(struct net_device *netdev)
3962 {
3963         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3964
3965         /* Do the reset outside of interrupt context */
3966         schedule_work(&adapter->reset_task);
3967 }
3968
3969 static void ixgbe_reset_task(struct work_struct *work)
3970 {
3971         struct ixgbe_adapter *adapter;
3972         adapter = container_of(work, struct ixgbe_adapter, reset_task);
3973
3974         /* If we're already down or resetting, just bail */
3975         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3976             test_bit(__IXGBE_RESETTING, &adapter->state))
3977                 return;
3978
3979         adapter->tx_timeout_count++;
3980
3981         ixgbe_dump(adapter);
3982         netdev_err(adapter->netdev, "Reset adapter\n");
3983         ixgbe_reinit_locked(adapter);
3984 }
3985
3986 #ifdef CONFIG_IXGBE_DCB
3987 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
3988 {
3989         bool ret = false;
3990         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
3991
3992         if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3993                 return ret;
3994
3995         f->mask = 0x7 << 3;
3996         adapter->num_rx_queues = f->indices;
3997         adapter->num_tx_queues = f->indices;
3998         ret = true;
3999
4000         return ret;
4001 }
4002 #endif
4003
4004 /**
4005  * ixgbe_set_rss_queues: Allocate queues for RSS
4006  * @adapter: board private structure to initialize
4007  *
4008  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
4009  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
4010  *
4011  **/
4012 static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
4013 {
4014         bool ret = false;
4015         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
4016
4017         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4018                 f->mask = 0xF;
4019                 adapter->num_rx_queues = f->indices;
4020                 adapter->num_tx_queues = f->indices;
4021                 ret = true;
4022         } else {
4023                 ret = false;
4024         }
4025
4026         return ret;
4027 }
4028
4029 /**
4030  * ixgbe_set_fdir_queues: Allocate queues for Flow Director
4031  * @adapter: board private structure to initialize
4032  *
4033  * Flow Director is an advanced Rx filter, attempting to get Rx flows back
4034  * to the original CPU that initiated the Tx session.  This runs in addition
4035  * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
4036  * Rx load across CPUs using RSS.
4037  *
4038  **/
4039 static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
4040 {
4041         bool ret = false;
4042         struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4043
4044         f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4045         f_fdir->mask = 0;
4046
4047         /* Flow Director must have RSS enabled */
4048         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4049             ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4050              (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4051                 adapter->num_tx_queues = f_fdir->indices;
4052                 adapter->num_rx_queues = f_fdir->indices;
4053                 ret = true;
4054         } else {
4055                 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4056                 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4057         }
4058         return ret;
4059 }
4060
4061 #ifdef IXGBE_FCOE
4062 /**
4063  * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
4064  * @adapter: board private structure to initialize
4065  *
4066  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4067  * Because FCoE may take any 8 rx queues out of the maximum available, the
4068  * ring feature mask is not used as a mask here; instead, it holds the index
4069  * of the first rx queue used by FCoE.
4070  *
4071  **/
4072 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4073 {
4074         bool ret = false;
4075         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4076
4077         f->indices = min((int)num_online_cpus(), f->indices);
4078         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4079                 adapter->num_rx_queues = 1;
4080                 adapter->num_tx_queues = 1;
4081 #ifdef CONFIG_IXGBE_DCB
4082                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4083                         e_info(probe, "FCoE enabled with DCB\n");
4084                         ixgbe_set_dcb_queues(adapter);
4085                 }
4086 #endif
4087                 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4088                         e_info(probe, "FCoE enabled with RSS\n");
4089                         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4090                             (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4091                                 ixgbe_set_fdir_queues(adapter);
4092                         else
4093                                 ixgbe_set_rss_queues(adapter);
4094                 }
4095                 /* adding FCoE rx rings to the end */
4096                 f->mask = adapter->num_rx_queues;
4097                 adapter->num_rx_queues += f->indices;
4098                 adapter->num_tx_queues += f->indices;
4099
4100                 ret = true;
4101         }
4102
4103         return ret;
4104 }
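
/*
 * Worked example of the layout above (assumed values, for illustration
 * only): with RSS providing 16 rx queues and f->indices == 8, the code
 * sets f->mask = 16 and grows num_rx_queues to 24, so the FCoE rings
 * occupy indices 16..23 at the tail of the ring array.
 */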
4105
4106 #endif /* IXGBE_FCOE */
4107 /**
4108  * ixgbe_set_sriov_queues: Allocate queues for IOV use
4109  * @adapter: board private structure to initialize
4110  *
4111  * IOV doesn't actually use anything, so just NAK the
4112  * request for now and let the other queue routines
4113  * figure out what to do.
4114  */
4115 static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4116 {
4117         return false;
4118 }
4119
4120 /**
4121  * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4122  * @adapter: board private structure to initialize
4123  *
4124  * This is the top level queue allocation routine.  The order here is very
4125  * important, starting with the greatest number of features turned on at once,
4126  * and ending with the smallest set of features.  This way large combinations
4127  * can be allocated if they're turned on, and smaller combinations are the
4128  * fallthrough conditions.
4129  *
4130  **/
4131 static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4132 {
4133         /* Start with base case */
4134         adapter->num_rx_queues = 1;
4135         adapter->num_tx_queues = 1;
4136         adapter->num_rx_pools = adapter->num_rx_queues;
4137         adapter->num_rx_queues_per_pool = 1;
4138
4139         if (ixgbe_set_sriov_queues(adapter))
4140                 goto done;
4141
4142 #ifdef IXGBE_FCOE
4143         if (ixgbe_set_fcoe_queues(adapter))
4144                 goto done;
4145
4146 #endif /* IXGBE_FCOE */
4147 #ifdef CONFIG_IXGBE_DCB
4148         if (ixgbe_set_dcb_queues(adapter))
4149                 goto done;
4150
4151 #endif
4152         if (ixgbe_set_fdir_queues(adapter))
4153                 goto done;
4154
4155         if (ixgbe_set_rss_queues(adapter))
4156                 goto done;
4157
4158         /* fallback to base case */
4159         adapter->num_rx_queues = 1;
4160         adapter->num_tx_queues = 1;
4161
4162 done:
4163         /* Notify the stack of the (possibly) reduced queue counts. */
4164         netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4165         return netif_set_real_num_rx_queues(adapter->netdev,
4166                                             adapter->num_rx_queues);
4167 }
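
/*
 * Example of the precedence above (illustrative): if both DCB and RSS
 * are enabled, ixgbe_set_dcb_queues() claims the queue counts first and
 * ixgbe_set_rss_queues() is never consulted; RSS only decides the
 * counts once every richer feature combination has declined.
 */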
4168
4169 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4170                                        int vectors)
4171 {
4172         int err, vector_threshold;
4173
4174         /* We'll want at least 3 (vector_threshold):
4175          * 1) TxQ[0] Cleanup
4176          * 2) RxQ[0] Cleanup
4177          * 3) Other (Link Status Change, etc.)
4178          * 4) TCP Timer (optional)
4179          */
4180         vector_threshold = MIN_MSIX_COUNT;
4181
4182         /* The more we get, the more we will assign to Tx/Rx Cleanup
4183          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4184          * Right now, we simply care about how many we'll get; we'll
4185          * set them up later while requesting IRQs.
4186          */
4187         while (vectors >= vector_threshold) {
4188                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
4189                                       vectors);
4190                 if (!err) /* Success in acquiring all requested vectors. */
4191                         break;
4192                 else if (err < 0)
4193                         vectors = 0; /* Nasty failure, quit now */
4194                 else /* err == number of vectors we should try again with */
4195                         vectors = err;
4196         }
4197
4198         if (vectors < vector_threshold) {
4199                 /* Can't allocate enough MSI-X interrupts?  Oh well.
4200                  * This just means we'll go with either a single MSI
4201                  * vector or fall back to legacy interrupts.
4202                  */
4203                 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4204                              "Unable to allocate MSI-X interrupts\n");
4205                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4206                 kfree(adapter->msix_entries);
4207                 adapter->msix_entries = NULL;
4208         } else {
4209                 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
4210                 /*
4211                  * Adjust for only the vectors we'll use, which is the minimum
4212                  * of max_msix_q_vectors + NON_Q_VECTORS and the number of
4213                  * vectors we were allocated.
4214                  */
4215                 adapter->num_msix_vectors = min(vectors,
4216                                    adapter->max_msix_q_vectors + NON_Q_VECTORS);
4217         }
4218 }
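
/*
 * In kernels of this era, pci_enable_msix() returns 0 on success, a
 * negative errno on hard failure, or a positive count of vectors the
 * platform could still grant.  A minimal standalone sketch of the same
 * retry pattern used above (hypothetical, for illustration only):
 */
#if 0
static int example_acquire_msix(struct pci_dev *pdev,
                                struct msix_entry *entries,
                                int want, int min)
{
        int err;

        while (want >= min) {
                err = pci_enable_msix(pdev, entries, want);
                if (!err)
                        return want;    /* got everything we asked for */
                if (err < 0)
                        return err;     /* hard failure, give up */
                want = err;             /* retry with what is available */
        }
        return -ENOSPC;                 /* below the usable minimum */
}
#endif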
4219
4220 /**
4221  * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
4222  * @adapter: board private structure to initialize
4223  *
4224  * Cache the descriptor ring offsets for RSS to the assigned rings.
4225  *
4226  **/
4227 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4228 {
4229         int i;
4230         bool ret = false;
4231
4232         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4233                 for (i = 0; i < adapter->num_rx_queues; i++)
4234                         adapter->rx_ring[i]->reg_idx = i;
4235                 for (i = 0; i < adapter->num_tx_queues; i++)
4236                         adapter->tx_ring[i]->reg_idx = i;
4237                 ret = true;
4238         } else {
4239                 ret = false;
4240         }
4241
4242         return ret;
4243 }
4244
4245 #ifdef CONFIG_IXGBE_DCB
4246 /**
4247  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4248  * @adapter: board private structure to initialize
4249  *
4250  * Cache the descriptor ring offsets for DCB to the assigned rings.
4251  *
4252  **/
4253 static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4254 {
4255         int i;
4256         bool ret = false;
4257         int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4258
4259         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4260                 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4261                         /* the number of queues is assumed to be symmetric */
4262                         for (i = 0; i < dcb_i; i++) {
4263                                 adapter->rx_ring[i]->reg_idx = i << 3;
4264                                 adapter->tx_ring[i]->reg_idx = i << 2;
4265                         }
4266                         ret = true;
4267                 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4268                         if (dcb_i == 8) {
4269                                 /*
4270                                  * Tx TC0 starts at: descriptor queue 0
4271                                  * Tx TC1 starts at: descriptor queue 32
4272                                  * Tx TC2 starts at: descriptor queue 64
4273                                  * Tx TC3 starts at: descriptor queue 80
4274                                  * Tx TC4 starts at: descriptor queue 96
4275                                  * Tx TC5 starts at: descriptor queue 104
4276                                  * Tx TC6 starts at: descriptor queue 112
4277                                  * Tx TC7 starts at: descriptor queue 120
4278                                  *
4279                                  * Rx TC0-TC7 are offset by 16 queues each
4280                                  */
4281                                 for (i = 0; i < 3; i++) {
4282                                         adapter->tx_ring[i]->reg_idx = i << 5;
4283                                         adapter->rx_ring[i]->reg_idx = i << 4;
4284                                 }
4285                                 for ( ; i < 5; i++) {
4286                                         adapter->tx_ring[i]->reg_idx =
4287                                                                  ((i + 2) << 4);
4288                                         adapter->rx_ring[i]->reg_idx = i << 4;
4289                                 }
4290                                 for ( ; i < dcb_i; i++) {
4291                                         adapter->tx_ring[i]->reg_idx =
4292                                                                  ((i + 8) << 3);
4293                                         adapter->rx_ring[i]->reg_idx = i << 4;
4294                                 }
4295
4296                                 ret = true;
4297                         } else if (dcb_i == 4) {
4298                                 /*
4299                                  * Tx TC0 starts at: descriptor queue 0
4300                                  * Tx TC1 starts at: descriptor queue 64
4301                                  * Tx TC2 starts at: descriptor queue 96
4302                                  * Tx TC3 starts at: descriptor queue 112
4303                                  *
4304                                  * Rx TC0-TC3 are offset by 32 queues each
4305                                  */
4306                                 adapter->tx_ring[0]->reg_idx = 0;
4307                                 adapter->tx_ring[1]->reg_idx = 64;
4308                                 adapter->tx_ring[2]->reg_idx = 96;
4309                                 adapter->tx_ring[3]->reg_idx = 112;
4310                                 for (i = 0 ; i < dcb_i; i++)
4311                                         adapter->rx_ring[i]->reg_idx = i << 5;
4312
4313                                 ret = true;
4314                         } else {
4315                                 ret = false;
4316                         }
4317                 } else {
4318                         ret = false;
4319                 }
4320         } else {
4321                 ret = false;
4322         }
4323
4324         return ret;
4325 }
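
/*
 * Sanity check of the 8-TC arithmetic above (illustrative): i << 5
 * yields Tx offsets 0, 32, 64 for TC0-TC2; (i + 2) << 4 yields 80 and
 * 96 for TC3-TC4; (i + 8) << 3 yields 104, 112, 120 for TC5-TC7,
 * matching the table in the comment.  Rx stays at i << 4, i.e. each
 * TC's Rx rings start 16 queues apart.
 */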
4326 #endif
4327
4328 /**
4329  * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4330  * @adapter: board private structure to initialize
4331  *
4332  * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4333  *
4334  **/
4335 static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4336 {
4337         int i;
4338         bool ret = false;
4339
4340         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4341             ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4342              (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4343                 for (i = 0; i < adapter->num_rx_queues; i++)
4344                         adapter->rx_ring[i]->reg_idx = i;
4345                 for (i = 0; i < adapter->num_tx_queues; i++)
4346                         adapter->tx_ring[i]->reg_idx = i;
4347                 ret = true;
4348         }
4349
4350         return ret;
4351 }
4352
4353 #ifdef IXGBE_FCOE
4354 /**
4355  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4356  * @adapter: board private structure to initialize
4357  *
4358  * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4359  *
4360  */
4361 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4362 {
4363         int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
4364         bool ret = false;
4365         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4366
4367         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4368 #ifdef CONFIG_IXGBE_DCB
4369                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4370                         struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4371
4372                         ixgbe_cache_ring_dcb(adapter);
4373                         /* find out queues in TC for FCoE */
4374                         fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4375                         fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4376                         /*
4377                          * In 82599, the number of Tx queues for each traffic
4378                          * class for both 8-TC and 4-TC modes is:
4379                          * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4380                          * 8 TCs:  32  32  16  16   8   8   8   8
4381                          * 4 TCs:  64  64  32  32
4382                          * We have max 8 queues for FCoE, where 8 is the
4383                          * FCoE redirection table size. If the TC for FCoE is
4384                          * less than or equal to TC3, we have enough queues
4385                          * to add a max of 8 queues for FCoE, so we start the
4386                          * FCoE Tx descriptors at the next index, i.e., reg_idx + 1.
4387                          * If TC for FCoE is above TC3, implying 8 TC mode,
4388                          * and we need 8 for FCoE, we have to take all queues
4389                          * in that traffic class for FCoE.
4390                          */
4391                         if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4392                                 fcoe_tx_i--;
4393                 }
4394 #endif /* CONFIG_IXGBE_DCB */
4395                 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4396                         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4397                             (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4398                                 ixgbe_cache_ring_fdir(adapter);
4399                         else
4400                                 ixgbe_cache_ring_rss(adapter);
4401
4402                         fcoe_rx_i = f->mask;
4403                         fcoe_tx_i = f->mask;
4404                 }
4405                 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4406                         adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4407                         adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4408                 }
4409                 ret = true;
4410         }
4411         return ret;
4412 }
4413
4414 #endif /* IXGBE_FCOE */
4415 /**
4416  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4417  * @adapter: board private structure to initialize
4418  *
4419  * SR-IOV doesn't use any descriptor rings but changes the default if
4420  * no other mapping is used.
4421  *
4422  */
4423 static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4424 {
4425         adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4426         adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
4427         if (adapter->num_vfs)
4428                 return true;
4429         else
4430                 return false;
4431 }
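
/*
 * Example (illustrative): with num_vfs == 16 the PF's ring 0 maps to
 * register index 32, which suggests each VF consumes two queue pairs
 * ahead of the PF in this layout.
 */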
4432
4433 /**
4434  * ixgbe_cache_ring_register - Descriptor ring to register mapping
4435  * @adapter: board private structure to initialize
4436  *
4437  * Once we know the feature-set enabled for the device, we'll cache
4438  * the register offset the descriptor ring is assigned to.
4439  *
4440  * Note, the order of the various feature calls is important.  It must start
4441  * with the "most" features enabled at the same time, then trickle down to
4442  * the smallest set of features turned on at once.
4443  **/
4444 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4445 {
4446         /* start with default case */
4447         adapter->rx_ring[0]->reg_idx = 0;
4448         adapter->tx_ring[0]->reg_idx = 0;
4449
4450         if (ixgbe_cache_ring_sriov(adapter))
4451                 return;
4452
4453 #ifdef IXGBE_FCOE
4454         if (ixgbe_cache_ring_fcoe(adapter))
4455                 return;
4456
4457 #endif /* IXGBE_FCOE */
4458 #ifdef CONFIG_IXGBE_DCB
4459         if (ixgbe_cache_ring_dcb(adapter))
4460                 return;
4461
4462 #endif
4463         if (ixgbe_cache_ring_fdir(adapter))
4464                 return;
4465
4466         if (ixgbe_cache_ring_rss(adapter))
4467                 return;
4468 }
4469
4470 /**
4471  * ixgbe_alloc_queues - Allocate memory for all rings
4472  * @adapter: board private structure to initialize
4473  *
4474  * We allocate one ring per queue at run-time since we don't know the
4475  * number of queues at compile-time.  This works for both multiqueue
4476  * and single-queue configurations.
4477  **/
4478 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4479 {
4480         int i;
4481         int orig_node = adapter->node;
4482
4483         for (i = 0; i < adapter->num_tx_queues; i++) {
4484                 struct ixgbe_ring *ring = adapter->tx_ring[i];
4485                 if (orig_node == -1) {
4486                         int cur_node = next_online_node(adapter->node);
4487                         if (cur_node == MAX_NUMNODES)
4488                                 cur_node = first_online_node;
4489                         adapter->node = cur_node;
4490                 }
4491                 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4492                                     adapter->node);
4493                 if (!ring)
4494                         ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4495                 if (!ring)
4496                         goto err_tx_ring_allocation;
4497                 ring->count = adapter->tx_ring_count;
4498                 ring->queue_index = i;
4499                 ring->numa_node = adapter->node;
4500
4501                 adapter->tx_ring[i] = ring;
4502         }
4503
4504         /* Restore the adapter's original node */
4505         adapter->node = orig_node;
4506
4507         for (i = 0; i < adapter->num_rx_queues; i++) {
4508                 struct ixgbe_ring *ring = adapter->rx_ring[i];
4509                 if (orig_node == -1) {
4510                         int cur_node = next_online_node(adapter->node);
4511                         if (cur_node == MAX_NUMNODES)
4512                                 cur_node = first_online_node;
4513                         adapter->node = cur_node;
4514                 }
4515                 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4516                                     adapter->node);
4517                 if (!ring)
4518                         ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4519                 if (!ring)
4520                         goto err_rx_ring_allocation;
4521                 ring->count = adapter->rx_ring_count;
4522                 ring->queue_index = i;
4523                 ring->numa_node = adapter->node;
4524
4525                 adapter->rx_ring[i] = ring;
4526         }
4527
4528         /* Restore the adapter's original node */
4529         adapter->node = orig_node;
4530
4531         ixgbe_cache_ring_register(adapter);
4532
4533         return 0;
4534
4535 err_rx_ring_allocation:
4536         for (i = 0; i < adapter->num_tx_queues; i++)
4537                 kfree(adapter->tx_ring[i]);
4538 err_tx_ring_allocation:
4539         return -ENOMEM;
4540 }
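
/*
 * Example of the node rotation above (illustrative): when the adapter
 * has no assigned NUMA node (orig_node == -1), each ring advances
 * through next_online_node(), so on a two-node system the rings
 * alternate between node 0 and node 1 instead of placing all
 * descriptor memory on a single node.
 */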
4541
4542 /**
4543  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4544  * @adapter: board private structure to initialize
4545  *
4546  * Attempt to configure the interrupts using the best available
4547  * capabilities of the hardware and the kernel.
4548  **/
4549 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4550 {
4551         struct ixgbe_hw *hw = &adapter->hw;
4552         int err = 0;
4553         int vector, v_budget;
4554
4555         /*
4556          * It's easy to be greedy for MSI-X vectors, but it really
4557          * doesn't do us much good if we have a lot more vectors
4558          * than CPUs.  So let's be conservative and only ask for
4559          * (roughly) the same number of vectors as there are CPUs.
4560          */
4561         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
4562                        (int)num_online_cpus()) + NON_Q_VECTORS;
4563
4564         /*
4565          * At the same time, hardware can only support a maximum of
4566          * hw->mac.max_msix_vectors vectors.  With features
4567          * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4568          * descriptor queues supported by our device.  Thus, we cap it off in
4569          * those rare cases where the CPU count also exceeds our vector limit.
4570          */
4571         v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
4572
4573         /* A failure in MSI-X entry allocation isn't fatal, but it does
4574          * mean we disable MSI-X capabilities of the adapter. */
4575         adapter->msix_entries = kcalloc(v_budget,
4576                                         sizeof(struct msix_entry), GFP_KERNEL);
4577         if (adapter->msix_entries) {
4578                 for (vector = 0; vector < v_budget; vector++)
4579                         adapter->msix_entries[vector].entry = vector;
4580
4581                 ixgbe_acquire_msix_vectors(adapter, v_budget);
4582
4583                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4584                         goto out;
4585         }
4586
4587         adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4588         adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4589         adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4590         adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4591         adapter->atr_sample_rate = 0;
4592         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4593                 ixgbe_disable_sriov(adapter);
4594
4595         err = ixgbe_set_num_queues(adapter);
4596         if (err)
4597                 return err;
4598
4599         err = pci_enable_msi(adapter->pdev);
4600         if (!err) {
4601                 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4602         } else {
4603                 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4604                              "Unable to allocate MSI interrupt, "
4605                              "falling back to legacy.  Error: %d\n", err);
4606                 /* reset err */
4607                 err = 0;
4608         }
4609
4610 out:
4611         return err;
4612 }
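
/*
 * Summary of the fallback ladder above (illustrative): MSI-X is tried
 * first; if it cannot be had, every feature that needs multiple vectors
 * (DCB, RSS, Flow Director, SR-IOV) is switched off, the queue counts
 * are recomputed for a single pair, and the driver settles for MSI or,
 * failing that, a legacy INTx interrupt.
 */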
4613
4614 /**
4615  * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4616  * @adapter: board private structure to initialize
4617  *
4618  * We allocate one q_vector per queue interrupt.  If allocation fails we
4619  * return -ENOMEM.
4620  **/
4621 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4622 {
4623         int q_idx, num_q_vectors;
4624         struct ixgbe_q_vector *q_vector;
4625         int napi_vectors;
4626         int (*poll)(struct napi_struct *, int);
4627
4628         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4629                 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4630                 napi_vectors = adapter->num_rx_queues;
4631                 poll = &ixgbe_clean_rxtx_many;
4632         } else {
4633                 num_q_vectors = 1;
4634                 napi_vectors = 1;
4635                 poll = &ixgbe_poll;
4636         }
4637
4638         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4639                 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4640                                         GFP_KERNEL, adapter->node);
4641                 if (!q_vector)
4642                         q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4643                                            GFP_KERNEL);
4644                 if (!q_vector)
4645                         goto err_out;
4646                 q_vector->adapter = adapter;
4647                 if (q_vector->txr_count && !q_vector->rxr_count)
4648                         q_vector->eitr = adapter->tx_eitr_param;
4649                 else
4650                         q_vector->eitr = adapter->rx_eitr_param;
4651                 q_vector->v_idx = q_idx;
4652                 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
4653                 adapter->q_vector[q_idx] = q_vector;
4654         }
4655
4656         return 0;
4657
4658 err_out:
4659         while (q_idx) {
4660                 q_idx--;
4661                 q_vector = adapter->q_vector[q_idx];
4662                 netif_napi_del(&q_vector->napi);
4663                 kfree(q_vector);
4664                 adapter->q_vector[q_idx] = NULL;
4665         }
4666         return -ENOMEM;
4667 }
4668
4669 /**
4670  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4671  * @adapter: board private structure to initialize
4672  *
4673  * This function frees the memory allocated to the q_vectors.  In addition if
4674  * NAPI is enabled it will delete any references to the NAPI struct prior
4675  * to freeing the q_vector.
4676  **/
4677 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4678 {
4679         int q_idx, num_q_vectors;
4680
4681         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4682                 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4683         else
4684                 num_q_vectors = 1;
4685
4686         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4687                 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
4688                 adapter->q_vector[q_idx] = NULL;
4689                 netif_napi_del(&q_vector->napi);
4690                 kfree(q_vector);
4691         }
4692 }
4693
4694 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
4695 {
4696         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4697                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4698                 pci_disable_msix(adapter->pdev);
4699                 kfree(adapter->msix_entries);
4700                 adapter->msix_entries = NULL;
4701         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4702                 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4703                 pci_disable_msi(adapter->pdev);
4704         }
4705 }
4706
4707 /**
4708  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
4709  * @adapter: board private structure to initialize
4710  *
4711  * We determine which interrupt scheme to use based on...
4712  * - Kernel support (MSI, MSI-X)
4713  *   - which can be user-defined (via MODULE_PARAM)
4714  * - Hardware queue count (num_*_queues)
4715  *   - defined by miscellaneous hardware support/features (RSS, etc.)
4716  **/
4717 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4718 {
4719         int err;
4720
4721         /* Number of supported queues */
4722         err = ixgbe_set_num_queues(adapter);
4723         if (err)
4724                 return err;
4725
4726         err = ixgbe_set_interrupt_capability(adapter);
4727         if (err) {
4728                 e_dev_err("Unable to setup interrupt capabilities\n");
4729                 goto err_set_interrupt;
4730         }
4731
4732         err = ixgbe_alloc_q_vectors(adapter);
4733         if (err) {
4734                 e_dev_err("Unable to allocate memory for queue vectors\n");
4735                 goto err_alloc_q_vectors;
4736         }
4737
4738         err = ixgbe_alloc_queues(adapter);
4739         if (err) {
4740                 e_dev_err("Unable to allocate memory for queues\n");
4741                 goto err_alloc_queues;
4742         }
4743
4744         e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
4745                    (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4746                    adapter->num_rx_queues, adapter->num_tx_queues);
4747
4748         set_bit(__IXGBE_DOWN, &adapter->state);
4749
4750         return 0;
4751
4752 err_alloc_queues:
4753         ixgbe_free_q_vectors(adapter);
4754 err_alloc_q_vectors:
4755         ixgbe_reset_interrupt_capability(adapter);
4756 err_set_interrupt:
4757         return err;
4758 }
4759
4760 /**
4761  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4762  * @adapter: board private structure to clear interrupt scheme on
4763  *
4764  * We go through and clear interrupt-specific resources and reset the
4765  * structure to pre-load conditions
4766  **/
4767 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4768 {
4769         int i;
4770
4771         for (i = 0; i < adapter->num_tx_queues; i++) {
4772                 kfree(adapter->tx_ring[i]);
4773                 adapter->tx_ring[i] = NULL;
4774         }
4775         for (i = 0; i < adapter->num_rx_queues; i++) {
4776                 kfree(adapter->rx_ring[i]);
4777                 adapter->rx_ring[i] = NULL;
4778         }
4779
4780         ixgbe_free_q_vectors(adapter);
4781         ixgbe_reset_interrupt_capability(adapter);
4782 }
4783
4784 /**
4785  * ixgbe_sfp_timer - timer that schedules the search for a missing module
4786  * @data: pointer to our adapter struct
4787  **/
4788 static void ixgbe_sfp_timer(unsigned long data)
4789 {
4790         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4791
4792         /*
4793          * Do the sfp work outside of interrupt context due to the
4794          * delays that SFP+ detection requires
4795          */
4796         schedule_work(&adapter->sfp_task);
4797 }
4798
4799 /**
4800  * ixgbe_sfp_task - worker thread to find a missing module
4801  * @work: pointer to work_struct containing our data
4802  **/
4803 static void ixgbe_sfp_task(struct work_struct *work)
4804 {
4805         struct ixgbe_adapter *adapter = container_of(work,
4806                                                      struct ixgbe_adapter,
4807                                                      sfp_task);
4808         struct ixgbe_hw *hw = &adapter->hw;
4809
4810         if ((hw->phy.type == ixgbe_phy_nl) &&
4811             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4812                 s32 ret = hw->phy.ops.identify_sfp(hw);
4813                 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
4814                         goto reschedule;
4815                 ret = hw->phy.ops.reset(hw);
4816                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4817                         e_dev_err("failed to initialize because an unsupported "
4818                                   "SFP+ module type was detected.\n");
4819                         e_dev_err("Reload the driver after installing a "
4820                                   "supported module.\n");
4821                         unregister_netdev(adapter->netdev);
4822                 } else {
4823                         e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
4824                 }
4825                 /* don't need this routine any more */
4826                 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4827         }
4828         return;
4829 reschedule:
4830         if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4831                 mod_timer(&adapter->sfp_timer,
4832                           round_jiffies(jiffies + (2 * HZ)));
4833 }
4834
4835 /**
4836  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4837  * @adapter: board private structure to initialize
4838  *
4839  * ixgbe_sw_init initializes the Adapter private data structure.
4840  * Fields are initialized based on PCI device information and
4841  * OS network device settings (MTU size).
4842  **/
4843 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4844 {
4845         struct ixgbe_hw *hw = &adapter->hw;
4846         struct pci_dev *pdev = adapter->pdev;
4847         struct net_device *dev = adapter->netdev;
4848         unsigned int rss;
4849 #ifdef CONFIG_IXGBE_DCB
4850         int j;
4851         struct tc_configuration *tc;
4852 #endif
4853
4854         /* PCI config space info */
4855
4856         hw->vendor_id = pdev->vendor;
4857         hw->device_id = pdev->device;
4858         hw->revision_id = pdev->revision;
4859         hw->subsystem_vendor_id = pdev->subsystem_vendor;
4860         hw->subsystem_device_id = pdev->subsystem_device;
4861
4862         /* Set capability flags */
4863         rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
4864         adapter->ring_feature[RING_F_RSS].indices = rss;
4865         adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4866         adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4867         if (hw->mac.type == ixgbe_mac_82598EB) {
4868                 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4869                         adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4870                 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4871         } else if (hw->mac.type == ixgbe_mac_82599EB) {
4872                 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4873                 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4874                 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4875                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4876                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4877                 if (dev->features & NETIF_F_NTUPLE) {
4878                         /* Flow Director perfect filter enabled */
4879                         adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4880                         adapter->atr_sample_rate = 0;
4881                         spin_lock_init(&adapter->fdir_perfect_lock);
4882                 } else {
4883                         /* Flow Director hash filters enabled */
4884                         adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4885                         adapter->atr_sample_rate = 20;
4886                 }
4887                 adapter->ring_feature[RING_F_FDIR].indices =
4888                                                          IXGBE_MAX_FDIR_INDICES;
4889                 adapter->fdir_pballoc = 0;
4890 #ifdef IXGBE_FCOE
4891                 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4892                 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4893                 adapter->ring_feature[RING_F_FCOE].indices = 0;
4894 #ifdef CONFIG_IXGBE_DCB
4895                 /* Default traffic class to use for FCoE */
4896                 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
4897                 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4898 #endif
4899 #endif /* IXGBE_FCOE */
4900         }
4901
4902 #ifdef CONFIG_IXGBE_DCB
4903         /* Configure DCB traffic classes */
4904         for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4905                 tc = &adapter->dcb_cfg.tc_config[j];
4906                 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4907                 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4908                 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4909                 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4910                 tc->dcb_pfc = pfc_disabled;
4911         }
4912         adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4913         adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4914         adapter->dcb_cfg.rx_pba_cfg = pba_equal;
4915         adapter->dcb_cfg.pfc_mode_enable = false;
4916         adapter->dcb_cfg.round_robin_enable = false;
4917         adapter->dcb_set_bitmap = 0x00;
4918         ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4919                            adapter->ring_feature[RING_F_DCB].indices);
4920
4921 #endif
4922
4923         /* default flow control settings */
4924         hw->fc.requested_mode = ixgbe_fc_full;
4925         hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
4926 #ifdef CONFIG_DCB
4927         adapter->last_lfc_mode = hw->fc.current_mode;
4928 #endif
4929         hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
4930         hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
4931         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4932         hw->fc.send_xon = true;
4933         hw->fc.disable_fc_autoneg = false;
4934
4935         /* enable itr by default in dynamic mode */
4936         adapter->rx_itr_setting = 1;
4937         adapter->rx_eitr_param = 20000;
4938         adapter->tx_itr_setting = 1;
4939         adapter->tx_eitr_param = 10000;
4940
4941         /* set default eitr thresholds, in megabytes per second */
4942         adapter->eitr_low = 10;
4943         adapter->eitr_high = 20;
4944
4945         /* set default ring sizes */
4946         adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4947         adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4948
4949         /* initialize eeprom parameters */
4950         if (ixgbe_init_eeprom_params_generic(hw)) {
4951                 e_dev_err("EEPROM initialization failed\n");
4952                 return -EIO;
4953         }
4954
4955         /* enable rx csum by default */
4956         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
4957
4958         /* get assigned NUMA node */
4959         adapter->node = dev_to_node(&pdev->dev);
4960
4961         set_bit(__IXGBE_DOWN, &adapter->state);
4962
4963         return 0;
4964 }
4965
4966 /**
4967  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4968  * @adapter: board private structure
4969  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
4970  *
4971  * Return 0 on success, negative on failure
4972  **/
4973 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4974                              struct ixgbe_ring *tx_ring)
4975 {
4976         struct pci_dev *pdev = adapter->pdev;
4977         int size;
4978
4979         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4980         tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
4981         if (!tx_ring->tx_buffer_info)
4982                 tx_ring->tx_buffer_info = vmalloc(size);
4983         if (!tx_ring->tx_buffer_info)
4984                 goto err;
4985         memset(tx_ring->tx_buffer_info, 0, size);
4986
4987         /* round up to nearest 4K */
4988         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4989         tx_ring->size = ALIGN(tx_ring->size, 4096);
4990
4991         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
4992                                            &tx_ring->dma, GFP_KERNEL);
4993         if (!tx_ring->desc)
4994                 goto err;
4995
4996         tx_ring->next_to_use = 0;
4997         tx_ring->next_to_clean = 0;
4998         tx_ring->work_limit = tx_ring->count;
4999         return 0;
5000
5001 err:
5002         vfree(tx_ring->tx_buffer_info);
5003         tx_ring->tx_buffer_info = NULL;
5004         e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
5005         return -ENOMEM;
5006 }
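
/*
 * Worked example of the sizing above (assumed values): an advanced Tx
 * descriptor is 16 bytes, so a default 512-entry ring needs
 * 512 * 16 = 8192 bytes; ALIGN(8192, 4096) leaves it at 8192, i.e.
 * two pages of coherent DMA memory.
 */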
5007
5008 /**
5009  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5010  * @adapter: board private structure
5011  *
5012  * If this function returns with an error, then it's possible one or
5013  * more of the rings is populated (while the rest are not).  It is the
5014  * caller's duty to clean those orphaned rings.
5015  *
5016  * Return 0 on success, negative on failure
5017  **/
5018 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5019 {
5020         int i, err = 0;
5021
5022         for (i = 0; i < adapter->num_tx_queues; i++) {
5023                 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
5024                 if (!err)
5025                         continue;
5026                 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5027                 break;
5028         }
5029
5030         return err;
5031 }
5032
5033 /**
5034  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5035  * @adapter: board private structure
5036  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
5037  *
5038  * Returns 0 on success, negative on failure
5039  **/
5040 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
5041                              struct ixgbe_ring *rx_ring)
5042 {
5043         struct pci_dev *pdev = adapter->pdev;
5044         int size;
5045
5046         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5047         rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
5048         if (!rx_ring->rx_buffer_info)
5049                 rx_ring->rx_buffer_info = vmalloc(size);
5050         if (!rx_ring->rx_buffer_info) {
5051                 e_err(probe, "vmalloc allocation failed for the Rx "
5052                       "descriptor ring\n");
5053                 goto alloc_failed;
5054         }
5055         memset(rx_ring->rx_buffer_info, 0, size);
5056
5057         /* Round up to nearest 4K */
5058         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5059         rx_ring->size = ALIGN(rx_ring->size, 4096);
5060
5061         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
5062                                            &rx_ring->dma, GFP_KERNEL);
5063
5064         if (!rx_ring->desc) {
5065                 e_err(probe, "Memory allocation failed for the Rx "
5066                       "descriptor ring\n");
5067                 vfree(rx_ring->rx_buffer_info);
5068                 goto alloc_failed;
5069         }
5070
5071         rx_ring->next_to_clean = 0;
5072         rx_ring->next_to_use = 0;
5073
5074         return 0;
5075
5076 alloc_failed:
5077         return -ENOMEM;
5078 }
5079
5080 /**
5081  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5082  * @adapter: board private structure
5083  *
5084  * If this function returns with an error, then it's possible one or
5085  * more of the rings is populated (while the rest are not).  It is the
5086  * caller's duty to clean those orphaned rings.
5087  *
5088  * Return 0 on success, negative on failure
5089  **/
5090
5091 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5092 {
5093         int i, err = 0;
5094
5095         for (i = 0; i < adapter->num_rx_queues; i++) {
5096                 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
5097                 if (!err)
5098                         continue;
5099                 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5100                 break;
5101         }
5102
5103         return err;
5104 }
5105
5106 /**
5107  * ixgbe_free_tx_resources - Free Tx Resources per Queue
5108  * @adapter: board private structure
5109  * @tx_ring: Tx descriptor ring for a specific queue
5110  *
5111  * Free all transmit software resources
5112  **/
5113 void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
5114                              struct ixgbe_ring *tx_ring)
5115 {
5116         struct pci_dev *pdev = adapter->pdev;
5117
5118         ixgbe_clean_tx_ring(adapter, tx_ring);
5119
5120         vfree(tx_ring->tx_buffer_info);
5121         tx_ring->tx_buffer_info = NULL;
5122
5123         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
5124                           tx_ring->dma);
5125
5126         tx_ring->desc = NULL;
5127 }
5128
5129 /**
5130  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5131  * @adapter: board private structure
5132  *
5133  * Free all transmit software resources
5134  **/
5135 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5136 {
5137         int i;
5138
5139         for (i = 0; i < adapter->num_tx_queues; i++)
5140                 if (adapter->tx_ring[i]->desc)
5141                         ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
5142 }
5143
5144 /**
5145  * ixgbe_free_rx_resources - Free Rx Resources
5146  * @adapter: board private structure
5147  * @rx_ring: ring to clean the resources from
5148  *
5149  * Free all receive software resources
5150  **/
5151 void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
5152                              struct ixgbe_ring *rx_ring)
5153 {
5154         struct pci_dev *pdev = adapter->pdev;
5155
5156         ixgbe_clean_rx_ring(adapter, rx_ring);
5157
5158         vfree(rx_ring->rx_buffer_info);
5159         rx_ring->rx_buffer_info = NULL;
5160
5161         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
5162                           rx_ring->dma);
5163
5164         rx_ring->desc = NULL;
5165 }
5166
5167 /**
5168  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5169  * @adapter: board private structure
5170  *
5171  * Free all receive software resources
5172  **/
5173 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5174 {
5175         int i;
5176
5177         for (i = 0; i < adapter->num_rx_queues; i++)
5178                 if (adapter->rx_ring[i]->desc)
5179                         ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
5180 }
5181
5182 /**
5183  * ixgbe_change_mtu - Change the Maximum Transfer Unit
5184  * @netdev: network interface device structure
5185  * @new_mtu: new value for maximum frame size
5186  *
5187  * Returns 0 on success, negative on failure
5188  **/
5189 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5190 {
5191         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5192         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5193
5194         /* MTU < 68 is an error and causes problems on some kernels */
5195         if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5196                 return -EINVAL;
5197
5198         e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5199         /* must set new MTU before calling down or up */
5200         netdev->mtu = new_mtu;
5201
5202         if (netif_running(netdev))
5203                 ixgbe_reinit_locked(adapter);
5204
5205         return 0;
5206 }
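
/*
 * Example of the bounds check above (illustrative): a jumbo MTU of
 * 9000 gives max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) =
 * 9018, which fits under the 9728-byte IXGBE_MAX_JUMBO_FRAME_SIZE,
 * while anything below the historical 68-byte IPv4 minimum is
 * rejected outright.
 */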
5207
5208 /**
5209  * ixgbe_open - Called when a network interface is made active
5210  * @netdev: network interface device structure
5211  *
5212  * Returns 0 on success, negative value on failure
5213  *
5214  * The open entry point is called when a network interface is made
5215  * active by the system (IFF_UP).  At this point all resources needed
5216  * for transmit and receive operations are allocated, the interrupt
5217  * handler is registered with the OS, the watchdog timer is started,
5218  * and the stack is notified that the interface is ready.
5219  **/
5220 static int ixgbe_open(struct net_device *netdev)
5221 {
5222         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5223         int err;
5224
5225         /* disallow open during test */
5226         if (test_bit(__IXGBE_TESTING, &adapter->state))
5227                 return -EBUSY;
5228
5229         netif_carrier_off(netdev);
5230
5231         /* allocate transmit descriptors */
5232         err = ixgbe_setup_all_tx_resources(adapter);
5233         if (err)
5234                 goto err_setup_tx;
5235
5236         /* allocate receive descriptors */
5237         err = ixgbe_setup_all_rx_resources(adapter);
5238         if (err)
5239                 goto err_setup_rx;
5240
5241         ixgbe_configure(adapter);
5242
5243         err = ixgbe_request_irq(adapter);
5244         if (err)
5245                 goto err_req_irq;
5246
5247         err = ixgbe_up_complete(adapter);
5248         if (err)
5249                 goto err_up;
5250
5251         netif_tx_start_all_queues(netdev);
5252
5253         return 0;
5254
5255 err_up:
5256         ixgbe_release_hw_control(adapter);
5257         ixgbe_free_irq(adapter);
5258 err_req_irq:
5259 err_setup_rx:
5260         ixgbe_free_all_rx_resources(adapter);
5261 err_setup_tx:
5262         ixgbe_free_all_tx_resources(adapter);
5263         ixgbe_reset(adapter);
5264
5265         return err;
5266 }
5267
5268 /**
5269  * ixgbe_close - Disables a network interface
5270  * @netdev: network interface device structure
5271  *
5272  * Returns 0, this is not allowed to fail
5273  *
5274  * The close entry point is called when an interface is de-activated
5275  * by the OS.  The hardware is still under the drivers control, but
5276  * needs to be disabled.  A global MAC reset is issued to stop the
5277  * hardware, and all transmit and receive resources are freed.
5278  **/
5279 static int ixgbe_close(struct net_device *netdev)
5280 {
5281         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5282
5283         ixgbe_down(adapter);
5284         ixgbe_free_irq(adapter);
5285
5286         ixgbe_free_all_tx_resources(adapter);
5287         ixgbe_free_all_rx_resources(adapter);
5288
5289         ixgbe_release_hw_control(adapter);
5290
5291         return 0;
5292 }
5293
5294 #ifdef CONFIG_PM
5295 static int ixgbe_resume(struct pci_dev *pdev)
5296 {
5297         struct net_device *netdev = pci_get_drvdata(pdev);
5298         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5299         int err;
5300
5301         pci_set_power_state(pdev, PCI_D0);
5302         pci_restore_state(pdev);
5303         /*
5304          * pci_restore_state clears dev->state_saved so call
5305          * pci_save_state to restore it.
5306          */
5307         pci_save_state(pdev);
5308
5309         err = pci_enable_device_mem(pdev);
5310         if (err) {
5311                 e_dev_err("Cannot enable PCI device from suspend\n");
5312                 return err;
5313         }
5314         pci_set_master(pdev);
5315
5316         pci_wake_from_d3(pdev, false);
5317
5318         err = ixgbe_init_interrupt_scheme(adapter);
5319         if (err) {
5320                 e_dev_err("Cannot initialize interrupts for device\n");
5321                 return err;
5322         }
5323
5324         ixgbe_reset(adapter);
5325
5326         IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5327
5328         if (netif_running(netdev)) {
5329                 err = ixgbe_open(adapter->netdev);
5330                 if (err)
5331                         return err;
5332         }
5333
5334         netif_device_attach(netdev);
5335
5336         return 0;
5337 }
5338 #endif /* CONFIG_PM */
5339
5340 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5341 {
5342         struct net_device *netdev = pci_get_drvdata(pdev);
5343         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5344         struct ixgbe_hw *hw = &adapter->hw;
5345         u32 ctrl, fctrl;
5346         u32 wufc = adapter->wol;
5347 #ifdef CONFIG_PM
5348         int retval = 0;
5349 #endif
5350
5351         netif_device_detach(netdev);
5352
5353         if (netif_running(netdev)) {
5354                 ixgbe_down(adapter);
5355                 ixgbe_free_irq(adapter);
5356                 ixgbe_free_all_tx_resources(adapter);
5357                 ixgbe_free_all_rx_resources(adapter);
5358         }
5359
5360 #ifdef CONFIG_PM
5361         retval = pci_save_state(pdev);
5362         if (retval)
5363                 return retval;
5364
5365 #endif
5366         if (wufc) {
5367                 ixgbe_set_rx_mode(netdev);
5368
5369                 /* turn on all-multi mode if wake on multicast is enabled */
5370                 if (wufc & IXGBE_WUFC_MC) {
5371                         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5372                         fctrl |= IXGBE_FCTRL_MPE;
5373                         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5374                 }
5375
5376                 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5377                 ctrl |= IXGBE_CTRL_GIO_DIS;
5378                 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5379
5380                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5381         } else {
5382                 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5383                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5384         }
5385
5386         if (wufc && hw->mac.type == ixgbe_mac_82599EB)
5387                 pci_wake_from_d3(pdev, true);
5388         else
5389                 pci_wake_from_d3(pdev, false);
5390
5391         *enable_wake = !!wufc;
5392
5393         ixgbe_clear_interrupt_scheme(adapter);
5394
5395         ixgbe_release_hw_control(adapter);
5396
5397         pci_disable_device(pdev);
5398
5399         return 0;
5400 }
5401
5402 #ifdef CONFIG_PM
5403 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5404 {
5405         int retval;
5406         bool wake;
5407
5408         retval = __ixgbe_shutdown(pdev, &wake);
5409         if (retval)
5410                 return retval;
5411
5412         if (wake) {
5413                 pci_prepare_to_sleep(pdev);
5414         } else {
5415                 pci_wake_from_d3(pdev, false);
5416                 pci_set_power_state(pdev, PCI_D3hot);
5417         }
5418
5419         return 0;
5420 }
5421 #endif /* CONFIG_PM */
5422
5423 static void ixgbe_shutdown(struct pci_dev *pdev)
5424 {
5425         bool wake;
5426
5427         __ixgbe_shutdown(pdev, &wake);
5428
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
        u64 non_eop_descs = 0, restart_queue = 0;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;

        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return;

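        /*
         * RSC (hardware receive side coalescing) counters are kept per
         * RX ring; fold them into adapter-wide totals here.
         */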
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                u64 rsc_count = 0;
                u64 rsc_flush = 0;
                for (i = 0; i < 16; i++)
                        adapter->hw_rx_no_dma_resources +=
                                IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        rsc_count += adapter->rx_ring[i]->rsc_count;
                        rsc_flush += adapter->rx_ring[i]->rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
        }

        /* gather some stats to the adapter struct that are per queue */
        for (i = 0; i < adapter->num_tx_queues; i++)
                restart_queue += adapter->tx_ring[i]->restart_queue;
        adapter->restart_queue = restart_queue;

        for (i = 0; i < adapter->num_rx_queues; i++)
                non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
        adapter->non_eop_descs = non_eop_descs;

        hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
                /* for packet buffers not used, the register should read 0 */
                mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
                missed_rx += mpc;
                hwstats->mpc[i] += mpc;
                total_mpc += hwstats->mpc[i];
                if (hw->mac.type == ixgbe_mac_82598EB)
                        hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
                hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
                hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
                if (hw->mac.type == ixgbe_mac_82599EB) {
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
                        hwstats->pxoffrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                        hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                } else {
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                        hwstats->pxoffrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                }
                hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
        }
        hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
        /* work around hardware counting issue */
        hwstats->gprc -= missed_rx;

        /* 82598 hardware only has a 32 bit counter in the high register */
        if (hw->mac.type == ixgbe_mac_82599EB) {
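                /*
                 * The 82599 keeps 36-bit octet counters split across two
                 * registers: GORCL/GOTCL hold bits 31:0 and the low four
                 * bits of GORCH/GOTCH hold bits 35:32, hence the 0xF mask
                 * and the shift by 32 below.
                 */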
                u64 tmp;
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
                tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
                                                /* 4 high bits of GORC */
                hwstats->gorc += (tmp << 32);
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
                tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
                                                /* 4 high bits of GOTC */
                hwstats->gotc += (tmp << 32);
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
                IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
                hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
                hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
                hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
                hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
                hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
        } else {
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
                hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        }
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hwstats->bprc += bprc;
        hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
        if (hw->mac.type == ixgbe_mac_82598EB)
                hwstats->mprc -= bprc;
        hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
        hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
        hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
        hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
        hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
        hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
        hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
        lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        hwstats->lxontxc += lxon;
        lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
        hwstats->lxofftxc += lxoff;
        hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
        hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        /*
         * 82598 errata - tx of flow control packets is included in tx counters
         */
        xon_off_tot = lxon + lxoff;
        hwstats->gptc -= xon_off_tot;
        hwstats->mptc -= xon_off_tot;
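        /* each pause frame is a minimum-size frame: ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 octets */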
        hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
        hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
        hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
        hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
        hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
        hwstats->ptc64 -= xon_off_tot;
        hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
        hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
        hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

        /* Fill out the OS statistics structure */
        netdev->stats.multicast = hwstats->mprc;

        /* Rx Errors */
        netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
        netdev->stats.rx_dropped = 0;
        netdev->stats.rx_length_errors = hwstats->rlec;
        netdev->stats.rx_crc_errors = hwstats->crcerrs;
        netdev->stats.rx_missed_errors = total_mpc;
}

/**
 * ixgbe_watchdog - Timer callback
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
        struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
        u64 eics = 0;
        int i;

        /*
         * Do the watchdog outside of interrupt context due to the lovely
         * delays that some of the newer hardware requires
         */

        if (test_bit(__IXGBE_DOWN, &adapter->state))
                goto watchdog_short_circuit;

        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
                /*
                 * for legacy and MSI interrupts don't set any bits
                 * that are enabled for EIAM, because this operation
                 * would set *both* EIMS and EICS for any bit in EIAM
                 */
                IXGBE_WRITE_REG(hw, IXGBE_EICS,
                        (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
                goto watchdog_reschedule;
        }

        /* get one bit for every active tx/rx interrupt vector */
        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbe_q_vector *qv = adapter->q_vector[i];
                if (qv->rxr_count || qv->txr_count)
                        eics |= ((u64)1 << i);
        }

        /* Cause software interrupt to ensure rx rings are cleaned */
        ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     multispeed_fiber_task);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 autoneg;
        bool negotiation;

        adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
        autoneg = hw->phy.autoneg_advertised;
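        /* if no speeds are advertised, fall back to whatever the link supports */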
        if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
                hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
        hw->mac.autotry_restart = false;
        if (hw->mac.ops.setup_link)
                hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     sfp_config_module_task);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 err;

        adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

        /* Time for electrical oscillations to settle down */
        msleep(100);
        err = hw->phy.ops.identify_sfp(hw);

        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                e_dev_err("failed to initialize because an unsupported SFP+ "
                          "module type was detected.\n");
                e_dev_err("Reload the driver after installing a supported "
                          "module.\n");
                unregister_netdev(adapter->netdev);
                return;
        }
        hw->mac.ops.setup_sfp(hw);

        if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
                /* This will also work for DA Twinax connections */
                schedule_work(&adapter->multispeed_fiber_task);
        adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     fdir_reinit_task);
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

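        /*
         * On success, mark every Tx ring's reinit_state so the ATR code
         * knows the Flow Director table can accept new filters again.
         */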
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        set_bit(__IXGBE_FDIR_INIT_DONE,
                                &(adapter->tx_ring[i]->reinit_state));
        } else {
                e_err(probe, "failed to finish FDIR re-initialization, "
                      "ignoring new FDIR ATR filters\n");
        }
        /* FDIR re-initialization done; re-enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
}

static DEFINE_MUTEX(ixgbe_watchdog_lock);

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed;
        bool link_up;
        int i;
        struct ixgbe_ring *tx_ring;
        int some_tx_pending = 0;

        mutex_lock(&ixgbe_watchdog_lock);

        link_up = adapter->link_up;
        link_speed = adapter->link_speed;

        if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
                if (link_up) {
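                        /*
                         * With DCB, priority flow control is enabled for
                         * each traffic class; otherwise legacy link-level
                         * flow control is enabled on TC 0 only.
                         */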
#ifdef CONFIG_DCB
                        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                                for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                                        hw->mac.ops.fc_enable(hw, i);
                        } else {
                                hw->mac.ops.fc_enable(hw, 0);
                        }
#else
                        hw->mac.ops.fc_enable(hw, 0);
#endif
                }

                if (link_up ||
                    time_after(jiffies, (adapter->link_check_timeout +
                                         IXGBE_TRY_LINK_TIMEOUT))) {
                        adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
                }
                adapter->link_up = link_up;
                adapter->link_speed = link_speed;
        }

        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
                        bool flow_rx, flow_tx;

                        if (hw->mac.type == ixgbe_mac_82599EB) {
                                u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
                                u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
                                flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
                                flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
                        } else {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
                                flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
                                flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }

                        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
                               "10 Gbps" :
                               (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
                               "1 Gbps" : "unknown speed")),
                               ((flow_rx && flow_tx) ? "RX/TX" :
                               (flow_rx ? "RX" :
                               (flow_tx ? "TX" : "None"))));

                        netif_carrier_on(netdev);
                } else {
                        /* Force detection of hung controller */
                        adapter->detect_tx_hung = true;
                }
        } else {
                adapter->link_up = false;
                adapter->link_speed = 0;
                if (netif_carrier_ok(netdev)) {
                        e_info(drv, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                }
        }

        if (!netif_carrier_ok(netdev)) {
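                /*
                 * next_to_use advances as the stack queues descriptors and
                 * next_to_clean advances as completed ones are reclaimed;
                 * a mismatch while the link is down means Tx work is
                 * stranded in the ring.
                 */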
                for (i = 0; i < adapter->num_tx_queues; i++) {
                        tx_ring = adapter->tx_ring[i];
                        if (tx_ring->next_to_use != tx_ring->next_to_clean) {
                                some_tx_pending = 1;
                                break;
                        }
                }

                if (some_tx_pending) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context).
                         */
                        schedule_work(&adapter->reset_task);
                }
        }

        ixgbe_update_stats(adapter);
        mutex_unlock(&ixgbe_watchdog_lock);
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
        int err;
        struct ixgbe_tx_buffer *tx_buffer_info;
        u32 vlan_macip_lens = 0, type_tucmd_mlhl;
        u32 mss_l4len_idx, l4len;

        if (skb_is_gso(skb)) {
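                /*
                 * TSO setup modifies the protocol headers in place
                 * (checksum fields), so a cloned header block must first
                 * be made private with pskb_expand_head().
                 */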
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
                                return err;
                }
                l4len = tcp_hdrlen(skb);
                *hdr_len += l4len;