netdrv intel: disable VLAN filtering in promiscuous mode
[linux-2.6.git] drivers/net/ixgbe/ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
         "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598]                   = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;
        struct net_device *netdev = adapter->netdev;
        return netdev->name;
}
#endif

static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
        u32 ivar, index;

        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = (int_alloc_entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
        ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
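
/*
 * Illustrative example of the IVAR packing above: each 32-bit IVAR
 * register holds four 8-bit interrupt-allocation entries.  For an
 * int_alloc_entry of 5, index = (5 >> 2) & 0x1F = 1 and the byte lane
 * is 5 & 0x3 = 1, so the vector number lands in bits 15:8 of IVAR(1).
 * The entry constants themselves (IXGBE_IVAR_RX_QUEUE()/
 * IXGBE_IVAR_TX_QUEUE()) come from the ixgbe headers.
 */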

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                pci_unmap_page(adapter->pdev,
                               tx_buffer_info->dma,
                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop,
                                       union ixgbe_adv_tx_desc *eop_desc)
{
        /* Detect a transmit hang in hardware; this serializes the
         * check with the clearing of time_stamp and movement of i */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].dma &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  TDH                  <%x>\n"
                        "  TDT                  <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  next_to_watch        <%x>\n"
                        "  jiffies              <%lx>\n"
                        "  next_to_watch.status <%x>\n",
                        readl(adapter->hw.hw_addr + tx_ring->head),
                        readl(adapter->hw.hw_addr + tx_ring->tail),
                        tx_ring->next_to_use,
                        tx_ring->next_to_clean,
                        tx_ring->tx_buffer_info[eop].time_stamp,
                        eop, jiffies, eop_desc->wb.status);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)   /* for context */
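
/*
 * Illustrative arithmetic for the macros above: TXD_USE_COUNT(S) is
 * ceil(S / 16384), so skb->data needs TXD_USE_COUNT(16384) = 1
 * descriptor, and with 4 KB pages each fragment needs
 * TXD_USE_COUNT(PAGE_SIZE) = 1.  Assuming MAX_SKB_FRAGS is 18 (the usual
 * value with 4 KB pages), DESC_NEEDED works out to 1 + 18 + 1 = 20.
 */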

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *tx_ring)
{
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop;
        bool cleaned = false;
        unsigned int total_tx_bytes = 0, total_tx_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
                cleaned = false;
                while (!cleaned) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);

                        tx_ring->stats.bytes += tx_buffer_info->length;
                        if (cleaned) {
                                struct sk_buff *skb = tx_buffer_info->skb;
                                unsigned int segs, bytecount;
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_tx_packets += segs;
                                total_tx_bytes += bytecount;
                        }
                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);
                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                tx_ring->stats.packets++;

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

                /* a weight of sorts for tx; avoid endless transmit cleanup */
                if (total_tx_packets >= tx_ring->work_limit)
                        break;
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (total_tx_packets && netif_carrier_ok(netdev) &&
            (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        adapter->restart_queue++;
                }
        }

        if (adapter->detect_tx_hung)
                if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
                        netif_stop_subqueue(netdev, tx_ring->queue_index);

        if (total_tx_packets >= tx_ring->work_limit)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

        tx_ring->total_bytes += total_tx_bytes;
        tx_ring->total_packets += total_tx_packets;
        adapter->net_stats.tx_bytes += total_tx_bytes;
        adapter->net_stats.tx_packets += total_tx_packets;
        cleaned = total_tx_packets ? true : false;
        return cleaned;
}

#ifdef CONFIG_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rxr)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rxr - adapter->rx_ring;

        if (rxr->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                rxctrl |= dca_get_tag(cpu);
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rxr->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *txr)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = txr - adapter->tx_ring;

        if (txr->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                txctrl |= dca_get_tag(cpu);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                txr->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        if (adapter->netdev->features & NETIF_F_LRO &&
            skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (adapter->vlgrp && is_vlan)
                        lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
                                                     adapter->vlgrp, tag,
                                                     rx_desc);
                else
                        lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
                ring->lro_used = true;
        } else {
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
                        else
                                netif_receive_skb(skb);
                } else {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                        else
                                netif_rx(skb);
                }
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                         u32 status_err,
                                         struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* skip if the Ignore Checksum bit is set or rx csum is disabled */
        if ((status_err & IXGBE_RXD_STAT_IXSM) ||
            !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *rx_ring,
                                       int cleaned_count)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!rx_buffer_info->page &&
                                (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        rx_buffer_info->page = alloc_page(GFP_ATOMIC);
                        if (!rx_buffer_info->page) {
                                adapter->alloc_rx_page_failed++;
                                goto no_buffers;
                        }
                        rx_buffer_info->page_dma =
                            pci_map_page(pdev, rx_buffer_info->page,
                                         0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
                }

                if (!rx_buffer_info->skb) {
                        skb = netdev_alloc_skb(netdev, bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary;
                         * this will result in a 16 byte aligned IP header
                         * after the 14 byte MAC header is removed.
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        rx_buffer_info->skb = skb;
                        rx_buffer_info->dma = pci_map_single(pdev, skb->data,
                                                          bufsz,
                                                          PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr =
                            cpu_to_le64(rx_buffer_info->page_dma);
                        rx_desc->read.hdr_addr =
                                        cpu_to_le64(rx_buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr =
                                        cpu_to_le64(rx_buffer_info->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
        }
no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 upper_len, len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        upper_len = 0;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info =
                            le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
                        len =
                            ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                             IXGBE_RXDADV_HDRBUFLEN_SHIFT);
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else
                        len = le16_to_cpu(rx_desc->wb.upper.length);

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         adapter->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page, 0, upper_len);
                        rx_buffer_info->page = NULL;

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_buffer = &rx_ring->rx_buffer_info[i];

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);

                cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, netdev);
                ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
                netdev->last_rx = jiffies;

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        if (rx_ring->lro_used) {
                lro_flush_all(&rx_ring->lro_mgr);
                rx_ring->lro_used = false;
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = &adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                      adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector use half the irq (tx) rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = adapter->tx_eitr;
                else
                        /* rx only or mixed */
                        q_vector->eitr = adapter->rx_eitr;

                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
                                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }

        ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, lsc, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~IXGBE_EIMS_OTHER;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Returns a new ITR value based on packet and byte
 *      counts during the last interrupt.  The advantage of per-interrupt
 *      computation is faster updates and a more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}
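
/*
 * Worked example for the function above (values assumed, purely
 * illustrative): with eitr = 20000 ints/s the last timeslice was
 * 1000000/20000 = 50 us.  If 6000 bytes arrived in that slice,
 * bytes_perint = 6000/50 = 120 bytes/us, and whether the state moves
 * between lowest_latency, low_latency and bulk_latency depends on how
 * that compares with the adapter's eitr_low/eitr_high thresholds.
 */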

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
                              sizeof(struct ixgbe_q_vector);
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_eitr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
                                    q_vector->tx_eitr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_eitr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
                                    q_vector->rx_eitr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
                        itr_reg);
                IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
        }

        return;
}
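
/*
 * Illustrative note on the smoothing step above: the update is
 * new_itr = 0.9 * old + 0.1 * target, so a vector sitting at 8000 ints/s
 * that suddenly wants 100000 ints/s is only stepped to
 * (8000 * 90)/100 + (100000 * 10)/100 = 17200 on this pass, damping
 * oscillation between the latency classes.
 */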

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

        if (eicr & IXGBE_EICR_LSC) {
                adapter->lsc_int++;
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies);
        }

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring     *txr;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                txr = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, txr);
#endif
                txr->total_bytes = 0;
                txr->total_packets = 0;
                ixgbe_clean_tx_irq(adapter, txr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring  *rxr;
        int r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        rxr = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
        rxr->total_bytes = 0;
        rxr->total_packets = 0;
        netif_rx_schedule(adapter->netdev, &q_vector->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
        ixgbe_msix_clean_rx(irq, data);
        ixgbe_msix_clean_tx(irq, data);

        return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rxr;
        int work_done = 0;
        long r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rxr = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_rx_dca(adapter, rxr);
#endif

        ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(adapter->netdev, napi);
                if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
        }

        return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
        a->q_vector[v_idx].rxr_count++;
        a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].txr_idx);
        a->q_vector[v_idx].txr_count++;
        a->tx_ring[r_idx].v_idx = 1 << v_idx;
}
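
/*
 * Illustrative note: v_idx is stored on the ring as a one-hot mask
 * (1 << v_idx) so a ring's vector can be masked or unmasked with a
 * single register write, e.g. the
 * IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx) call in
 * ixgbe_msix_clean_rx() above.
 */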

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        /* No mapping required if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);

                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}
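
/*
 * Worked example for the grouping above (illustrative only): with
 * 8 Rx queues, 8 Tx queues and only 4 vectors, each pass computes
 * DIV_ROUND_UP(remaining, vectors - i), so vectors 0-3 each pick up
 * 2 Rx rings and then 2 Tx rings; every vector ends up servicing a
 * mixed Rx/Tx set via ixgbe_msix_clean_many().
 */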

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Map the Tx/Rx rings to the vectors we were allotted. */
        err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
        if (err)
                goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(&adapter->q_vector[vector]);
                sprintf(adapter->name[vector], "%s:v%d-%s",
                        netdev->name, vector,
                        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
                         ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  &(adapter->q_vector[vector]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
                         &(adapter->q_vector[i]));
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
out:
        return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_q_vector *q_vector = adapter->q_vector;
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

        q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
                                             q_vector->tx_eitr,
                                             tx_ring->total_packets,
                                             tx_ring->total_bytes);
        q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
                                             q_vector->rx_eitr,
                                             rx_ring->total_packets,
                                             rx_ring->total_bytes);

        current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
                new_itr = 8000;
                break;
        default:
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
        }

        return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
         * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr)
                return IRQ_NONE;        /* Not our interrupt */

        if (eicr & IXGBE_EICR_LSC) {
                adapter->lsc_int++;
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies);
        }

        if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
        }

        return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
        }
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                err = ixgbe_request_msix_irqs(adapter);
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
                                  netdev->name, netdev);
        } else {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
                                  netdev->name, netdev);
        }

        if (err)
                DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i, q_vectors;

                q_vectors = adapter->num_msix_vectors;

                i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, netdev);

                i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
                                 &(adapter->q_vector[i]));
                }

                ixgbe_reset_q_vectors(adapter);
        } else {
                free_irq(adapter->pdev->irq, netdev);
        }
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i;
                for (i = 0; i < adapter->num_msix_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
                synchronize_irq(adapter->pdev->irq);
        }
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
        u32 mask;
        mask = IXGBE_EIMS_ENABLE_MASK;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
                        EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

        map_vector_to_rxq(adapter, 0, 0);
        map_vector_to_txq(adapter, 0, 0);

        DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 82598 Transmit Unit after Reset
1351  * @adapter: board private structure
1352  *
1353  * Configure the Tx unit of the MAC after a reset.
1354  **/
1355 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1356 {
1357         u64 tdba;
1358         struct ixgbe_hw *hw = &adapter->hw;
1359         u32 i, j, tdlen, txctrl;
1360
1361         /* Setup the HW Tx Head and Tail descriptor pointers */
1362         for (i = 0; i < adapter->num_tx_queues; i++) {
1363                 j = adapter->tx_ring[i].reg_idx;
1364                 tdba = adapter->tx_ring[i].dma;
1365                 tdlen = adapter->tx_ring[i].count *
1366                         sizeof(union ixgbe_adv_tx_desc);
1367                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1368                                 (tdba & DMA_32BIT_MASK));
1369                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1370                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1371                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1372                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1373                 adapter->tx_ring[i].head = IXGBE_TDH(j);
1374                 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1375                 /* Disable Tx Head Writeback RO bit, since this hoses
1376                  * bookkeeping if things aren't delivered in order.
1377                  */
1378                 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1379                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1380                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
1381         }
1382 }
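
/*
 * Example of the base-address split above: a 64-bit ring DMA address of
 * 0x0000000123456000 is programmed as TDBAL = 0x23456000 (low 32 bits via
 * DMA_32BIT_MASK) and TDBAH = 0x00000001 (tdba >> 32).
 */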
1383
1384 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1385                         (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
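/*
 * Worked example (assuming 4 KiB pages): PAGE_USE_COUNT(9000) is
 * (9000 >> 12) + 1 = 3, since 9000 bytes fill two full pages and leave an
 * 808-byte remainder that needs a third.
 */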
1386
1387 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT                 2
1388 /**
1389  * ixgbe_get_skb_hdr - helper function for LRO header processing
1390  * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
1392  * @tcph: pointer to tcp header structure
1393  * @hdr_flags: pointer to header flags
1394  * @priv: private data
1395  **/
1396 static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1397                              u64 *hdr_flags, void *priv)
1398 {
1399         union ixgbe_adv_rx_desc *rx_desc = priv;
1400
	/* Verify that this is a valid IPv4 TCP packet: both the IPv4 and
	 * TCP packet type bits must be set in the descriptor */
	if ((rx_desc->wb.lower.lo_dword.pkt_info &
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) !=
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))
		return -1;
1405
1406         /* Set network headers */
1407         skb_reset_network_header(skb);
1408         skb_set_transport_header(skb, ip_hdrlen(skb));
1409         *iphdr = ip_hdr(skb);
1410         *tcph = tcp_hdr(skb);
1411         *hdr_flags = LRO_IPV4 | LRO_TCP;
1412         return 0;
1413 }
1414
1415 /**
 * ixgbe_configure_rx - Configure 82598 Receive Unit after Reset
1417  * @adapter: board private structure
1418  *
1419  * Configure the Rx unit of the MAC after a reset.
1420  **/
1421 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1422 {
1423         u64 rdba;
1424         struct ixgbe_hw *hw = &adapter->hw;
1425         struct net_device *netdev = adapter->netdev;
1426         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1427         int i, j;
1428         u32 rdlen, rxctrl, rxcsum;
1429         u32 random[10];
1430         u32 fctrl, hlreg0;
1431         u32 pages;
1432         u32 reta = 0, mrqc, srrctl;
1433
1434         /* Decide whether to use packet split mode or not */
1435         if (netdev->mtu > ETH_DATA_LEN)
1436                 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1437         else
1438                 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1439
1440         /* Set the RX buffer length according to the mode */
1441         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1442                 adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
1443         } else {
1444                 if (netdev->mtu <= ETH_DATA_LEN)
1445                         adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1446                 else
1447                         adapter->rx_buf_len = ALIGN(max_frame, 1024);
1448         }
1449
1450         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1451         fctrl |= IXGBE_FCTRL_BAM;
1452         fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
1453         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1454
1455         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1456         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1457                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
1458         else
1459                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
1460         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
1461
1462         pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1463
1464         srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
1465         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1466         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1467
1468         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1469                 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1470                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1471                 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1472                             IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1473                            IXGBE_SRRCTL_BSIZEHDR_MASK);
1474         } else {
1475                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1476
1477                 if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1478                         srrctl |=
1479                              IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1480                 else
1481                         srrctl |=
1482                              adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1483         }
1484         IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
1485
1486         rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1487         /* disable receives while setting up the descriptors */
1488         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1489         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
1490
1491         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1492          * the Base and Length of the Rx Descriptor Ring */
1493         for (i = 0; i < adapter->num_rx_queues; i++) {
1494                 rdba = adapter->rx_ring[i].dma;
1495                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
1496                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
1497                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
1498                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
1499                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
1500                 adapter->rx_ring[i].head = IXGBE_RDH(i);
1501                 adapter->rx_ring[i].tail = IXGBE_RDT(i);

		/* Initial LRO settings; these are per-ring, so they must be
		 * programmed inside the loop rather than after it, where i
		 * would index past the last ring */
		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
		adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
		adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	}

1515         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1516                 /* Fill out redirection table */
1517                 for (i = 0, j = 0; i < 128; i++, j++) {
1518                         if (j == adapter->ring_feature[RING_F_RSS].indices)
1519                                 j = 0;
			/* reta is filled as a 4-byte sliding window; each
			 * byte holds queue index j replicated in both
			 * nibbles (j * 0x11), cycling over 0..indices-1 */
1522                         reta = (reta << 8) | (j * 0x11);
1523                         if ((i & 3) == 3)
1524                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
1525                 }
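		/* Worked example: with indices == 4 the bytes written above
		 * cycle 0x00, 0x11, 0x22, 0x33, so every RETA dword reads
		 * 0x00112233 and the 128 entries spread flows evenly over
		 * queues 0-3. */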
1526
1527                 /* Fill out hash function seeds */
1528                 /* XXX use a random constant here to glue certain flows */
1529                 get_random_bytes(&random[0], 40);
1530                 for (i = 0; i < 10; i++)
1531                         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
1532
1533                 mrqc = IXGBE_MRQC_RSSEN
1534                     /* Perform hash on these packet types */
1535                     | IXGBE_MRQC_RSS_FIELD_IPV4
1536                     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1537                     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1538                     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1539                     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1540                     | IXGBE_MRQC_RSS_FIELD_IPV6
1541                     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1542                     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1543                     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1544                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1545         }
1546
1547         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1548
1549         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1550             adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Setting PCSD stops the hardware from indicating checksum
		 * status in the descriptor and enables reporting of the RSS
		 * hash there instead */
1553                 rxcsum |= IXGBE_RXCSUM_PCSD;
1554         }
1555         if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1556                 /* Enable IPv4 payload checksum for UDP fragments
1557                  * if PCSD is not set */
1558                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1559         }
1560
1561         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1562 }
1563
1564 static void ixgbe_vlan_rx_register(struct net_device *netdev,
1565                                    struct vlan_group *grp)
1566 {
1567         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1568         u32 ctrl;
1569
1570         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1571                 ixgbe_irq_disable(adapter);
1572         adapter->vlgrp = grp;
1573
1574         if (grp) {
1575                 /* enable VLAN tag insert/strip */
1576                 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1577                 ctrl |= IXGBE_VLNCTRL_VME;
1578                 if (!(netdev->flags & IFF_PROMISC))
1579                         ctrl |= IXGBE_VLNCTRL_VFE;
1580                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1581                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1582         }
1583
1584         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1585                 ixgbe_irq_enable(adapter);
1586 }
1587
1588 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1589 {
1590         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1591
1592         /* add VID to filter table */
1593         ixgbe_set_vfta(&adapter->hw, vid, 0, true);
1594 }
1595
1596 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1597 {
1598         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1599
1600         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1601                 ixgbe_irq_disable(adapter);
1602
1603         vlan_group_set_device(adapter->vlgrp, vid, NULL);
1604
1605         if (!test_bit(__IXGBE_DOWN, &adapter->state))
1606                 ixgbe_irq_enable(adapter);
1607
1608         /* remove VID from filter table */
1609         ixgbe_set_vfta(&adapter->hw, vid, 0, false);
1610 }
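
/*
 * Usage sketch (hedged; driven by the 8021q module rather than this file):
 * `vconfig add eth0 100` reaches ixgbe_vlan_rx_add_vid(netdev, 100), which
 * sets VLAN 100 in the VFTA so tagged frames pass the filter, and
 * `vconfig rem eth0.100` undoes it through ixgbe_vlan_rx_kill_vid().
 */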
1611
1612 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1613 {
1614         ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1615
1616         if (adapter->vlgrp) {
1617                 u16 vid;
1618                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1619                         if (!vlan_group_get_device(adapter->vlgrp, vid))
1620                                 continue;
1621                         ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
1622                 }
1623         }
1624 }
1625
1626 /**
1627  * ixgbe_set_multi - Multicast and Promiscuous mode set
1628  * @netdev: network interface device structure
1629  *
1630  * The set_multi entry point is called whenever the multicast address
1631  * list or the network interface flags are updated.  This routine is
1632  * responsible for configuring the hardware for proper multicast,
1633  * promiscuous mode, and all-multi behavior.
1634  **/
1635 static void ixgbe_set_multi(struct net_device *netdev)
1636 {
1637         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1638         struct ixgbe_hw *hw = &adapter->hw;
1639         struct dev_mc_list *mc_ptr;
1640         u8 *mta_list;
	u32 fctrl, vlnctrl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		/* VFE is a VLNCTRL bit, not an FCTRL bit; clear it there so
		 * VLAN filtering cannot hide frames in promiscuous mode */
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		if (adapter->vlgrp)
			vlnctrl |= IXGBE_VLNCTRL_VFE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1663
1664         if (netdev->mc_count) {
1665                 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
1666                 if (!mta_list)
1667                         return;
1668
1669                 /* Shared function expects packed array of only addresses. */
1670                 mc_ptr = netdev->mc_list;
1671
1672                 for (i = 0; i < netdev->mc_count; i++) {
1673                         if (!mc_ptr)
1674                                 break;
1675                         memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
1676                                ETH_ALEN);
1677                         mc_ptr = mc_ptr->next;
1678                 }
1679
1680                 ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
1681                 kfree(mta_list);
1682         } else {
1683                 ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
1684         }
1685
1686 }
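
/*
 * Usage sketch (hypothetical caller, not part of this driver): the stack
 * invokes this routine via dev->set_multicast_list whenever the flags or
 * the multicast list change, e.g. dev_set_promiscuity(netdev, 1) under the
 * rtnl lock sets IFF_PROMISC and lands here, where UPE/MPE are set and VFE
 * is cleared.
 */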
1687
1688 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1689 {
1690         int q_idx;
1691         struct ixgbe_q_vector *q_vector;
1692         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1693
1694         /* legacy and MSI only use one vector */
1695         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1696                 q_vectors = 1;
1697
1698         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1699                 q_vector = &adapter->q_vector[q_idx];
1700                 if (!q_vector->rxr_count)
1701                         continue;
1702                 napi_enable(&q_vector->napi);
1703         }
1704 }
1705
1706 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1707 {
1708         int q_idx;
1709         struct ixgbe_q_vector *q_vector;
1710         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1711
1712         /* legacy and MSI only use one vector */
1713         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1714                 q_vectors = 1;
1715
1716         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1717                 q_vector = &adapter->q_vector[q_idx];
1718                 if (!q_vector->rxr_count)
1719                         continue;
1720                 napi_disable(&q_vector->napi);
1721         }
1722 }
1723
1724 static void ixgbe_configure(struct ixgbe_adapter *adapter)
1725 {
1726         struct net_device *netdev = adapter->netdev;
1727         int i;
1728
1729         ixgbe_set_multi(netdev);
1730
1731         ixgbe_restore_vlan(adapter);
1732
1733         ixgbe_configure_tx(adapter);
1734         ixgbe_configure_rx(adapter);
1735         for (i = 0; i < adapter->num_rx_queues; i++)
1736                 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1737                                            (adapter->rx_ring[i].count - 1));
1738 }
1739
1740 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1741 {
1742         struct net_device *netdev = adapter->netdev;
1743         struct ixgbe_hw *hw = &adapter->hw;
1744         int i, j = 0;
1745         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1746         u32 txdctl, rxdctl, mhadd;
1747         u32 gpie;
1748
1749         ixgbe_get_hw_control(adapter);
1750
1751         if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1752             (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1753                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1754                         gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1755                                 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1756                 } else {
1757                         /* MSI only */
1758                         gpie = 0;
1759                 }
1760                 /* XXX: to interrupt immediately for EICS writes, enable this */
1761                 /* gpie |= IXGBE_GPIE_EIMEN; */
1762                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1763         }
1764
1765         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1766                 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
1767                  * specifically only auto mask tx and rx interrupts */
1768                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1769         }
1770
1771         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1772         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1773                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1774                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
1775
1776                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1777         }
1778
1779         for (i = 0; i < adapter->num_tx_queues; i++) {
1780                 j = adapter->tx_ring[i].reg_idx;
1781                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1782                 txdctl |= IXGBE_TXDCTL_ENABLE;
1783                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1784         }
1785
1786         for (i = 0; i < adapter->num_rx_queues; i++) {
1787                 j = adapter->rx_ring[i].reg_idx;
1788                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
1789                 /* enable PTHRESH=32 descriptors (half the internal cache)
1790                  * and HTHRESH=0 descriptors (to minimize latency on fetch),
1791                  * this also removes a pesky rx_no_buffer_count increment */
1792                 rxdctl |= 0x0020;
1793                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1794                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
1795         }
1796         /* enable all receives */
1797         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1798         rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
1799         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
1800
1801         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1802                 ixgbe_configure_msix(adapter);
1803         else
1804                 ixgbe_configure_msi_and_legacy(adapter);
1805
1806         clear_bit(__IXGBE_DOWN, &adapter->state);
1807         ixgbe_napi_enable_all(adapter);
1808
1809         /* clear any pending interrupts, may auto mask */
1810         IXGBE_READ_REG(hw, IXGBE_EICR);
1811
1812         ixgbe_irq_enable(adapter);
1813
1814         /* bring the link up in the watchdog, this could race with our first
1815          * link up interrupt but shouldn't be a problem */
1816         mod_timer(&adapter->watchdog_timer, jiffies);
1817         return 0;
1818 }
1819
1820 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
1821 {
1822         WARN_ON(in_interrupt());
1823         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1824                 msleep(1);
1825         ixgbe_down(adapter);
1826         ixgbe_up(adapter);
1827         clear_bit(__IXGBE_RESETTING, &adapter->state);
1828 }
1829
1830 int ixgbe_up(struct ixgbe_adapter *adapter)
1831 {
1832         /* hardware has been reset, we need to reload some things */
1833         ixgbe_configure(adapter);
1834
1835         return ixgbe_up_complete(adapter);
1836 }
1837
1838 void ixgbe_reset(struct ixgbe_adapter *adapter)
1839 {
1840         if (ixgbe_init_hw(&adapter->hw))
1841                 DPRINTK(PROBE, ERR, "Hardware Error\n");
1842
1843         /* reprogram the RAR[0] in case user changed it. */
1844         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1845
1846 }
1847
1848 #ifdef CONFIG_PM
1849 static int ixgbe_resume(struct pci_dev *pdev)
1850 {
1851         struct net_device *netdev = pci_get_drvdata(pdev);
1852         struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;
1854
1855         pci_set_power_state(pdev, PCI_D0);
1856         pci_restore_state(pdev);
1857         err = pci_enable_device(pdev);
1858         if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
				"suspend\n");
1861                 return err;
1862         }
1863         pci_set_master(pdev);
1864
1865         pci_enable_wake(pdev, PCI_D3hot, 0);
1866         pci_enable_wake(pdev, PCI_D3cold, 0);
1867
1868         if (netif_running(netdev)) {
1869                 err = ixgbe_request_irq(adapter);
1870                 if (err)
1871                         return err;
1872         }
1873
1874         ixgbe_reset(adapter);
1875
1876         if (netif_running(netdev))
1877                 ixgbe_up(adapter);
1878
1879         netif_device_attach(netdev);
1880
1881         return 0;
1882 }
1883 #endif
1884
1885 /**
1886  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
1887  * @adapter: board private structure
1888  * @rx_ring: ring to free buffers from
1889  **/
1890 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1891                                 struct ixgbe_ring *rx_ring)
1892 {
1893         struct pci_dev *pdev = adapter->pdev;
1894         unsigned long size;
1895         unsigned int i;
1896
1897         /* Free all the Rx ring sk_buffs */
1898
1899         for (i = 0; i < rx_ring->count; i++) {
1900                 struct ixgbe_rx_buffer *rx_buffer_info;
1901
1902                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1903                 if (rx_buffer_info->dma) {
1904                         pci_unmap_single(pdev, rx_buffer_info->dma,
1905                                          adapter->rx_buf_len,
1906                                          PCI_DMA_FROMDEVICE);
1907                         rx_buffer_info->dma = 0;
1908                 }
1909                 if (rx_buffer_info->skb) {
1910                         dev_kfree_skb(rx_buffer_info->skb);
1911                         rx_buffer_info->skb = NULL;
1912                 }
1913                 if (!rx_buffer_info->page)
1914                         continue;
1915                 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
1916                                PCI_DMA_FROMDEVICE);
1917                 rx_buffer_info->page_dma = 0;
1918
1919                 put_page(rx_buffer_info->page);
1920                 rx_buffer_info->page = NULL;
1921         }
1922
1923         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
1924         memset(rx_ring->rx_buffer_info, 0, size);
1925
1926         /* Zero out the descriptor ring */
1927         memset(rx_ring->desc, 0, rx_ring->size);
1928
1929         rx_ring->next_to_clean = 0;
1930         rx_ring->next_to_use = 0;
1931
1932         writel(0, adapter->hw.hw_addr + rx_ring->head);
1933         writel(0, adapter->hw.hw_addr + rx_ring->tail);
1934 }
1935
1936 /**
1937  * ixgbe_clean_tx_ring - Free Tx Buffers
1938  * @adapter: board private structure
1939  * @tx_ring: ring to be cleaned
1940  **/
1941 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
1942                                 struct ixgbe_ring *tx_ring)
1943 {
1944         struct ixgbe_tx_buffer *tx_buffer_info;
1945         unsigned long size;
1946         unsigned int i;
1947
1948         /* Free all the Tx ring sk_buffs */
1949
1950         for (i = 0; i < tx_ring->count; i++) {
1951                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1952                 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1953         }
1954
1955         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
1956         memset(tx_ring->tx_buffer_info, 0, size);
1957
1958         /* Zero out the descriptor ring */
1959         memset(tx_ring->desc, 0, tx_ring->size);
1960
1961         tx_ring->next_to_use = 0;
1962         tx_ring->next_to_clean = 0;
1963
1964         writel(0, adapter->hw.hw_addr + tx_ring->head);
1965         writel(0, adapter->hw.hw_addr + tx_ring->tail);
1966 }
1967
1968 /**
1969  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
1970  * @adapter: board private structure
1971  **/
1972 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
1973 {
1974         int i;
1975
1976         for (i = 0; i < adapter->num_rx_queues; i++)
1977                 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1978 }
1979
1980 /**
1981  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
1982  * @adapter: board private structure
1983  **/
1984 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
1985 {
1986         int i;
1987
1988         for (i = 0; i < adapter->num_tx_queues; i++)
1989                 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1990 }
1991
1992 void ixgbe_down(struct ixgbe_adapter *adapter)
1993 {
1994         struct net_device *netdev = adapter->netdev;
1995         u32 rxctrl;
1996
1997         /* signal that we are down to the interrupt handler */
1998         set_bit(__IXGBE_DOWN, &adapter->state);
1999
2000         /* disable receives */
2001         rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2002         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2003                         rxctrl & ~IXGBE_RXCTRL_RXEN);
2004
2005         netif_tx_disable(netdev);
2006
	/* flush the receive disable to the hardware; the Tx path was
	 * already stopped at the stack level by netif_tx_disable() */
2010         IXGBE_WRITE_FLUSH(&adapter->hw);
2011         msleep(10);
2012
2013         ixgbe_irq_disable(adapter);
2014
2015         ixgbe_napi_disable_all(adapter);
2016         del_timer_sync(&adapter->watchdog_timer);
2017
2018         netif_carrier_off(netdev);
2019         netif_stop_queue(netdev);
2020
2021         if (!pci_channel_offline(adapter->pdev))
2022                 ixgbe_reset(adapter);
2023         ixgbe_clean_all_tx_rings(adapter);
2024         ixgbe_clean_all_rx_rings(adapter);
2025
2026 }
2027
2028 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
2029 {
2030         struct net_device *netdev = pci_get_drvdata(pdev);
2031         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2032 #ifdef CONFIG_PM
2033         int retval = 0;
2034 #endif
2035
2036         netif_device_detach(netdev);
2037
2038         if (netif_running(netdev)) {
2039                 ixgbe_down(adapter);
2040                 ixgbe_free_irq(adapter);
2041         }
2042
2043 #ifdef CONFIG_PM
2044         retval = pci_save_state(pdev);
2045         if (retval)
2046                 return retval;
2047 #endif
2048
2049         pci_enable_wake(pdev, PCI_D3hot, 0);
2050         pci_enable_wake(pdev, PCI_D3cold, 0);
2051
2052         ixgbe_release_hw_control(adapter);
2053
2054         pci_disable_device(pdev);
2055
2056         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2057
2058         return 0;
2059 }
2060
2061 static void ixgbe_shutdown(struct pci_dev *pdev)
2062 {
2063         ixgbe_suspend(pdev, PMSG_SUSPEND);
2064 }
2065
2066 /**
2067  * ixgbe_poll - NAPI Rx polling callback
2068  * @napi: structure for representing this polling device
2069  * @budget: how many packets driver is allowed to clean
2070  *
2071  * This function is used for legacy and MSI, NAPI mode
2072  **/
2073 static int ixgbe_poll(struct napi_struct *napi, int budget)
2074 {
2075         struct ixgbe_q_vector *q_vector = container_of(napi,
2076                                           struct ixgbe_q_vector, napi);
2077         struct ixgbe_adapter *adapter = q_vector->adapter;
2078         int tx_cleaned = 0, work_done = 0;
2079
2080 #ifdef CONFIG_DCA
2081         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2082                 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2083                 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2084         }
2085 #endif
2086
2087         tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
2088         ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
2089
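	/* if the Tx clean reported work, consume the whole budget so the
	 * NAPI core keeps polling instead of re-arming interrupts while Tx
	 * completions may still be pending */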
2090         if (tx_cleaned)
2091                 work_done = budget;
2092
2093         /* If budget not fully consumed, exit the polling mode */
2094         if (work_done < budget) {
2095                 netif_rx_complete(adapter->netdev, napi);
2096                 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
2097                         ixgbe_set_itr(adapter);
2098                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2099                         ixgbe_irq_enable(adapter);
2100         }
2101
2102         return work_done;
2103 }
2104
2105 /**
2106  * ixgbe_tx_timeout - Respond to a Tx Hang
2107  * @netdev: network interface device structure
2108  **/
2109 static void ixgbe_tx_timeout(struct net_device *netdev)
2110 {
2111         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2112
2113         /* Do the reset outside of interrupt context */
2114         schedule_work(&adapter->reset_task);
2115 }
2116
2117 static void ixgbe_reset_task(struct work_struct *work)
2118 {
2119         struct ixgbe_adapter *adapter;
2120         adapter = container_of(work, struct ixgbe_adapter, reset_task);
2121
2122         adapter->tx_timeout_count++;
2123
2124         ixgbe_reinit_locked(adapter);
2125 }
2126
2127 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2128                                        int vectors)
2129 {
2130         int err, vector_threshold;
2131
2132         /* We'll want at least 3 (vector_threshold):
2133          * 1) TxQ[0] Cleanup
2134          * 2) RxQ[0] Cleanup
2135          * 3) Other (Link Status Change, etc.)
2136          * 4) TCP Timer (optional)
2137          */
2138         vector_threshold = MIN_MSIX_COUNT;
2139
2140         /* The more we get, the more we will assign to Tx/Rx Cleanup
2141          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2142          * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
2144          */
2145         while (vectors >= vector_threshold) {
2146                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2147                                       vectors);
2148                 if (!err) /* Success in acquiring all requested vectors. */
2149                         break;
2150                 else if (err < 0)
2151                         vectors = 0; /* Nasty failure, quit now */
2152                 else /* err == number of vectors we should try again with */
2153                         vectors = err;
2154         }
2155
2156         if (vectors < vector_threshold) {
2157                 /* Can't allocate enough MSI-X interrupts?  Oh well.
2158                  * This just means we'll go with either a single MSI
2159                  * vector or fall back to legacy interrupts.
2160                  */
2161                 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2162                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2163                 kfree(adapter->msix_entries);
2164                 adapter->msix_entries = NULL;
2165                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2166                 adapter->num_tx_queues = 1;
2167                 adapter->num_rx_queues = 1;
2168         } else {
2169                 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2170                 adapter->num_msix_vectors = vectors;
2171         }
2172 }
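
/*
 * A worked example of the retry loop above (numbers are illustrative):
 * requesting 10 vectors on a platform that can grant only 4 makes
 * pci_enable_msix() return 4, the loop retries with vectors = 4, and since
 * 4 >= MIN_MSIX_COUNT the adapter keeps MSI-X with a reduced queue set.
 */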
2173
2174 static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2175 {
2176         int nrq, ntq;
2177         int feature_mask = 0, rss_i, rss_m;
2178
2179         /* Number of supported queues */
2180         switch (adapter->hw.mac.type) {
2181         case ixgbe_mac_82598EB:
2182                 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2183                 rss_m = 0;
2184                 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2185
2186                 switch (adapter->flags & feature_mask) {
2187                 case (IXGBE_FLAG_RSS_ENABLED):
2188                         rss_m = 0xF;
2189                         nrq = rss_i;
2190                         ntq = rss_i;
2191                         break;
2192                 case 0:
2193                 default:
2194                         rss_i = 0;
2195                         rss_m = 0;
2196                         nrq = 1;
2197                         ntq = 1;
2198                         break;
2199                 }
2200
2201                 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2202                 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2203                 break;
2204         default:
2205                 nrq = 1;
2206                 ntq = 1;
2207                 break;
2208         }
2209
2210         adapter->num_rx_queues = nrq;
2211         adapter->num_tx_queues = ntq;
2212 }
2213
2214 /**
2215  * ixgbe_cache_ring_register - Descriptor ring to register mapping
2216  * @adapter: board private structure to initialize
2217  *
2218  * Once we know the feature-set enabled for the device, we'll cache
2219  * the register offset the descriptor ring is assigned to.
2220  **/
2221 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2222 {
2223         /* TODO: Remove all uses of the indices in the cases where multiple
2224          *       features are OR'd together, if the feature set makes sense.
2225          */
2226         int feature_mask = 0, rss_i;
2227         int i, txr_idx, rxr_idx;
2228
2229         /* Number of supported queues */
2230         switch (adapter->hw.mac.type) {
2231         case ixgbe_mac_82598EB:
2232                 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2233                 txr_idx = 0;
2234                 rxr_idx = 0;
2235                 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2236                 switch (adapter->flags & feature_mask) {
2237                 case (IXGBE_FLAG_RSS_ENABLED):
2238                         for (i = 0; i < adapter->num_rx_queues; i++)
2239                                 adapter->rx_ring[i].reg_idx = i;
2240                         for (i = 0; i < adapter->num_tx_queues; i++)
2241                                 adapter->tx_ring[i].reg_idx = i;
2242                         break;
2243                 case 0:
2244                 default:
2245                         break;
2246                 }
2247                 break;
2248         default:
2249                 break;
2250         }
2251 }
2252
2253 /**
2254  * ixgbe_alloc_queues - Allocate memory for all rings
2255  * @adapter: board private structure to initialize
2256  *
2257  * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
2260  **/
2261 static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2262 {
2263         int i;
2264
2265         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2266                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
2267         if (!adapter->tx_ring)
2268                 goto err_tx_ring_allocation;
2269
2270         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2271                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
2272         if (!adapter->rx_ring)
2273                 goto err_rx_ring_allocation;
2274
2275         for (i = 0; i < adapter->num_tx_queues; i++) {
2276                 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
2277                 adapter->tx_ring[i].queue_index = i;
2278         }
2279         for (i = 0; i < adapter->num_rx_queues; i++) {
2280                 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
2281                 adapter->rx_ring[i].queue_index = i;
2282         }
2283
2284         ixgbe_cache_ring_register(adapter);
2285
2286         return 0;
2287
2288 err_rx_ring_allocation:
2289         kfree(adapter->tx_ring);
2290 err_tx_ring_allocation:
2291         return -ENOMEM;
2292 }
2293
2294 /**
2295  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2296  * @adapter: board private structure to initialize
2297  *
2298  * Attempt to configure the interrupts using the best available
2299  * capabilities of the hardware and the kernel.
2300  **/
2301 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2302                                                     *adapter)
2303 {
2304         int err = 0;
2305         int vector, v_budget;
2306
2307         /*
2308          * It's easy to be greedy for MSI-X vectors, but it really
2309          * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPUs.
2312          */
2313         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2314                        (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2315
2316         /*
2317          * At the same time, hardware can only support a maximum of
2318          * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
2319          * we can easily reach upwards of 64 Rx descriptor queues and
2320          * 32 Tx queues.  Thus, we cap it off in those rare cases where
2321          * the cpu count also exceeds our vector limit.
2322          */
2323         v_budget = min(v_budget, MAX_MSIX_COUNT);
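
	/* Example (illustrative): with 8 online CPUs and 8 Rx + 8 Tx queues,
	 * v_budget = min(16, 16) + NON_Q_VECTORS before the cap above. */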
2324
2325         /* A failure in MSI-X entry allocation isn't fatal, but it does
2326          * mean we disable MSI-X capabilities of the adapter. */
2327         adapter->msix_entries = kcalloc(v_budget,
2328                                         sizeof(struct msix_entry), GFP_KERNEL);
2329         if (!adapter->msix_entries) {
2330                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2331                 ixgbe_set_num_queues(adapter);
2332                 kfree(adapter->tx_ring);
2333                 kfree(adapter->rx_ring);
2334                 err = ixgbe_alloc_queues(adapter);
2335                 if (err) {
2336                         DPRINTK(PROBE, ERR, "Unable to allocate memory "
2337                                             "for queues\n");
2338                         goto out;
2339                 }
2340
2341                 goto try_msi;
2342         }
2343
2344         for (vector = 0; vector < v_budget; vector++)
2345                 adapter->msix_entries[vector].entry = vector;
2346
2347         ixgbe_acquire_msix_vectors(adapter, v_budget);
2348
2349         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2350                 goto out;
2351
2352 try_msi:
2353         err = pci_enable_msi(adapter->pdev);
2354         if (!err) {
2355                 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2356         } else {
2357                 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2358                                    "falling back to legacy.  Error: %d\n", err);
2359                 /* reset err */
2360                 err = 0;
2361         }
2362
2363 out:
2364         /* Notify the stack of the (possibly) reduced Tx Queue count. */
2365         adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
2366
2367         return err;
2368 }
2369
2370 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2371 {
2372         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2373                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2374                 pci_disable_msix(adapter->pdev);
2375                 kfree(adapter->msix_entries);
2376                 adapter->msix_entries = NULL;
2377         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2378                 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2379                 pci_disable_msi(adapter->pdev);
2380         }
2381         return;
2382 }
2383
2384 /**
2385  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2386  * @adapter: board private structure to initialize
2387  *
2388  * We determine which interrupt scheme to use based on...
2389  * - Kernel support (MSI, MSI-X)
2390  *   - which can be user-defined (via MODULE_PARAM)
2391  * - Hardware queue count (num_*_queues)
2392  *   - defined by miscellaneous hardware support/features (RSS, etc.)
2393  **/
2394 static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2395 {
2396         int err;
2397
2398         /* Number of supported queues */
2399         ixgbe_set_num_queues(adapter);
2400
2401         err = ixgbe_alloc_queues(adapter);
2402         if (err) {
2403                 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2404                 goto err_alloc_queues;
2405         }
2406
2407         err = ixgbe_set_interrupt_capability(adapter);
2408         if (err) {
2409                 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2410                 goto err_set_interrupt;
2411         }
2412
2413         DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2414                            "Tx Queue count = %u\n",
2415                 (adapter->num_rx_queues > 1) ? "Enabled" :
2416                 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2417
2418         set_bit(__IXGBE_DOWN, &adapter->state);
2419
2420         return 0;
2421
2422 err_set_interrupt:
2423         kfree(adapter->tx_ring);
2424         kfree(adapter->rx_ring);
2425 err_alloc_queues:
2426         return err;
2427 }
2428
2429 /**
2430  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2431  * @adapter: board private structure to initialize
2432  *
2433  * ixgbe_sw_init initializes the Adapter private data structure.
2434  * Fields are initialized based on PCI device information and
2435  * OS network device settings (MTU size).
2436  **/
2437 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2438 {
2439         struct ixgbe_hw *hw = &adapter->hw;
2440         struct pci_dev *pdev = adapter->pdev;
2441         unsigned int rss;
2442
2443         /* Set capability flags */
2444         rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2445         adapter->ring_feature[RING_F_RSS].indices = rss;
2446         adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2447
2448         /* Enable Dynamic interrupt throttling by default */
2449         adapter->rx_eitr = 1;
2450         adapter->tx_eitr = 1;
2451
2452         /* default flow control settings */
2453         hw->fc.original_type = ixgbe_fc_full;
2454         hw->fc.type = ixgbe_fc_full;
2455
2456         /* select 10G link by default */
2457         hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2458         if (hw->mac.ops.reset(hw)) {
2459                 dev_err(&pdev->dev, "HW Init failed\n");
2460                 return -EIO;
2461         }
2462         if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
2463                                          false)) {
2464                 dev_err(&pdev->dev, "Link Speed setup failed\n");
2465                 return -EIO;
2466         }
2467
2468         /* initialize eeprom parameters */
2469         if (ixgbe_init_eeprom(hw)) {
2470                 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2471                 return -EIO;
2472         }
2473
2474         /* enable rx csum by default */
2475         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2476
2477         set_bit(__IXGBE_DOWN, &adapter->state);
2478
2479         return 0;
2480 }
2481
2482 /**
2483  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2484  * @adapter: board private structure
2485  * @txdr:    tx descriptor ring (for a specific queue) to setup
2486  *
2487  * Return 0 on success, negative on failure
2488  **/
2489 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2490                              struct ixgbe_ring *txdr)
2491 {
2492         struct pci_dev *pdev = adapter->pdev;
2493         int size;
2494
2495         size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
2496         txdr->tx_buffer_info = vmalloc(size);
2497         if (!txdr->tx_buffer_info) {
2498                 DPRINTK(PROBE, ERR,
2499                 "Unable to allocate memory for the transmit descriptor ring\n");
2500                 return -ENOMEM;
2501         }
2502         memset(txdr->tx_buffer_info, 0, size);
2503
2504         /* round up to nearest 4K */
2505         txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
2506         txdr->size = ALIGN(txdr->size, 4096);
2507
2508         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2509         if (!txdr->desc) {
2510                 vfree(txdr->tx_buffer_info);
2511                 DPRINTK(PROBE, ERR,
2512                         "Memory allocation failed for the tx desc ring\n");
2513                 return -ENOMEM;
2514         }
2515
2516         txdr->next_to_use = 0;
2517         txdr->next_to_clean = 0;
2518         txdr->work_limit = txdr->count;
2519
2520         return 0;
2521 }
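
/*
 * Sizing example for the ALIGN() above: each union ixgbe_adv_tx_desc is
 * 16 bytes, so a 512-descriptor ring needs 8192 bytes (already 4 KiB
 * aligned), while 520 descriptors (8320 bytes) would round up to 12288.
 */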
2522
2523 /**
2524  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2525  * @adapter: board private structure
2526  * @rxdr:    rx descriptor ring (for a specific queue) to setup
2527  *
2528  * Returns 0 on success, negative on failure
2529  **/
2530 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2531                              struct ixgbe_ring *rxdr)
2532 {
2533         struct pci_dev *pdev = adapter->pdev;
2534         int size;
2535
2536         size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2537         rxdr->lro_mgr.lro_arr = vmalloc(size);
2538         if (!rxdr->lro_mgr.lro_arr)
2539                 return -ENOMEM;
2540         memset(rxdr->lro_mgr.lro_arr, 0, size);
2541
2542         size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
2543         rxdr->rx_buffer_info = vmalloc(size);
2544         if (!rxdr->rx_buffer_info) {
2545                 DPRINTK(PROBE, ERR,
2546                         "vmalloc allocation failed for the rx desc ring\n");
2547                 goto alloc_failed;
2548         }
2549         memset(rxdr->rx_buffer_info, 0, size);
2550
2551         /* Round up to nearest 4K */
2552         rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
2553         rxdr->size = ALIGN(rxdr->size, 4096);
2554
2555         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
2556
2557         if (!rxdr->desc) {
2558                 DPRINTK(PROBE, ERR,
2559                         "Memory allocation failed for the rx desc ring\n");
2560                 vfree(rxdr->rx_buffer_info);
2561                 goto alloc_failed;
2562         }
2563
2564         rxdr->next_to_clean = 0;
2565         rxdr->next_to_use = 0;
2566
2567         return 0;
2568
2569 alloc_failed:
2570         vfree(rxdr->lro_mgr.lro_arr);
2571         rxdr->lro_mgr.lro_arr = NULL;
2572         return -ENOMEM;
2573 }
2574
2575 /**
2576  * ixgbe_free_tx_resources - Free Tx Resources per Queue
2577  * @adapter: board private structure
2578  * @tx_ring: Tx descriptor ring for a specific queue
2579  *
2580  * Free all transmit software resources
2581  **/
2582 static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2583                                     struct ixgbe_ring *tx_ring)
2584 {
2585         struct pci_dev *pdev = adapter->pdev;
2586
2587         ixgbe_clean_tx_ring(adapter, tx_ring);
2588
2589         vfree(tx_ring->tx_buffer_info);
2590         tx_ring->tx_buffer_info = NULL;
2591
2592         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2593
2594         tx_ring->desc = NULL;
2595 }
2596
2597 /**
2598  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
2599  * @adapter: board private structure
2600  *
2601  * Free all transmit software resources
2602  **/
2603 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2604 {
2605         int i;
2606
2607         for (i = 0; i < adapter->num_tx_queues; i++)
2608                 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
2609 }
2610
2611 /**
2612  * ixgbe_free_rx_resources - Free Rx Resources
2613  * @adapter: board private structure
2614  * @rx_ring: ring to clean the resources from
2615  *
2616  * Free all receive software resources
2617  **/
2618 static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2619                                     struct ixgbe_ring *rx_ring)
2620 {
2621         struct pci_dev *pdev = adapter->pdev;
2622
2623         vfree(rx_ring->lro_mgr.lro_arr);
2624         rx_ring->lro_mgr.lro_arr = NULL;
2625
2626         ixgbe_clean_rx_ring(adapter, rx_ring);
2627
2628         vfree(rx_ring->rx_buffer_info);
2629         rx_ring->rx_buffer_info = NULL;
2630
2631         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2632
2633         rx_ring->desc = NULL;
2634 }
2635
2636 /**
2637  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
2638  * @adapter: board private structure
2639  *
2640  * Free all receive software resources
2641  **/
2642 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2643 {
2644         int i;
2645
2646         for (i = 0; i < adapter->num_rx_queues; i++)
2647                 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
2648 }
2649
2650 /**
2651  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2652  * @adapter: board private structure
2653  *
2654  * If this function returns with an error, then it's possible one or
2655  * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
2657  *
2658  * Return 0 on success, negative on failure
2659  **/
2660 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2661 {
2662         int i, err = 0;
2663
2664         for (i = 0; i < adapter->num_tx_queues; i++) {
2665                 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2666                 if (err) {
2667                         DPRINTK(PROBE, ERR,
2668                                 "Allocation for Tx Queue %u failed\n", i);
2669                         break;
2670                 }
2671         }
2672
2673         return err;
2674 }
2675
2676 /**
2677  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2678  * @adapter: board private structure
2679  *
2680  * If this function returns with an error, then it's possible one or
2681  * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
2683  *
2684  * Return 0 on success, negative on failure
2685  **/
2686
2687 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2688 {
2689         int i, err = 0;
2690
2691         for (i = 0; i < adapter->num_rx_queues; i++) {
2692                 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2693                 if (err) {
2694                         DPRINTK(PROBE, ERR,
2695                                 "Allocation for Rx Queue %u failed\n", i);
2696                         break;
2697                 }
2698         }
2699
2700         return err;
2701 }
2702
2703 /**
2704  * ixgbe_change_mtu - Change the Maximum Transfer Unit
2705  * @netdev: network interface device structure
2706  * @new_mtu: new value for maximum frame size
2707  *
2708  * Returns 0 on success, negative on failure
2709  **/
2710 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2711 {
2712         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2713         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2714
2715         if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
2716             (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
2717                 return -EINVAL;
2718
2719         DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2720                 netdev->mtu, new_mtu);
2721         /* must set new MTU before calling down or up */
2722         netdev->mtu = new_mtu;
2723
2724         if (netif_running(netdev))
2725                 ixgbe_reinit_locked(adapter);
2726
2727         return 0;
2728 }
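
/*
 * Bounds example for the check above: ETH_ZLEN (60) + ETH_FCS_LEN (4) is
 * the 64-byte minimum Ethernet frame, so the smallest accepted MTU works
 * out to 64 - ETH_HLEN - ETH_FCS_LEN = 46; the jumbo limit follows the
 * same arithmetic from IXGBE_MAX_JUMBO_FRAME_SIZE.
 */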
2729
2730 /**
2731  * ixgbe_open - Called when a network interface is made active
2732  * @netdev: network interface device structure
2733  *
2734  * Returns 0 on success, negative value on failure
2735  *
2736  * The open entry point is called when a network interface is made
2737  * active by the system (IFF_UP).  At this point all resources needed
2738  * for transmit and receive operations are allocated, the interrupt
2739  * handler is registered with the OS, the watchdog timer is started,
2740  * and the stack is notified that the interface is ready.
2741  **/
2742 static int ixgbe_open(struct net_device *netdev)
2743 {
2744         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2745         int err;
2746
2747         /* disallow open during test */
2748         if (test_bit(__IXGBE_TESTING, &adapter->state))
2749                 return -EBUSY;
2750
2751         /* allocate transmit descriptors */
2752         err = ixgbe_setup_all_tx_resources(adapter);
2753         if (err)
2754                 goto err_setup_tx;
2755
2756         /* allocate receive descriptors */
2757         err = ixgbe_setup_all_rx_resources(adapter);
2758         if (err)
2759                 goto err_setup_rx;
2760
2761         ixgbe_configure(adapter);
2762
2763         err = ixgbe_request_irq(adapter);
2764         if (err)
2765                 goto err_req_irq;
2766
2767         err = ixgbe_up_complete(adapter);
2768         if (err)
2769                 goto err_up;
2770
2771         return 0;
2772
2773 err_up:
2774         ixgbe_release_hw_control(adapter);
2775         ixgbe_free_irq(adapter);
2776 err_req_irq:
2777         ixgbe_free_all_rx_resources(adapter);
2778 err_setup_rx:
2779         ixgbe_free_all_tx_resources(adapter);
2780 err_setup_tx:
2781         ixgbe_reset(adapter);
2782
2783         return err;
2784 }
2785
2786 /**
2787  * ixgbe_close - Disables a network interface
2788  * @netdev: network interface device structure
2789  *
2790  * Returns 0, this is not allowed to fail
2791  *
2792  * The close entry point is called when an interface is de-activated
2793  * by the OS.  The hardware is still under the drivers control, but
2794  * needs to be disabled.  A global MAC reset is issued to stop the
2795  * hardware, and all transmit and receive resources are freed.
2796  **/
2797 static int ixgbe_close(struct net_device *netdev)
2798 {
2799         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2800
2801         ixgbe_down(adapter);
2802         ixgbe_free_irq(adapter);
2803
2804         ixgbe_free_all_tx_resources(adapter);
2805         ixgbe_free_all_rx_resources(adapter);
2806
2807         ixgbe_release_hw_control(adapter);
2808
2809         return 0;
2810 }
2811
2812 /**
2813  * ixgbe_update_stats - Update the board statistics counters.
2814  * @adapter: board private structure
2815  **/
2816 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2817 {
2818         struct ixgbe_hw *hw = &adapter->hw;
2819         u64 total_mpc = 0;
2820         u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
2821
2822         adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2823         for (i = 0; i < 8; i++) {
2824                 /* for packet buffers not used, the register should read 0 */
2825                 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2826                 missed_rx += mpc;
2827                 adapter->stats.mpc[i] += mpc;
2828                 total_mpc += adapter->stats.mpc[i];
2829                 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2830         }
2831         adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2832         /* work around hardware counting issue */
2833         adapter->stats.gprc -= missed_rx;
2834
2835         /* 82598 hardware only has a 32 bit counter in the high register */
2836         adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2837         adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2838         adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2839         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2840         adapter->stats.bprc += bprc;
2841         adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2842         adapter->stats.mprc -= bprc;
2843         adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2844         adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2845         adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2846         adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2847         adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2848         adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2849         adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2850         adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2851         adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2852         adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2853         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2854         adapter->stats.lxontxc += lxon;
2855         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2856         adapter->stats.lxofftxc += lxoff;
2858         adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2859         adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2860         /*
2861          * 82598 errata - tx of flow control packets is included in tx counters
2862          */
2863         xon_off_tot = lxon + lxoff;
2864         adapter->stats.gptc -= xon_off_tot;
2865         adapter->stats.mptc -= xon_off_tot;
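             /* each XON/XOFF frame is minimum-sized: ETH_ZLEN (60 bytes)
              * plus the 4 byte FCS, 64 bytes on the wire */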
2866         adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
2867         adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2868         adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2869         adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2870         adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2871         adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2872         adapter->stats.ptc64 -= xon_off_tot;
2873         adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2874         adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2875         adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2876         adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2877         adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2878         adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2879
2880         /* Fill out the OS statistics structure */
2881         adapter->net_stats.multicast = adapter->stats.mprc;
2882
2883         /* Rx Errors */
2884         adapter->net_stats.rx_errors = adapter->stats.crcerrs +
2885                                                 adapter->stats.rlec;
2886         adapter->net_stats.rx_dropped = 0;
2887         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2888         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2889         adapter->net_stats.rx_missed_errors = total_mpc;
2890 }
2891
2892 /**
2893  * ixgbe_watchdog - Timer Call-back
2894  * @data: pointer to adapter cast into an unsigned long
2895  **/
2896 static void ixgbe_watchdog(unsigned long data)
2897 {
2898         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2899         struct net_device *netdev = adapter->netdev;
2900         bool link_up;
2901         u32 link_speed = 0;
2902         int i;
2903
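             /* query the current link state through the MAC ops; the result
              * comes back in link_speed and link_up */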
2904         adapter->hw.mac.ops.check_link(&adapter->hw, &link_speed, &link_up);
2905
2906         if (link_up) {
2907                 if (!netif_carrier_ok(netdev)) {
2908                         u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2909                         u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
2910 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
2911 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
2912                         DPRINTK(LINK, INFO, "NIC Link is Up %s, "
2913                                 "Flow Control: %s\n",
2914                                 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
2915                                  "10 Gbps" :
2916                                  (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
2917                                   "1 Gbps" : "unknown speed")),
2918                                 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
2919                                  (FLOW_RX ? "RX" :
2920                                  (FLOW_TX ? "TX" : "None"))));
2921
2922                         netif_carrier_on(netdev);
2923                         netif_wake_queue(netdev);
2924                         for (i = 0; i < adapter->num_tx_queues; i++)
2925                                 netif_wake_subqueue(netdev, i);
2926                 } else {
2927                         /* Force detection of hung controller */
2928                         adapter->detect_tx_hung = true;
2929                 }
2930         } else {
2931                 if (netif_carrier_ok(netdev)) {
2932                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
2933                         netif_carrier_off(netdev);
2934                         netif_stop_queue(netdev);
2935                 }
2936         }
2937
2938         ixgbe_update_stats(adapter);
2939
2940         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2941                 /* Cause software interrupt to ensure rx rings are cleaned */
2942                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
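                             /*
                              * Build a mask with one bit per queue vector;
                              * e.g. assuming NON_Q_VECTORS is 1 (the single
                              * "other" vector), ten MSI-X vectors leave nine
                              * queue bits: (1 << 9) - 1 == 0x1ff.
                              */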
2943                         u32 eics =
2944                          (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
2945                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
2946                 } else {
2947                         /* for legacy and MSI interrupts don't set any bits that
2948                          * are enabled for EIAM, because this operation would
2949                          * set *both* EIMS and EICS for any bit in EIAM */
2950                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
2951                                      (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
2952                 }
2953                 /* Reset the timer */
2954                 mod_timer(&adapter->watchdog_timer,
2955                           round_jiffies(jiffies + 2 * HZ));
2956         }
2957 }
2958
2959 static int ixgbe_tso(struct ixgbe_adapter *adapter,
2960                          struct ixgbe_ring *tx_ring, struct sk_buff *skb,
2961                          u32 tx_flags, u8 *hdr_len)
2962 {
2963         struct ixgbe_adv_tx_context_desc *context_desc;
2964         unsigned int i;
2965         int err;
2966         struct ixgbe_tx_buffer *tx_buffer_info;
2967         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2968         u32 mss_l4len_idx = 0, l4len;
2969
2970         if (skb_is_gso(skb)) {
2971                 if (skb_header_cloned(skb)) {
2972                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2973                         if (err)
2974                                 return err;
2975                 }
2976                 l4len = tcp_hdrlen(skb);
2977                 *hdr_len += l4len;
2978
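                     /*
                      * Prime the TCP checksum with a pseudo-header sum over a
                      * zero length; hardware fills in the real checksum for
                      * each segment it carves from the payload.
                      */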
2979                 if (skb->protocol == htons(ETH_P_IP)) {
2980                         struct iphdr *iph = ip_hdr(skb);
2981                         iph->tot_len = 0;
2982                         iph->check = 0;
2983                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2984                                                                  iph->daddr, 0,
2985                                                                  IPPROTO_TCP,
2986                                                                  0);
2987                         adapter->hw_tso_ctxt++;
2988                 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2989                         ipv6_hdr(skb)->payload_len = 0;
2990                         tcp_hdr(skb)->check =
2991                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2992                                              &ipv6_hdr(skb)->daddr,
2993                                              0, IPPROTO_TCP, 0);
2994                         adapter->hw_tso6_ctxt++;
2995                 }
2996
2997                 i = tx_ring->next_to_use;
2998
2999                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3000                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3001
3002                 /* VLAN MACLEN IPLEN */
3003                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3004                         vlan_macip_lens |=
3005                             (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3006                 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3007                                     IXGBE_ADVTXD_MACLEN_SHIFT);
3008                 *hdr_len += skb_network_offset(skb);
3009                 vlan_macip_lens |=
3010                     (skb_transport_header(skb) - skb_network_header(skb));
3011                 *hdr_len +=
3012                     (skb_transport_header(skb) - skb_network_header(skb));
3013                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3014                 context_desc->seqnum_seed = 0;
3015
3016                 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3017                 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3018                                     IXGBE_ADVTXD_DTYP_CTXT);
3019
3020                 if (skb->protocol == htons(ETH_P_IP))
3021                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3022                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3023                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3024
3025                 /* MSS L4LEN IDX */
3026                 mss_l4len_idx |=
3027                     (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3028                 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
3029                 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3030
3031                 tx_buffer_info->time_stamp = jiffies;
3032                 tx_buffer_info->next_to_watch = i;
3033
3034                 i++;
3035                 if (i == tx_ring->count)
3036                         i = 0;
3037                 tx_ring->next_to_use = i;
3038
3039                 return true;
3040         }
3041         return false;
3042 }
3043
3044 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3045                                    struct ixgbe_ring *tx_ring,
3046                                    struct sk_buff *skb, u32 tx_flags)
3047 {
3048         struct ixgbe_adv_tx_context_desc *context_desc;
3049         unsigned int i;
3050         struct ixgbe_tx_buffer *tx_buffer_info;
3051         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3052
3053         if (skb->ip_summed == CHECKSUM_PARTIAL ||
3054             (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
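                     /*
                      * A context descriptor is needed even for VLAN-only
                      * frames: the tag to insert is carried in the context
                      * descriptor's vlan_macip_lens field.
                      */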
3055                 i = tx_ring->next_to_use;
3056                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3057                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3058
3059                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3060                         vlan_macip_lens |=
3061                             (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3062                 vlan_macip_lens |= (skb_network_offset(skb) <<
3063                                     IXGBE_ADVTXD_MACLEN_SHIFT);
3064                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3065                         vlan_macip_lens |= (skb_transport_header(skb) -
3066                                             skb_network_header(skb));
3067
3068                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3069                 context_desc->seqnum_seed = 0;
3070
3071                 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3072                                     IXGBE_ADVTXD_DTYP_CTXT);
3073
3074                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3075                         switch (skb->protocol) {
3076                         case __constant_htons(ETH_P_IP):
3077                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3078                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3079                                         type_tucmd_mlhl |=
3080                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3081                                 break;
3082
3083                         case __constant_htons(ETH_P_IPV6):
3084                                 /* XXX what about other V6 headers?? */
3085                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3086                                         type_tucmd_mlhl |=
3087                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3088                                 break;
3089
3090                         default:
3091                                 if (unlikely(net_ratelimit())) {
3092                                         DPRINTK(PROBE, WARNING,
3093                                          "partial checksum but proto=%x!\n",
3094                                          skb->protocol);
3095                                 }
3096                                 break;
3097                         }
3098                 }
3099
3100                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3101                 context_desc->mss_l4len_idx = 0;
3102
3103                 tx_buffer_info->time_stamp = jiffies;
3104                 tx_buffer_info->next_to_watch = i;
3105                 adapter->hw_csum_tx_good++;
3106                 i++;
3107                 if (i == tx_ring->count)
3108                         i = 0;
3109                 tx_ring->next_to_use = i;
3110
3111                 return true;
3112         }
3113         return false;
3114 }
3115
3116 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3117                         struct ixgbe_ring *tx_ring,
3118                         struct sk_buff *skb, unsigned int first)
3119 {
3120         struct ixgbe_tx_buffer *tx_buffer_info;
3121         unsigned int len = skb->len;
3122         unsigned int offset = 0, size, count = 0, i;
3123         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3124         unsigned int f;
3125
3126         len -= skb->data_len;
3127
3128         i = tx_ring->next_to_use;
3129
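             /*
              * Split the linear part of the skb into chunks of at most
              * IXGBE_MAX_DATA_PER_TXD bytes, mapping each chunk for DMA
              * into its own descriptor slot.
              */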
3130         while (len) {
3131                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3132                 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3133
3134                 tx_buffer_info->length = size;
3135                 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3136                                                   skb->data + offset,
3137                                                   size, PCI_DMA_TODEVICE);
3138                 tx_buffer_info->time_stamp = jiffies;
3139                 tx_buffer_info->next_to_watch = i;
3140
3141                 len -= size;
3142                 offset += size;
3143                 count++;
3144                 i++;
3145                 if (i == tx_ring->count)
3146                         i = 0;
3147         }
3148
3149         for (f = 0; f < nr_frags; f++) {
3150                 struct skb_frag_struct *frag;
3151
3152                 frag = &skb_shinfo(skb)->frags[f];
3153                 len = frag->size;
3154                 offset = frag->page_offset;
3155
3156                 while (len) {
3157                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
3158                         size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3159
3160                         tx_buffer_info->length = size;
3161                         tx_buffer_info->dma = pci_map_page(adapter->pdev,
3162                                                         frag->page,
3163                                                         offset,
3164                                                         size, PCI_DMA_TODEVICE);
3165                         tx_buffer_info->time_stamp = jiffies;
3166                         tx_buffer_info->next_to_watch = i;
3167
3168                         len -= size;
3169                         offset += size;
3170                         count++;
3171                         i++;
3172                         if (i == tx_ring->count)
3173                                 i = 0;
3174                 }
3175         }
3176         if (i == 0)
3177                 i = tx_ring->count - 1;
3178         else
3179                 i = i - 1;
3180         tx_ring->tx_buffer_info[i].skb = skb;
3181         tx_ring->tx_buffer_info[first].next_to_watch = i;
3182
3183         return count;
3184 }
3185
3186 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3187                                struct ixgbe_ring *tx_ring,
3188                                int tx_flags, int count, u32 paylen, u8 hdr_len)
3189 {
3190         union ixgbe_adv_tx_desc *tx_desc = NULL;
3191         struct ixgbe_tx_buffer *tx_buffer_info;
3192         u32 olinfo_status = 0, cmd_type_len = 0;
3193         unsigned int i;
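             /* EOP marks the last descriptor of a packet, RS requests a
              * completion writeback, and IFCS makes hardware insert the
              * Ethernet FCS; these are OR'ed into the final descriptor
              * only, after the loop below. */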
3194         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3195
3196         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3197
3198         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3199
3200         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3201                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3202
3203         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3204                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3205
3206                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3207                                                 IXGBE_ADVTXD_POPTS_SHIFT;
3208
3209                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3210                         olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3211                                                 IXGBE_ADVTXD_POPTS_SHIFT;
3212
3213         } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3214                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3215                                                 IXGBE_ADVTXD_POPTS_SHIFT;
3216
3217         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3218
3219         i = tx_ring->next_to_use;
3220         while (count--) {
3221                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3222                 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3223                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3224                 tx_desc->read.cmd_type_len =
3225                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3226                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3227
3228                 i++;
3229                 if (i == tx_ring->count)
3230                         i = 0;
3231         }
3232
3233         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3234
3235         /*
3236          * Force memory writes to complete before letting h/w
3237          * know there are new descriptors to fetch.  (Only
3238          * applicable for weak-ordered memory model archs,
3239          * such as IA-64).
3240          */
3241         wmb();
3242
3243         tx_ring->next_to_use = i;
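             /* doorbell: writing the new next_to_use into the ring's tail
              * register tells hardware the descriptors are ready to fetch */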
3244         writel(i, adapter->hw.hw_addr + tx_ring->tail);
3245 }
3246
3247 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3248                                  struct ixgbe_ring *tx_ring, int size)
3249 {
3250         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3251
3252         netif_stop_subqueue(netdev, tx_ring->queue_index);
3253         /* Herbert's original patch had:
3254          *  smp_mb__after_netif_stop_queue();
3255          * but since that doesn't exist yet, just open code it. */
3256         smp_mb();
3257
3258         /* We need to check again in a case another CPU has just
3259          * made room available. */
3260         if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3261                 return -EBUSY;
3262
3263         /* A reprieve! - use wake_subqueue because it doesn't call schedule */
3264         netif_wake_subqueue(netdev, tx_ring->queue_index);
3265         ++adapter->restart_queue;
3266         return 0;
3267 }
3268
3269 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3270                                struct ixgbe_ring *tx_ring, int size)
3271 {
3272         if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3273                 return 0;
3274         return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3275 }
3276
3277
3278 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3279 {
3280         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3281         struct ixgbe_ring *tx_ring;
3282         unsigned int len = skb->len;
3283         unsigned int first;
3284         unsigned int tx_flags = 0;
3285         u8 hdr_len = 0;
3286         int r_idx = 0, tso;
3287         unsigned int mss = 0;
3288         int count = 0;
3289         unsigned int f;
3290         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3291         len -= skb->data_len;
3292         r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3293         tx_ring = &adapter->tx_ring[r_idx];
3294
3296         if (skb->len <= 0) {
3297                 dev_kfree_skb(skb);
3298                 return NETDEV_TX_OK;
3299         }
3300         mss = skb_shinfo(skb)->gso_size;
3301
3302         if (mss)
3303                 count++;
3304         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3305                 count++;
3306
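             /* one descriptor per IXGBE_MAX_DATA_PER_TXD-sized chunk of the
              * linear area and of every fragment, on top of any context
              * descriptor counted above */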
3307         count += TXD_USE_COUNT(len);
3308         for (f = 0; f < nr_frags; f++)
3309                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3310
3311         if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
3312                 adapter->tx_busy++;
3313                 return NETDEV_TX_BUSY;
3314         }
3315         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3316                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3317                 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3318         }
3319
3320         if (skb->protocol == htons(ETH_P_IP))
3321                 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3322         first = tx_ring->next_to_use;
3323         tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3324         if (tso < 0) {
3325                 dev_kfree_skb_any(skb);
3326                 return NETDEV_TX_OK;
3327         }
3328
3329         if (tso)
3330                 tx_flags |= IXGBE_TX_FLAGS_TSO;
3331         else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3332                  (skb->ip_summed == CHECKSUM_PARTIAL))
3333                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3334
3335         ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3336                            ixgbe_tx_map(adapter, tx_ring, skb, first),
3337                            skb->len, hdr_len);
3338
3339         netdev->trans_start = jiffies;
3340
3341         ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3342
3343         return NETDEV_TX_OK;
3344 }
3345
3346 /**
3347  * ixgbe_get_stats - Get System Network Statistics
3348  * @netdev: network interface device structure
3349  *
3350  * Returns the address of the device statistics structure.
3351  * The statistics are actually updated from the timer callback.
3352  **/
3353 static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3354 {
3355         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3356
3357         /* only return the current stats */
3358         return &adapter->net_stats;
3359 }
3360
3361 /**
3362  * ixgbe_set_mac - Change the Ethernet Address of the NIC
3363  * @netdev: network interface device structure
3364  * @p: pointer to an address structure
3365  *
3366  * Returns 0 on success, negative on failure
3367  **/
3368 static int ixgbe_set_mac(struct net_device *netdev, void *p)
3369 {
3370         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3371         struct sockaddr *addr = p;
3372
3373         if (!is_valid_ether_addr(addr->sa_data))
3374                 return -EADDRNOTAVAIL;
3375
3376         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3377         memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3378
3379         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3380
3381         return 0;
3382 }
3383
3384 #ifdef CONFIG_NET_POLL_CONTROLLER
3385 /*
3386  * Polling 'interrupt' - used by things like netconsole to send skbs
3387  * without having to re-enable interrupts. It's not called while
3388  * the interrupt routine is executing.
3389  */
3390 static void ixgbe_netpoll(struct net_device *netdev)
3391 {
3392         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3393
3394         disable_irq(adapter->pdev->irq);
3395         adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
3396         ixgbe_intr(adapter->pdev->irq, netdev);
3397         adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
3398         enable_irq(adapter->pdev->irq);
3399 }
3400 #endif
3401
3402 /**
3403  * ixgbe_napi_add_all - prep napi structs for use
3404  * @adapter: private struct
3405  * helper function to napi_add each possible q_vector->napi
3406  */
3407 static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3408 {
3409         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3410         int (*poll)(struct napi_struct *, int);
3411
3412         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3413                 poll = &ixgbe_clean_rxonly;
3414         } else {
3415                 poll = &ixgbe_poll;
3416                 /* only one q_vector for legacy modes */
3417                 q_vectors = 1;
3418         }
3419
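             /* register one napi context per queue vector; 64 is the
              * conventional napi weight (packets polled per pass) */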
3420         for (i = 0; i < q_vectors; i++) {
3421                 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3422                 netif_napi_add(adapter->netdev, &q_vector->napi,
3423                                (*poll), 64);
3424         }
3425 }
3426
3427 /**
3428  * ixgbe_probe - Device Initialization Routine
3429  * @pdev: PCI device information struct
3430  * @ent: entry in ixgbe_pci_tbl
3431  *
3432  * Returns 0 on success, negative on failure
3433  *
3434  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
3435  * The OS initialization, configuring of the adapter private structure,
3436  * and a hardware reset occur.
3437  **/
3438 static int __devinit ixgbe_probe(struct pci_dev *pdev,
3439                                  const struct pci_device_id *ent)
3440 {
3441         struct net_device *netdev;
3442         struct ixgbe_adapter *adapter = NULL;
3443         struct ixgbe_hw *hw;
3444         const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3445         unsigned long mmio_start, mmio_len;
3446         static int cards_found;
3447         int i, err, pci_using_dac;
3448         u16 link_status, link_speed, link_width;
3449         u32 part_num;
3450
3451         err = pci_enable_device(pdev);
3452         if (err)
3453                 return err;
3454
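             /* prefer 64-bit DMA and fall back to a 32-bit mask if the
              * platform cannot provide it; both the streaming and coherent
              * masks must succeed before pci_using_dac is set */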
3455         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
3456             !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
3457                 pci_using_dac = 1;
3458         } else {
3459                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3460                 if (err) {
3461                         err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3462                         if (err) {
3463                                 dev_err(&pdev->dev, "No usable DMA "
3464                                         "configuration, aborting\n");
3465                                 goto err_dma;
3466                         }
3467                 }
3468                 pci_using_dac = 0;
3469         }
3470
3471         err = pci_request_regions(pdev, ixgbe_driver_name);
3472         if (err) {
3473                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3474                 goto err_pci_reg;
3475         }
3476
3477         pci_set_master(pdev);
3478         pci_save_state(pdev);
3479
3480         netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
3481         if (!netdev) {
3482                 err = -ENOMEM;
3483                 goto err_alloc_etherdev;
3484         }
3485
3486         SET_NETDEV_DEV(netdev, &pdev->dev);
3487
3488         pci_set_drvdata(pdev, netdev);
3489         adapter = netdev_priv(netdev);
3490
3491         adapter->netdev = netdev;
3492         adapter->pdev = pdev;
3493         hw = &adapter->hw;
3494         hw->back = adapter;
3495         adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3496
3497         mmio_start = pci_resource_start(pdev, 0);
3498         mmio_len = pci_resource_len(pdev, 0);
3499
3500         hw->hw_addr = ioremap(mmio_start, mmio_len);
3501         if (!hw->hw_addr) {
3502                 err = -EIO;
3503                 goto err_ioremap;
3504         }
3505
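             /* walk the remaining BARs; nothing past BAR 0 is mapped, so
              * this loop is currently a placeholder */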
3506         for (i = 1; i <= 5; i++) {
3507                 if (pci_resource_len(pdev, i) == 0)
3508                         continue;
3509         }
3510
3511         netdev->open = &ixgbe_open;
3512         netdev->stop = &ixgbe_close;
3513         netdev->hard_start_xmit = &ixgbe_xmit_frame;
3514         netdev->get_stats = &ixgbe_get_stats;
3515         netdev->set_multicast_list = &ixgbe_set_multi;
3516         netdev->set_mac_address = &ixgbe_set_mac;
3517         netdev->change_mtu = &ixgbe_change_mtu;
3518         ixgbe_set_ethtool_ops(netdev);
3519         netdev->tx_timeout = &ixgbe_tx_timeout;
3520         netdev->watchdog_timeo = 5 * HZ;
3521         netdev->vlan_rx_register = ixgbe_vlan_rx_register;
3522         netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
3523         netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
3524 #ifdef CONFIG_NET_POLL_CONTROLLER
3525         netdev->poll_controller = ixgbe_netpoll;
3526 #endif
3527         strcpy(netdev->name, pci_name(pdev));
3528
3529         netdev->mem_start = mmio_start;
3530         netdev->mem_end = mmio_start + mmio_len;
3531
3532         adapter->bd_number = cards_found;
3533
3534         /* PCI config space info */
3535         hw->vendor_id = pdev->vendor;
3536         hw->device_id = pdev->device;
3537         hw->revision_id = pdev->revision;
3538         hw->subsystem_vendor_id = pdev->subsystem_vendor;
3539         hw->subsystem_device_id = pdev->subsystem_device;
3540
3541         /* Setup hw api */
3542         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3543         hw->mac.type  = ii->mac;
3544
3545         err = ii->get_invariants(hw);
3546         if (err)
3547                 goto err_hw_init;
3548
3549         /* setup the private structure */
3550         err = ixgbe_sw_init(adapter);
3551         if (err)
3552                 goto err_sw_init;
3553
3554         netdev->features = NETIF_F_SG |
3555                            NETIF_F_HW_CSUM |
3556                            NETIF_F_HW_VLAN_TX |
3557                            NETIF_F_HW_VLAN_RX |
3558                            NETIF_F_HW_VLAN_FILTER;
3559
3560         netdev->features |= NETIF_F_LRO;
3561         netdev->features |= NETIF_F_TSO;
3562         netdev->features |= NETIF_F_TSO6;
3563
3564         netdev->vlan_features |= NETIF_F_TSO;
3565         netdev->vlan_features |= NETIF_F_TSO6;
3566         netdev->vlan_features |= NETIF_F_HW_CSUM;
3567         netdev->vlan_features |= NETIF_F_SG;
3568
3569         if (pci_using_dac)
3570                 netdev->features |= NETIF_F_HIGHDMA;
3571
3572         netdev->features |= NETIF_F_MULTI_QUEUE;
3573
3574         /* make sure the EEPROM is good */
3575         if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
3576                 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3577                 err = -EIO;
3578                 goto err_eeprom;
3579         }
3580
3581         memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3582         memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3583
3584         if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
3585                 err = -EIO;
3586                 goto err_eeprom;
3587         }
3588
3589         init_timer(&adapter->watchdog_timer);
3590         adapter->watchdog_timer.function = &ixgbe_watchdog;
3591         adapter->watchdog_timer.data = (unsigned long)adapter;
3592
3593         INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
3594
3595         /* initialize default flow control settings */
3596         hw->fc.original_type = ixgbe_fc_full;
3597         hw->fc.type = ixgbe_fc_full;
3598         hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
3599         hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
3600         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
3601
3602         err = ixgbe_init_interrupt_scheme(adapter);
3603         if (err)
3604                 goto err_sw_init;
3605
3606         /* print bus type/speed/width info */
3607         pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
3608         link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3609         link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3610         dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3611                  "%02x:%02x:%02x:%02x:%02x:%02x\n",
3612                 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3613                  (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3614                  "Unknown"),
3615                 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3616                  (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3617                  (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3618                  (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3619                  "Unknown"),
3620                 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3621                 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3622         ixgbe_read_part_num(hw, &part_num);
3623         dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3624                  hw->mac.type, hw->phy.type,
3625                  (part_num >> 8), (part_num & 0xff));
3626
3627         if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3628                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3629                          "this card is not sufficient for optimal "
3630                          "performance.\n");
3631                 dev_warn(&pdev->dev, "For optimal performance a x8 "
3632                          "PCI-Express slot is required.\n");
3633         }
3634
3635         /* reset the hardware with the new settings */
3636         ixgbe_start_hw(hw);
3637
3638         netif_carrier_off(netdev);
3639         netif_stop_queue(netdev);
3640         for (i = 0; i < adapter->num_tx_queues; i++)
3641                 netif_stop_subqueue(netdev, i);
3642
3643         ixgbe_napi_add_all(adapter);
3644
3645         strcpy(netdev->name, "eth%d");
3646         err = register_netdev(netdev);
3647         if (err)
3648                 goto err_register;
3649
3650 #ifdef CONFIG_DCA
3651         if (dca_add_requester(&pdev->dev) == 0) {
3652                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3653                 /* always use CB2 mode, difference is masked
3654                  * in the CB driver */
3655                 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
3656                 ixgbe_setup_dca(adapter);
3657         }
3658 #endif
3659
3660         dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
3661         cards_found++;
3662         return 0;
3663
3664 err_register:
3665         ixgbe_release_hw_control(adapter);
3666 err_hw_init:
3667 err_sw_init:
3668         ixgbe_reset_interrupt_capability(adapter);
3669 err_eeprom:
3670         iounmap(hw->hw_addr);
3671 err_ioremap:
3672         free_netdev(netdev);
3673 err_alloc_etherdev:
3674         pci_release_regions(pdev);
3675 err_pci_reg:
3676 err_dma:
3677         pci_disable_device(pdev);
3678         return err;
3679 }
3680
3681 /**
3682  * ixgbe_remove - Device Removal Routine
3683  * @pdev: PCI device information struct
3684  *
3685  * ixgbe_remove is called by the PCI subsystem to alert the driver
3686  * that it should release a PCI device.  This could be caused by a
3687  * Hot-Plug event, or because the driver is going to be removed from
3688  * memory.
3689  **/
3690 static void __devexit ixgbe_remove(struct pci_dev *pdev)
3691 {
3692         struct net_device *netdev = pci_get_drvdata(pdev);
3693         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3694
3695         set_bit(__IXGBE_DOWN, &adapter->state);
3696         del_timer_sync(&adapter->watchdog_timer);
3697
3698         flush_scheduled_work();
3699
3700 #ifdef CONFIG_DCA
3701         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3702                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3703                 dca_remove_requester(&pdev->dev);
3704                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
3705         }
3706
3707 #endif
3708         unregister_netdev(netdev);
3709
3710         ixgbe_reset_interrupt_capability(adapter);
3711
3712         ixgbe_release_hw_control(adapter);
3713
3714         iounmap(adapter->hw.hw_addr);
3715         pci_release_regions(pdev);
3716
3717         DPRINTK(PROBE, INFO, "complete\n");
3718         kfree(adapter->tx_ring);
3719         kfree(adapter->rx_ring);
3720
3721         free_netdev(netdev);
3722
3723         pci_disable_device(pdev);
3724 }
3725
3726 /**
3727  * ixgbe_io_error_detected - called when PCI error is detected
3728  * @pdev: Pointer to PCI device
3729  * @state: The current pci connection state
3730  *
3731  * This function is called after a PCI bus error affecting
3732  * this device has been detected.
3733  */
3734 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3735                                                 pci_channel_state_t state)
3736 {
3737         struct net_device *netdev = pci_get_drvdata(pdev);
3738         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3739
3740         netif_device_detach(netdev);
3741
3742         if (netif_running(netdev))
3743                 ixgbe_down(adapter);
3744         pci_disable_device(pdev);
3745
3746         /* Request a slot reset. */
3747         return PCI_ERS_RESULT_NEED_RESET;
3748 }
3749
3750 /**
3751  * ixgbe_io_slot_reset - called after the pci bus has been reset.
3752  * @pdev: Pointer to PCI device
3753  *
3754  * Restart the card from scratch, as if from a cold-boot.
3755  */
3756 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3757 {
3758         struct net_device *netdev = pci_get_drvdata(pdev);
3759         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3760
3761         if (pci_enable_device(pdev)) {
3762                 DPRINTK(PROBE, ERR,
3763                         "Cannot re-enable PCI device after reset.\n");
3764                 return PCI_ERS_RESULT_DISCONNECT;
3765         }
3766         pci_set_master(pdev);
3767         pci_restore_state(pdev);
3768
3769         pci_enable_wake(pdev, PCI_D3hot, 0);
3770         pci_enable_wake(pdev, PCI_D3cold, 0);
3771
3772         ixgbe_reset(adapter);
3773
3774         return PCI_ERS_RESULT_RECOVERED;
3775 }
3776
3777 /**
3778  * ixgbe_io_resume - called when traffic can start flowing again.
3779  * @pdev: Pointer to PCI device
3780  *
3781  * This callback is called when the error recovery driver tells us that
3782  * it's OK to resume normal operation.
3783  */
3784 static void ixgbe_io_resume(struct pci_dev *pdev)
3785 {
3786         struct net_device *netdev = pci_get_drvdata(pdev);
3787         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3788
3789         if (netif_running(netdev)) {
3790                 if (ixgbe_up(adapter)) {
3791                         DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
3792                         return;
3793                 }
3794         }
3795
3796         netif_device_attach(netdev);
3797
3798 }
3799
3800 static struct pci_error_handlers ixgbe_err_handler = {
3801         .error_detected = ixgbe_io_error_detected,
3802         .slot_reset = ixgbe_io_slot_reset,
3803         .resume = ixgbe_io_resume,
3804 };
3805
3806 static struct pci_driver ixgbe_driver = {
3807         .name     = ixgbe_driver_name,
3808         .id_table = ixgbe_pci_tbl,
3809         .probe    = ixgbe_probe,
3810         .remove   = __devexit_p(ixgbe_remove),
3811 #ifdef CONFIG_PM
3812         .suspend  = ixgbe_suspend,
3813         .resume   = ixgbe_resume,
3814 #endif
3815         .shutdown = ixgbe_shutdown,
3816         .err_handler = &ixgbe_err_handler
3817 };
3818
3819 /**
3820  * ixgbe_init_module - Driver Registration Routine
3821  *
3822  * ixgbe_init_module is the first routine called when the driver is
3823  * loaded. All it does is register with the PCI subsystem.
3824  **/
3825 static int __init ixgbe_init_module(void)
3826 {
3827         int ret;
3828         printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
3829                ixgbe_driver_string, ixgbe_driver_version);
3830
3831         printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
3832
3833 #ifdef CONFIG_DCA
3834         dca_register_notify(&dca_notifier);
3835
3836 #endif
3837         ret = pci_register_driver(&ixgbe_driver);
3838         return ret;
3839 }
3840 module_init(ixgbe_init_module);
3841
3842 /**
3843  * ixgbe_exit_module - Driver Exit Cleanup Routine
3844  *
3845  * ixgbe_exit_module is called just before the driver is removed
3846  * from memory.
3847  **/
3848 static void __exit ixgbe_exit_module(void)
3849 {
3850 #ifdef CONFIG_DCA
3851         dca_unregister_notify(&dca_notifier);
3852 #endif
3853         pci_unregister_driver(&ixgbe_driver);
3854 }
3855
3856 #ifdef CONFIG_DCA
3857 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
3858                             void *p)
3859 {
3860         int ret_val;
3861
3862         ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
3863                                          __ixgbe_notify_dca);
3864
3865         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3866 }
3867 #endif /* CONFIG_DCA */
3868
3869 module_exit(ixgbe_exit_module);
3870
3871 /* ixgbe_main.c */