/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

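/*
 * ixgbe_disable_sriov - return the adapter to non-IOV operation
 *
 * Disables any active VFs at the PCI level, clears the SR-IOV/VT bits in
 * GCR_EXT, GPIE and VT_CTL, and frees the per-VF state kept in
 * adapter->vfinfo.
 */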
static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

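	/* per-queue registers: read all 64 queue instances before printing */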
	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23  20 19            0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
				tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					phys_to_virt(tx_buffer_info->dma),
					tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset
						  ),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
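		/*
		 * Each 82598 IVAR register holds four 8-bit vector entries;
		 * Rx causes map to entries 0-63 and Tx causes to 64-127, so
		 * the register index and byte lane follow from the queue.
		 */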
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

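/* write EICS so the hardware raises the interrupts for the queues in qmask */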
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_xon_state - check the tx ring xon state
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if in xon state (currently not paused)
 */
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
				      struct ixgbe_ring *tx_ring)
{
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->dcb_cfg.pfc_mode_enable) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
			break;
		case ixgbe_mac_82599EB:
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
			break;
		default:
			tc = 0;
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    ixgbe_tx_xon_state(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
		      "  Tx Queue             <%d>\n"
		      "  TDH, TDT             <%x>, <%x>\n"
		      "  next_to_use          <%x>\n"
		      "  next_to_clean        <%x>\n"
		      "tx_buffer_info[next_to_clean]\n"
		      "  time_stamp           <%lx>\n"
		      "  jiffies              <%lx>\n",
		      tx_ring->queue_index,
		      IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
		      IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
		      tx_ring->next_to_use, eop,
		      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

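	/*
	 * The outer loop advances one completed packet at a time (DD set on
	 * its end-of-packet descriptor); the inner loop then releases every
	 * buffer that made up that packet.
	 */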
	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];

			tx_desc->wb.status = 0;
			cleaned = (i == eop);

			i++;
			if (i == tx_ring->count)
				i = 0;

			if (cleaned && tx_buffer_info->skb) {
				total_bytes += tx_buffer_info->bytecount;
				total_packets += tx_buffer_info->gso_segs;
			}

			ixgbe_unmap_and_free_tx_resource(tx_ring,
							 tx_buffer_info);
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			e_info(probe, "tx hang %d detected, resetting "
			       "adapter\n", adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&tx_ring->syncp);
	return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
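/*
 * Direct Cache Access (DCA) lets the platform steer descriptor and header
 * DMA writes toward the cache of the CPU servicing a queue; the helpers
 * below program the per-queue DCA_RXCTRL/DCA_TXCTRL tag for the CPU that
 * is currently running the q_vector.
 */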
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u8 reg_idx = rx_ring->reg_idx;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		break;
	case ixgbe_mac_82599EB:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		break;
	default:
		break;
	}
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl;
	u8 reg_idx = tx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
		break;
	case ixgbe_mac_82599EB:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
		break;
	default:
		break;
	}
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();
	long r_idx;
	int i;

	if (q_vector->cpu == cpu)
		goto out_no_update;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int num_q_vectors;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (i = 0; i < num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	struct sk_buff *skb;
	u16 i = rx_ring->next_to_use;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev)
		return;

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

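		/*
		 * Packet-split mode hands the hardware two buffers: headers
		 * land in the skb data buffer (hdr_addr) and the payload in
		 * a half page (pkt_addr); otherwise the whole frame goes
		 * into the single skb buffer.
		 */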
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(rx_ring->netdev);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info. */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbe_release_rx_desc(rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK) >>
		IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
							 u64 *count)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		*count += 1;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

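/*
 * Per-skb RSC bookkeeping stored in skb->cb: the header DMA mapping whose
 * unmap is deferred until EOP, and a flag noting that the unmap is pending.
 */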
struct ixgbe_rsc_cb {
	dma_addr_t dma;
	bool delay_unmap;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)

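/**
 * ixgbe_clean_rx_irq - reclaim completed Rx descriptors, send packets up
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx ring to clean
 * @work_done: running count of packets processed, updated for the caller
 * @work_to_do: NAPI budget remaining for this poll
 **/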
Herbert Xu78b6f4c2009-01-18 21:49:45 -08001187static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00001188 struct ixgbe_ring *rx_ring,
1189 int *work_done, int work_to_do)
Auke Kok9a799d72007-09-15 14:07:45 -07001190{
Herbert Xu78b6f4c2009-01-18 21:49:45 -08001191 struct ixgbe_adapter *adapter = q_vector->adapter;
Auke Kok9a799d72007-09-15 14:07:45 -07001192 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1193 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1194 struct sk_buff *skb;
Alexander Duyckf8212f92009-04-27 22:42:37 +00001195 unsigned int i, rsc_count = 0;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001196 u32 len, staterr;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001197 u16 hdr_info;
1198 bool cleaned = false;
Auke Kok9a799d72007-09-15 14:07:45 -07001199 int cleaned_count = 0;
Ayyappan Veeraiyand2f4fbe2008-02-01 15:59:19 -08001200 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Yi Zou3d8fd382009-06-08 14:38:44 +00001201#ifdef IXGBE_FCOE
1202 int ddp_bytes = 0;
1203#endif /* IXGBE_FCOE */
Auke Kok9a799d72007-09-15 14:07:45 -07001204
1205 i = rx_ring->next_to_clean;
Alexander Duyck31f05a22010-08-19 13:40:31 +00001206 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07001207 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1208 rx_buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9a799d72007-09-15 14:07:45 -07001209
1210 while (staterr & IXGBE_RXD_STAT_DD) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001211 u32 upper_len = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001212 if (*work_done >= work_to_do)
1213 break;
1214 (*work_done)++;
1215
Milton Miller3c945e52010-02-19 17:44:42 +00001216 rmb(); /* read descriptor and rx_buffer_info after status DD */
Yi Zou6e455b892009-08-06 13:05:44 +00001217 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001218 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
1219 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07001220 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07001221 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
Shannon Nelson0b746e02010-05-18 16:00:03 +00001222 if ((len > IXGBE_RX_HDR_SIZE) ||
1223 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1224 len = IXGBE_RX_HDR_SIZE;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001225 } else {
Auke Kok9a799d72007-09-15 14:07:45 -07001226 len = le16_to_cpu(rx_desc->wb.upper.length);
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001227 }
Auke Kok9a799d72007-09-15 14:07:45 -07001228
1229 cleaned = true;
1230 skb = rx_buffer_info->skb;
Jesse Brandeburg7ca3bc52009-12-03 11:33:29 +00001231 prefetch(skb->data);
Auke Kok9a799d72007-09-15 14:07:45 -07001232 rx_buffer_info->skb = NULL;
1233
Alexander Duyck21fa4e62009-06-04 15:59:49 +00001234 if (rx_buffer_info->dma) {
Mallikarjuna R Chilakala43634e82010-02-25 23:14:37 +00001235 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
1236 (!(staterr & IXGBE_RXD_STAT_EOP)) &&
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001237 (!(skb->prev))) {
Mallikarjuna R Chilakala43634e82010-02-25 23:14:37 +00001238 /*
1239 * When HWRSC is enabled, delay unmapping
1240 * of the first packet. It carries the
1241 * header information, HW may still
1242 * access the header after the writeback.
1243 * Only unmap it when EOP is reached
1244 */
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001245 IXGBE_RSC_CB(skb)->delay_unmap = true;
Mallikarjuna R Chilakala43634e82010-02-25 23:14:37 +00001246 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001247 } else {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08001248 dma_unmap_single(rx_ring->dev,
Joe Perchese8e9f692010-09-07 21:34:53 +00001249 rx_buffer_info->dma,
1250 rx_ring->rx_buf_len,
1251 DMA_FROM_DEVICE);
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001252 }
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00001253 rx_buffer_info->dma = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001254 skb_put(skb, len);
1255 }
1256
1257 if (upper_len) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08001258 dma_unmap_page(rx_ring->dev,
1259 rx_buffer_info->page_dma,
1260 PAGE_SIZE / 2,
1261 DMA_FROM_DEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07001262 rx_buffer_info->page_dma = 0;
1263 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
Joe Perchese8e9f692010-09-07 21:34:53 +00001264 rx_buffer_info->page,
1265 rx_buffer_info->page_offset,
1266 upper_len);
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07001267
1268 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
1269 (page_count(rx_buffer_info->page) != 1))
1270 rx_buffer_info->page = NULL;
1271 else
1272 get_page(rx_buffer_info->page);
Auke Kok9a799d72007-09-15 14:07:45 -07001273
1274 skb->len += upper_len;
1275 skb->data_len += upper_len;
1276 skb->truesize += upper_len;
1277 }
1278
1279 i++;
1280 if (i == rx_ring->count)
1281 i = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001282
Alexander Duyck31f05a22010-08-19 13:40:31 +00001283 next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07001284 prefetch(next_rxd);
Auke Kok9a799d72007-09-15 14:07:45 -07001285 cleaned_count++;
Alexander Duyckf8212f92009-04-27 22:42:37 +00001286
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00001287 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
Alexander Duyckf8212f92009-04-27 22:42:37 +00001288 rsc_count = ixgbe_get_rsc_count(rx_desc);
1289
1290 if (rsc_count) {
1291 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1292 IXGBE_RXDADV_NEXTP_SHIFT;
1293 next_buffer = &rx_ring->rx_buffer_info[nextp];
Alexander Duyckf8212f92009-04-27 22:42:37 +00001294 } else {
1295 next_buffer = &rx_ring->rx_buffer_info[i];
1296 }
1297
Auke Kok9a799d72007-09-15 14:07:45 -07001298 if (staterr & IXGBE_RXD_STAT_EOP) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00001299 if (skb->prev)
Joe Perchese8e9f692010-09-07 21:34:53 +00001300 skb = ixgbe_transform_rsc_queue(skb,
Alexander Duyck5b7da512010-11-16 19:26:50 -08001301 &(rx_ring->rx_stats.rsc_count));
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00001302 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001303 if (IXGBE_RSC_CB(skb)->delay_unmap) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08001304 dma_unmap_single(rx_ring->dev,
Nick Nunley1b507732010-04-27 13:10:27 +00001305 IXGBE_RSC_CB(skb)->dma,
Joe Perchese8e9f692010-09-07 21:34:53 +00001306 rx_ring->rx_buf_len,
Nick Nunley1b507732010-04-27 13:10:27 +00001307 DMA_FROM_DEVICE);
Mallikarjuna R Chilakalafd3686a2010-03-19 04:41:33 +00001308 IXGBE_RSC_CB(skb)->dma = 0;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00001309 IXGBE_RSC_CB(skb)->delay_unmap = false;
Mallikarjuna R Chilakalafd3686a2010-03-19 04:41:33 +00001310 }
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00001311 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
Alexander Duyck5b7da512010-11-16 19:26:50 -08001312 rx_ring->rx_stats.rsc_count +=
1313 skb_shinfo(skb)->nr_frags;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00001314 else
Alexander Duyck5b7da512010-11-16 19:26:50 -08001315 rx_ring->rx_stats.rsc_count++;
1316 rx_ring->rx_stats.rsc_flush++;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00001317 }
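 /*
  * Update the ring counters inside the u64_stats seqcount so
  * 64-bit readers on 32-bit systems see a consistent snapshot.
  */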
Eric Dumazetde1036b2010-10-20 23:00:04 +00001318 u64_stats_update_begin(&rx_ring->syncp);
Auke Kok9a799d72007-09-15 14:07:45 -07001319 rx_ring->stats.packets++;
1320 rx_ring->stats.bytes += skb->len;
Eric Dumazetde1036b2010-10-20 23:00:04 +00001321 u64_stats_update_end(&rx_ring->syncp);
Auke Kok9a799d72007-09-15 14:07:45 -07001322 } else {
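 /*
  * Not EOP yet: with packet split the current skb is handed
  * to the next buffer, otherwise the partial skbs are chained
  * via skb->next/prev and merged once EOP is seen.
  */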
Yi Zou6e455b892009-08-06 13:05:44 +00001323 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00001324 rx_buffer_info->skb = next_buffer->skb;
1325 rx_buffer_info->dma = next_buffer->dma;
1326 next_buffer->skb = skb;
1327 next_buffer->dma = 0;
1328 } else {
1329 skb->next = next_buffer->skb;
1330 skb->next->prev = skb;
1331 }
Alexander Duyck5b7da512010-11-16 19:26:50 -08001332 rx_ring->rx_stats.non_eop_descs++;
Auke Kok9a799d72007-09-15 14:07:45 -07001333 goto next_desc;
1334 }
1335
1336 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
1337 dev_kfree_skb_irq(skb);
1338 goto next_desc;
1339 }
1340
Don Skidmore8bae1b22009-07-23 18:00:39 +00001341 ixgbe_rx_checksum(adapter, rx_desc, skb);
Ayyappan Veeraiyand2f4fbe2008-02-01 15:59:19 -08001342
1343 /* probably a little skewed due to removing CRC */
1344 total_rx_bytes += skb->len;
1345 total_rx_packets++;
1346
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001347 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Yi Zou332d4a72009-05-13 13:11:53 +00001348#ifdef IXGBE_FCOE
1349 /* if ddp, not passing to ULD unless for FCP_RSP or error */
Yi Zou3d8fd382009-06-08 14:38:44 +00001350 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
1351 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1352 if (!ddp_bytes)
Yi Zou332d4a72009-05-13 13:11:53 +00001353 goto next_desc;
Yi Zou3d8fd382009-06-08 14:38:44 +00001354 }
Yi Zou332d4a72009-05-13 13:11:53 +00001355#endif /* IXGBE_FCOE */
Alexander Duyckfdaff1c2009-05-06 10:43:47 +00001356 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
Auke Kok9a799d72007-09-15 14:07:45 -07001357
1358next_desc:
1359 rx_desc->wb.upper.status_error = 0;
1360
1361 /* return some buffers to hardware, one at a time is too slow */
1362 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001363 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9a799d72007-09-15 14:07:45 -07001364 cleaned_count = 0;
1365 }
1366
1367 /* use prefetched values */
1368 rx_desc = next_rxd;
Alexander Duyckf8212f92009-04-27 22:42:37 +00001369 rx_buffer_info = &rx_ring->rx_buffer_info[i];
Auke Kok9a799d72007-09-15 14:07:45 -07001370
1371 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001372 }
1373
Auke Kok9a799d72007-09-15 14:07:45 -07001374 rx_ring->next_to_clean = i;
1375 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1376
1377 if (cleaned_count)
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001378 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
Auke Kok9a799d72007-09-15 14:07:45 -07001379
Yi Zou3d8fd382009-06-08 14:38:44 +00001380#ifdef IXGBE_FCOE
1381 /* include DDPed FCoE data */
1382 if (ddp_bytes > 0) {
1383 unsigned int mss;
1384
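 /*
  * Approximate the FC payload per frame from the MTU and, for
  * large frames, round it down to a 512-byte boundary so the
  * DDPed byte count converts to a sensible packet count.
  */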
Alexander Duyckfc77dc32010-11-16 19:26:51 -08001385 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
Yi Zou3d8fd382009-06-08 14:38:44 +00001386 sizeof(struct fc_frame_header) -
1387 sizeof(struct fcoe_crc_eof);
1388 if (mss > 512)
1389 mss &= ~511;
1390 total_rx_bytes += ddp_bytes;
1391 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1392 }
1393#endif /* IXGBE_FCOE */
1394
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001395 rx_ring->total_packets += total_rx_packets;
1396 rx_ring->total_bytes += total_rx_bytes;
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001397
Auke Kok9a799d72007-09-15 14:07:45 -07001398 return cleaned;
1399}
1400
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001401static int ixgbe_clean_rxonly(struct napi_struct *, int);
Auke Kok9a799d72007-09-15 14:07:45 -07001402/**
1403 * ixgbe_configure_msix - Configure MSI-X hardware
1404 * @adapter: board private structure
1405 *
1406 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
1407 * interrupts.
1408 **/
1409static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1410{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001411 struct ixgbe_q_vector *q_vector;
1412 int i, j, q_vectors, v_idx, r_idx;
1413 u32 mask;
Auke Kok9a799d72007-09-15 14:07:45 -07001414
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001415 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1416
Jesse Brandeburg4df10462009-03-13 22:15:31 +00001417 /*
1418 * Populate the IVAR table and set the ITR values to the
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001419 * corresponding register.
1420 */
1421 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00001422 q_vector = adapter->q_vector[v_idx];
Akinobu Mita984b3f52010-03-05 13:41:37 -08001423 /* XXX for_each_set_bit(...) */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001424 r_idx = find_first_bit(q_vector->rxr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001425 adapter->num_rx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001426
1427 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001428 j = adapter->rx_ring[r_idx]->reg_idx;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001429 ixgbe_set_ivar(adapter, 0, j, v_idx);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001430 r_idx = find_next_bit(q_vector->rxr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001431 adapter->num_rx_queues,
1432 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001433 }
1434 r_idx = find_first_bit(q_vector->txr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001435 adapter->num_tx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001436
1437 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001438 j = adapter->tx_ring[r_idx]->reg_idx;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001439 ixgbe_set_ivar(adapter, 1, j, v_idx);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001440 r_idx = find_next_bit(q_vector->txr_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00001441 adapter->num_tx_queues,
1442 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001443 }
1444
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001445 if (q_vector->txr_count && !q_vector->rxr_count)
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001446 /* tx only */
1447 q_vector->eitr = adapter->tx_eitr_param;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001448 else if (q_vector->rxr_count)
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001449 /* rx or mixed */
1450 q_vector->eitr = adapter->rx_eitr_param;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001451
Alexander Duyckfe49f042009-06-04 16:00:09 +00001452 ixgbe_write_eitr(q_vector);
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00001453 /* If Flow Director is enabled, set interrupt affinity */
1454 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
1455 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
1456 /*
1457 * Allocate the affinity_hint cpumask, assign the mask
1458 * for this vector, and set our affinity_hint for
1459 * this irq.
1460 */
1461 if (!alloc_cpumask_var(&q_vector->affinity_mask,
1462 GFP_KERNEL))
1463 return;
1464 cpumask_set_cpu(v_idx, q_vector->affinity_mask);
1465 irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
1466 q_vector->affinity_mask);
1467 }
Auke Kok9a799d72007-09-15 14:07:45 -07001468 }
1469
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001470 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1471 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
Joe Perchese8e9f692010-09-07 21:34:53 +00001472 v_idx);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001473 else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1474 ixgbe_set_ivar(adapter, -1, 1, v_idx);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001475 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
Auke Kok9a799d72007-09-15 14:07:45 -07001476
Jesse Brandeburg41fb9242008-09-11 19:55:58 -07001477 /* set up to autoclear timer, and the vectors */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001478 mask = IXGBE_EIMS_ENABLE_MASK;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00001479 if (adapter->num_vfs)
1480 mask &= ~(IXGBE_EIMS_OTHER |
1481 IXGBE_EIMS_MAILBOX |
1482 IXGBE_EIMS_LSC);
1483 else
1484 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
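 /* causes left out of the mask are not auto-cleared; the
  * slow-path LSC vector clears them explicitly in its handler */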
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001485 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
Auke Kok9a799d72007-09-15 14:07:45 -07001486}
1487
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001488enum latency_range {
1489 lowest_latency = 0,
1490 low_latency = 1,
1491 bulk_latency = 2,
1492 latency_invalid = 255
1493};
1494
1495/**
1496 * ixgbe_update_itr - update the dynamic ITR value based on statistics
1497 * @adapter: pointer to adapter
1498 * @eitr: eitr setting (ints per sec) to give last timeslice
1499 * @itr_setting: current throttle rate in ints/second
1500 * @packets: the number of packets during this measurement interval
1501 * @bytes: the number of bytes during this measurement interval
1502 *
1503 * Stores a new ITR value based on packets and byte
1504 * counts during the last interrupt. The advantage of per interrupt
1505 * computation is faster updates and more accurate ITR for the current
1506 * traffic pattern. Constants in this function were computed
1507 * based on theoretical maximum wire speed and thresholds were set based
1508 * on testing data as well as attempting to minimize response time
1509 * while increasing bulk throughput.
1510 * this functionality is controlled by the InterruptThrottleRate module
1511 * parameter (see ixgbe_param.c)
1512 **/
1513static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00001514 u32 eitr, u8 itr_setting,
1515 int packets, int bytes)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001516{
1517 unsigned int retval = itr_setting;
1518 u32 timepassed_us;
1519 u64 bytes_perint;
1520
1521 if (packets == 0)
1522 goto update_itr_done;
1523
1524
1525 /* simple throttlerate management
1526 * 0-20MB/s lowest (100000 ints/s)
1527 * 20-100MB/s low (20000 ints/s)
1528 * 100-1249MB/s bulk (8000 ints/s)
1529 */
1530 /* what was last interrupt timeslice? */
1531 timepassed_us = 1000000/eitr;
1532 bytes_perint = bytes / timepassed_us; /* bytes/usec */
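 /*
  * Example: at 20000 ints/s the last timeslice was 50 usec, so
  * 100kB seen in that slice is ~2000 bytes/usec, well above any
  * typical eitr_low/eitr_high threshold, and the switch below
  * steps the vector toward bulk_latency.
  */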
1533
1534 switch (itr_setting) {
1535 case lowest_latency:
1536 if (bytes_perint > adapter->eitr_low)
1537 retval = low_latency;
1538 break;
1539 case low_latency:
1540 if (bytes_perint > adapter->eitr_high)
1541 retval = bulk_latency;
1542 else if (bytes_perint <= adapter->eitr_low)
1543 retval = lowest_latency;
1544 break;
1545 case bulk_latency:
1546 if (bytes_perint <= adapter->eitr_high)
1547 retval = low_latency;
1548 break;
1549 }
1550
1551update_itr_done:
1552 return retval;
1553}
1554
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001555/**
1556 * ixgbe_write_eitr - write EITR register in hardware specific way
Alexander Duyckfe49f042009-06-04 16:00:09 +00001557 * @q_vector: structure containing interrupt and ring information
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001558 *
1559 * This function is made to be called by ethtool and by the driver
1560 * when it needs to update EITR registers at runtime. Hardware
1561 * specific quirks/differences are taken care of here.
1562 */
Alexander Duyckfe49f042009-06-04 16:00:09 +00001563void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001564{
Alexander Duyckfe49f042009-06-04 16:00:09 +00001565 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001566 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001567 int v_idx = q_vector->v_idx;
1568 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1569
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001570 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1571 /* must write high and low 16 bits to reset counter */
1572 itr_reg |= (itr_reg << 16);
1573 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1574 /*
Jesse Brandeburgf8d1dca2010-04-27 01:37:20 +00001575 * 82599 can support a value of zero, so allow it for
 1576 * max interrupt rate, but there is an erratum where it
 1577 * cannot be zero with RSC
1578 */
1579 if (itr_reg == 8 &&
1580 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1581 itr_reg = 0;
1582
1583 /*
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001584 * set the WDIS bit to not clear the timer bits and cause an
1585 * immediate assertion of the interrupt
1586 */
1587 itr_reg |= IXGBE_EITR_CNT_WDIS;
1588 }
1589 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1590}
1591
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001592static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1593{
1594 struct ixgbe_adapter *adapter = q_vector->adapter;
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001595 u32 new_itr;
1596 u8 current_itr, ret_itr;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001597 int i, r_idx;
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001598 struct ixgbe_ring *rx_ring, *tx_ring;
1599
1600 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1601 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001602 tx_ring = adapter->tx_ring[r_idx];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001603 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
Joe Perchese8e9f692010-09-07 21:34:53 +00001604 q_vector->tx_itr,
1605 tx_ring->total_packets,
1606 tx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001607 /* if the result for this queue would decrease interrupt
1608 * rate for this vector then use that result */
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001609 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
Joe Perchese8e9f692010-09-07 21:34:53 +00001610 q_vector->tx_itr - 1 : ret_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001611 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001612 r_idx + 1);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001613 }
1614
1615 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1616 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001617 rx_ring = adapter->rx_ring[r_idx];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001618 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
Joe Perchese8e9f692010-09-07 21:34:53 +00001619 q_vector->rx_itr,
1620 rx_ring->total_packets,
1621 rx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001622 /* if the result for this queue would decrease interrupt
1623 * rate for this vector then use that result */
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001624 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
Joe Perchese8e9f692010-09-07 21:34:53 +00001625 q_vector->rx_itr - 1 : ret_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001626 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001627 r_idx + 1);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001628 }
1629
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001630 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001631
1632 switch (current_itr) {
1633 /* counts and packets in update_itr are dependent on these numbers */
1634 case lowest_latency:
1635 new_itr = 100000;
1636 break;
1637 case low_latency:
1638 new_itr = 20000; /* aka hwitr = ~200 */
1639 break;
1640 case bulk_latency:
1641 default:
1642 new_itr = 8000;
1643 break;
1644 }
1645
1646 if (new_itr != q_vector->eitr) {
Alexander Duyckfe49f042009-06-04 16:00:09 +00001647 /* do an exponential smoothing */
1648 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
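 /* e.g. moving from 8000 toward a 20000 ints/s target gives
  * (8000 * 90 + 20000 * 10) / 100 = 9200, so the rate only
  * drifts about 10% toward the new value per adjustment */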
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001649
1650 /* save the algorithm value here, not the smoothed one */
1651 q_vector->eitr = new_itr;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001652
1653 ixgbe_write_eitr(q_vector);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001654 }
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001655}
1656
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001657/**
1658 * ixgbe_check_overtemp_task - worker thread to check over tempurature
1659 * @work: pointer to work_struct containing our data
1660 **/
1661static void ixgbe_check_overtemp_task(struct work_struct *work)
1662{
1663 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00001664 struct ixgbe_adapter,
1665 check_overtemp_task);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001666 struct ixgbe_hw *hw = &adapter->hw;
1667 u32 eicr = adapter->interrupt_event;
1668
Joe Perches7ca647b2010-09-07 21:35:40 +00001669 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
1670 return;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001671
Joe Perches7ca647b2010-09-07 21:35:40 +00001672 switch (hw->device_id) {
1673 case IXGBE_DEV_ID_82599_T3_LOM: {
1674 u32 autoneg;
1675 bool link_up = false;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001676
Joe Perches7ca647b2010-09-07 21:35:40 +00001677 if (hw->mac.ops.check_link)
1678 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1679
1680 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
1681 (eicr & IXGBE_EICR_LSC))
1682 /* Check if this is due to overtemp */
1683 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
1684 break;
1685 return;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001686 }
Joe Perches7ca647b2010-09-07 21:35:40 +00001687 default:
1688 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1689 return;
1690 break;
1691 }
1692 e_crit(drv,
1693 "Network adapter has been stopped because it has over heated. "
1694 "Restart the computer. If the problem persists, "
1695 "power off the system and replace the adapter\n");
1696 /* write to clear the interrupt */
1697 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001698}
1699
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001700static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1701{
1702 struct ixgbe_hw *hw = &adapter->hw;
1703
1704 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1705 (eicr & IXGBE_EICR_GPI_SDP1)) {
Emil Tantilov396e7992010-07-01 20:05:12 +00001706 e_crit(probe, "Fan has stopped, replace the adapter\n");
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001707 /* write to clear the interrupt */
1708 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1709 }
1710}
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001711
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001712static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1713{
1714 struct ixgbe_hw *hw = &adapter->hw;
1715
1716 if (eicr & IXGBE_EICR_GPI_SDP1) {
1717 /* Clear the interrupt */
1718 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1719 schedule_work(&adapter->multispeed_fiber_task);
1720 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
1721 /* Clear the interrupt */
1722 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1723 schedule_work(&adapter->sfp_config_module_task);
1724 } else {
1725 /* Interrupt isn't for us... */
1726 return;
1727 }
1728}
1729
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001730static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1731{
1732 struct ixgbe_hw *hw = &adapter->hw;
1733
1734 adapter->lsc_int++;
1735 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1736 adapter->link_check_timeout = jiffies;
1737 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1738 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
Nelson, Shannon8a0717f2009-11-12 18:47:11 +00001739 IXGBE_WRITE_FLUSH(hw);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001740 schedule_work(&adapter->watchdog_task);
1741 }
1742}
1743
Auke Kok9a799d72007-09-15 14:07:45 -07001744static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1745{
1746 struct net_device *netdev = data;
1747 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1748 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore54037502009-02-21 15:42:56 -08001749 u32 eicr;
1750
1751 /*
 1752 * Workaround for silicon errata. Use clear-by-write instead
 1753 * of clear-by-read. Reading with EICS will return the
 1754 * interrupt causes without clearing, which will later be done
1755 * with the write to EICR.
1756 */
1757 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1758 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
Auke Kok9a799d72007-09-15 14:07:45 -07001759
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001760 if (eicr & IXGBE_EICR_LSC)
1761 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001762
Greg Rose1cdd1ec2010-01-09 02:26:46 +00001763 if (eicr & IXGBE_EICR_MAILBOX)
1764 ixgbe_msg_task(adapter);
1765
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001766 if (hw->mac.type == ixgbe_mac_82598EB)
1767 ixgbe_check_fan_failure(adapter, eicr);
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001768
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001769 if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001770 ixgbe_check_sfp_event(adapter, eicr);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07001771 adapter->interrupt_event = eicr;
1772 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1773 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1774 schedule_work(&adapter->check_overtemp_task);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001775
1776 /* Handle Flow Director Full threshold interrupt */
1777 if (eicr & IXGBE_EICR_FLOW_DIR) {
1778 int i;
1779 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1780 /* Disable transmits before FDIR Re-initialization */
1781 netif_tx_stop_all_queues(netdev);
1782 for (i = 0; i < adapter->num_tx_queues; i++) {
1783 struct ixgbe_ring *tx_ring =
Joe Perchese8e9f692010-09-07 21:34:53 +00001784 adapter->tx_ring[i];
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001785 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
Joe Perchese8e9f692010-09-07 21:34:53 +00001786 &tx_ring->reinit_state))
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001787 schedule_work(&adapter->fdir_reinit_task);
1788 }
1789 }
1790 }
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001791 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1792 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
Auke Kok9a799d72007-09-15 14:07:45 -07001793
1794 return IRQ_HANDLED;
1795}
1796
Alexander Duyckfe49f042009-06-04 16:00:09 +00001797static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1798 u64 qmask)
1799{
1800 u32 mask;
1801
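 /*
  * 82598 exposes a single EIMS register; 82599 spreads the
  * 64-bit queue mask across EIMS_EX[0] and EIMS_EX[1].
  */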
1802 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1803 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1804 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1805 } else {
1806 mask = (qmask & 0xFFFFFFFF);
1807 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1808 mask = (qmask >> 32);
1809 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1810 }
1811 /* skip the flush */
1812}
1813
1814static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00001815 u64 qmask)
Alexander Duyckfe49f042009-06-04 16:00:09 +00001816{
1817 u32 mask;
1818
1819 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1820 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1821 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1822 } else {
1823 mask = (qmask & 0xFFFFFFFF);
1824 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1825 mask = (qmask >> 32);
1826 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1827 }
1828 /* skip the flush */
1829}
1830
Auke Kok9a799d72007-09-15 14:07:45 -07001831static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1832{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001833 struct ixgbe_q_vector *q_vector = data;
1834 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001835 struct ixgbe_ring *tx_ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001836 int i, r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -07001837
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001838 if (!q_vector->txr_count)
1839 return IRQ_HANDLED;
1840
1841 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1842 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001843 tx_ring = adapter->tx_ring[r_idx];
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001844 tx_ring->total_bytes = 0;
1845 tx_ring->total_packets = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001846 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001847 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001848 }
1849
Jesse Brandeburg9b471442009-12-03 11:33:54 +00001850 /* EIAM disabled interrupts (on this vector) for us */
Alexander Duyck91281fd2009-06-04 16:00:27 +00001851 napi_schedule(&q_vector->napi);
1852
Auke Kok9a799d72007-09-15 14:07:45 -07001853 return IRQ_HANDLED;
1854}
1855
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001856/**
1857 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1858 * @irq: unused
1859 * @data: pointer to our q_vector struct for this interrupt vector
1860 **/
Auke Kok9a799d72007-09-15 14:07:45 -07001861static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1862{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001863 struct ixgbe_q_vector *q_vector = data;
1864 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001865 struct ixgbe_ring *rx_ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001866 int r_idx;
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001867 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07001868
Alexander Duyck33cf09c2010-11-16 19:26:55 -08001869#ifdef CONFIG_IXGBE_DCA
1870 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1871 ixgbe_update_dca(q_vector);
1872#endif
1873
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001874 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
Alexander Duyck33cf09c2010-11-16 19:26:55 -08001875 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001876 rx_ring = adapter->rx_ring[r_idx];
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001877 rx_ring->total_bytes = 0;
1878 rx_ring->total_packets = 0;
1879 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001880 r_idx + 1);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001881 }
1882
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001883 if (!q_vector->rxr_count)
1884 return IRQ_HANDLED;
1885
Jesse Brandeburg9b471442009-12-03 11:33:54 +00001886 /* EIAM disabled interrupts (on this vector) for us */
Ben Hutchings288379f2009-01-19 16:43:59 -08001887 napi_schedule(&q_vector->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001888
Auke Kok9a799d72007-09-15 14:07:45 -07001889 return IRQ_HANDLED;
1890}
1891
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001892static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1893{
Alexander Duyck91281fd2009-06-04 16:00:27 +00001894 struct ixgbe_q_vector *q_vector = data;
1895 struct ixgbe_adapter *adapter = q_vector->adapter;
1896 struct ixgbe_ring *ring;
1897 int r_idx;
1898 int i;
1899
1900 if (!q_vector->txr_count && !q_vector->rxr_count)
1901 return IRQ_HANDLED;
1902
1903 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1904 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001905 ring = adapter->tx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00001906 ring->total_bytes = 0;
1907 ring->total_packets = 0;
1908 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001909 r_idx + 1);
Alexander Duyck91281fd2009-06-04 16:00:27 +00001910 }
1911
1912 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1913 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001914 ring = adapter->rx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00001915 ring->total_bytes = 0;
1916 ring->total_packets = 0;
1917 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001918 r_idx + 1);
Alexander Duyck91281fd2009-06-04 16:00:27 +00001919 }
1920
Jesse Brandeburg9b471442009-12-03 11:33:54 +00001921 /* EIAM disabled interrupts (on this vector) for us */
Alexander Duyck91281fd2009-06-04 16:00:27 +00001922 napi_schedule(&q_vector->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001923
1924 return IRQ_HANDLED;
1925}
1926
1927/**
1928 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1929 * @napi: napi struct with our devices info in it
1930 * @budget: amount of work driver is allowed to do this pass, in packets
1931 *
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001932 * This function is optimized for cleaning one queue only on a single
1933 * q_vector!!!
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001934 **/
Auke Kok9a799d72007-09-15 14:07:45 -07001935static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1936{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001937 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00001938 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001939 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001940 struct ixgbe_ring *rx_ring = NULL;
Auke Kok9a799d72007-09-15 14:07:45 -07001941 int work_done = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001942 long r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -07001943
Jeff Garzik5dd2d332008-10-16 05:09:31 -04001944#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08001945 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
Alexander Duyck33cf09c2010-11-16 19:26:55 -08001946 ixgbe_update_dca(q_vector);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08001947#endif
Auke Kok9a799d72007-09-15 14:07:45 -07001948
Alexander Duyck33cf09c2010-11-16 19:26:55 -08001949 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1950 rx_ring = adapter->rx_ring[r_idx];
1951
Herbert Xu78b6f4c2009-01-18 21:49:45 -08001952 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07001953
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001954 /* If all Rx work done, exit the polling mode */
1955 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08001956 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001957 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001958 ixgbe_set_itr_msix(q_vector);
Auke Kok9a799d72007-09-15 14:07:45 -07001959 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Alexander Duyckfe49f042009-06-04 16:00:09 +00001960 ixgbe_irq_enable_queues(adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00001961 ((u64)1 << q_vector->v_idx));
Auke Kok9a799d72007-09-15 14:07:45 -07001962 }
1963
1964 return work_done;
1965}
1966
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001967/**
Alexander Duyck91281fd2009-06-04 16:00:27 +00001968 * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001969 * @napi: napi struct with our devices info in it
1970 * @budget: amount of work driver is allowed to do this pass, in packets
1971 *
1972 * This function will clean more than one rx queue associated with a
1973 * q_vector.
1974 **/
Alexander Duyck91281fd2009-06-04 16:00:27 +00001975static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001976{
1977 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00001978 container_of(napi, struct ixgbe_q_vector, napi);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001979 struct ixgbe_adapter *adapter = q_vector->adapter;
Alexander Duyck91281fd2009-06-04 16:00:27 +00001980 struct ixgbe_ring *ring = NULL;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001981 int work_done = 0, i;
1982 long r_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00001983 bool tx_clean_complete = true;
1984
Alexander Duyck33cf09c2010-11-16 19:26:55 -08001985#ifdef CONFIG_IXGBE_DCA
1986 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1987 ixgbe_update_dca(q_vector);
1988#endif
1989
Alexander Duyck91281fd2009-06-04 16:00:27 +00001990 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1991 for (i = 0; i < q_vector->txr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00001992 ring = adapter->tx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00001993 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1994 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00001995 r_idx + 1);
Alexander Duyck91281fd2009-06-04 16:00:27 +00001996 }
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001997
1998 /* attempt to distribute budget to each queue fairly, but don't allow
1999 * the budget to go below 1 because we'll exit polling */
2000 budget /= (q_vector->rxr_count ?: 1);
2001 budget = max(budget, 1);
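 /* e.g. a 64-packet budget shared by 4 rx rings leaves each
  * ring at most 16 packets to clean in this poll */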
2002 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2003 for (i = 0; i < q_vector->rxr_count; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002004 ring = adapter->rx_ring[r_idx];
Alexander Duyck91281fd2009-06-04 16:00:27 +00002005 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002006 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00002007 r_idx + 1);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002008 }
2009
2010 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002011 ring = adapter->rx_ring[r_idx];
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002012 /* If all Rx work done, exit the polling mode */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002013 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002014 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002015 if (adapter->rx_itr_setting & 1)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002016 ixgbe_set_itr_msix(q_vector);
2017 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Alexander Duyckfe49f042009-06-04 16:00:09 +00002018 ixgbe_irq_enable_queues(adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002019 ((u64)1 << q_vector->v_idx));
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002020 return 0;
2021 }
2022
2023 return work_done;
2024}
Alexander Duyck91281fd2009-06-04 16:00:27 +00002025
2026/**
2027 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
2028 * @napi: napi struct with our devices info in it
2029 * @budget: amount of work driver is allowed to do this pass, in packets
2030 *
2031 * This function is optimized for cleaning one queue only on a single
2032 * q_vector!!!
2033 **/
2034static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2035{
2036 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00002037 container_of(napi, struct ixgbe_q_vector, napi);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002038 struct ixgbe_adapter *adapter = q_vector->adapter;
2039 struct ixgbe_ring *tx_ring = NULL;
2040 int work_done = 0;
2041 long r_idx;
2042
Alexander Duyck91281fd2009-06-04 16:00:27 +00002043#ifdef CONFIG_IXGBE_DCA
2044 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002045 ixgbe_update_dca(q_vector);
Alexander Duyck91281fd2009-06-04 16:00:27 +00002046#endif
2047
Alexander Duyck33cf09c2010-11-16 19:26:55 -08002048 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2049 tx_ring = adapter->tx_ring[r_idx];
2050
Alexander Duyck91281fd2009-06-04 16:00:27 +00002051 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2052 work_done = budget;
2053
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002054 /* If all Tx work done, exit the polling mode */
Alexander Duyck91281fd2009-06-04 16:00:27 +00002055 if (work_done < budget) {
2056 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00002057 if (adapter->tx_itr_setting & 1)
Alexander Duyck91281fd2009-06-04 16:00:27 +00002058 ixgbe_set_itr_msix(q_vector);
2059 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Joe Perchese8e9f692010-09-07 21:34:53 +00002060 ixgbe_irq_enable_queues(adapter,
2061 ((u64)1 << q_vector->v_idx));
Alexander Duyck91281fd2009-06-04 16:00:27 +00002062 }
2063
2064 return work_done;
2065}
2066
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002067static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00002068 int r_idx)
Auke Kok9a799d72007-09-15 14:07:45 -07002069{
Alexander Duyck7a921c92009-05-06 10:43:28 +00002070 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2071
2072 set_bit(r_idx, q_vector->rxr_idx);
2073 q_vector->rxr_count++;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002074}
Auke Kok9a799d72007-09-15 14:07:45 -07002075
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002076static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
Joe Perchese8e9f692010-09-07 21:34:53 +00002077 int t_idx)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002078{
Alexander Duyck7a921c92009-05-06 10:43:28 +00002079 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2080
2081 set_bit(t_idx, q_vector->txr_idx);
2082 q_vector->txr_count++;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002083}
Auke Kok9a799d72007-09-15 14:07:45 -07002084
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002085/**
2086 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2087 * @adapter: board private structure to initialize
2088 * @vectors: allotted vector count for descriptor rings
2089 *
2090 * This function maps descriptor rings to the queue-specific vectors
2091 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2092 * one vector per ring/queue, but on a constrained vector budget, we
2093 * group the rings as "efficiently" as possible. You would add new
2094 * mapping configurations in here.
2095 **/
2096static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002097 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002098{
2099 int v_start = 0;
2100 int rxr_idx = 0, txr_idx = 0;
2101 int rxr_remaining = adapter->num_rx_queues;
2102 int txr_remaining = adapter->num_tx_queues;
2103 int i, j;
2104 int rqpv, tqpv;
2105 int err = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002106
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002107 /* No mapping required if MSI-X is disabled. */
2108 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
Auke Kok9a799d72007-09-15 14:07:45 -07002109 goto out;
2110
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002111 /*
2112 * The ideal configuration...
2113 * We have enough vectors to map one per queue.
2114 */
2115 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2116 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2117 map_vector_to_rxq(adapter, v_start, rxr_idx);
2118
2119 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2120 map_vector_to_txq(adapter, v_start, txr_idx);
2121
2122 goto out;
2123 }
2124
2125 /*
2126 * If we don't have enough vectors for a 1-to-1
2127 * mapping, we'll have to group them so there are
2128 * multiple queues per vector.
2129 */
2130 /* Re-adjusting *qpv takes care of the remainder. */
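 /* e.g. 24 rx queues over 16 vectors: the first eight vectors
  * each take two queues, the remaining eight take one apiece */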
2131 for (i = v_start; i < vectors; i++) {
2132 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
2133 for (j = 0; j < rqpv; j++) {
2134 map_vector_to_rxq(adapter, i, rxr_idx);
2135 rxr_idx++;
2136 rxr_remaining--;
Auke Kok9a799d72007-09-15 14:07:45 -07002137 }
Auke Kok9a799d72007-09-15 14:07:45 -07002138 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002139 for (i = v_start; i < vectors; i++) {
2140 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2141 for (j = 0; j < tqpv; j++) {
2142 map_vector_to_txq(adapter, i, txr_idx);
2143 txr_idx++;
2144 txr_remaining--;
Auke Kok9a799d72007-09-15 14:07:45 -07002145 }
Auke Kok9a799d72007-09-15 14:07:45 -07002146 }
2147
Auke Kok9a799d72007-09-15 14:07:45 -07002148out:
Auke Kok9a799d72007-09-15 14:07:45 -07002149 return err;
2150}
2151
2152/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002153 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2154 * @adapter: board private structure
2155 *
2156 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2157 * interrupts from the kernel.
2158 **/
2159static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2160{
2161 struct net_device *netdev = adapter->netdev;
2162 irqreturn_t (*handler)(int, void *);
2163 int i, vector, q_vectors, err;
Joe Perchese8e9f692010-09-07 21:34:53 +00002164 int ri = 0, ti = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002165
2166 /* Decrement for Other and TCP Timer vectors */
2167 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2168
2169 /* Map the Tx/Rx rings to the vectors we were allotted. */
2170 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2171 if (err)
2172 goto out;
2173
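 /*
  * Pick a per-vector handler based on which ring types the
  * vector owns: Tx only, Rx only, or a Tx/Rx mix.
  */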
2174#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
Joe Perchese8e9f692010-09-07 21:34:53 +00002175 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
2176 &ixgbe_msix_clean_many)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002177 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00002178 handler = SET_HANDLER(adapter->q_vector[vector]);
Robert Olssoncb13fc22008-11-25 16:43:52 -08002179
Joe Perchese8e9f692010-09-07 21:34:53 +00002180 if (handler == &ixgbe_msix_clean_rx) {
Robert Olssoncb13fc22008-11-25 16:43:52 -08002181 sprintf(adapter->name[vector], "%s-%s-%d",
2182 netdev->name, "rx", ri++);
Joe Perchese8e9f692010-09-07 21:34:53 +00002183 } else if (handler == &ixgbe_msix_clean_tx) {
Robert Olssoncb13fc22008-11-25 16:43:52 -08002184 sprintf(adapter->name[vector], "%s-%s-%d",
2185 netdev->name, "tx", ti++);
Joe Perchese8e9f692010-09-07 21:34:53 +00002186 } else
Robert Olssoncb13fc22008-11-25 16:43:52 -08002187 sprintf(adapter->name[vector], "%s-%s-%d",
2188 netdev->name, "TxRx", vector);
2189
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002190 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00002191 handler, 0, adapter->name[vector],
2192 adapter->q_vector[vector]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002193 if (err) {
Emil Tantilov396e7992010-07-01 20:05:12 +00002194 e_err(probe, "request_irq failed for MSIX interrupt: "
Emil Tantilov849c4542010-06-03 16:53:41 +00002195 "Error %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002196 goto free_queue_irqs;
2197 }
2198 }
2199
2200 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
2201 err = request_irq(adapter->msix_entries[vector].vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00002202 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002203 if (err) {
Emil Tantilov396e7992010-07-01 20:05:12 +00002204 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002205 goto free_queue_irqs;
2206 }
2207
2208 return 0;
2209
2210free_queue_irqs:
2211 for (i = vector - 1; i >= 0; i--)
2212 free_irq(adapter->msix_entries[--vector].vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00002213 adapter->q_vector[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002214 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2215 pci_disable_msix(adapter->pdev);
2216 kfree(adapter->msix_entries);
2217 adapter->msix_entries = NULL;
2218out:
2219 return err;
2220}
2221
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002222static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2223{
Alexander Duyck7a921c92009-05-06 10:43:28 +00002224 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002225 u8 current_itr;
2226 u32 new_itr = q_vector->eitr;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002227 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2228 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002229
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002230 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
Joe Perchese8e9f692010-09-07 21:34:53 +00002231 q_vector->tx_itr,
2232 tx_ring->total_packets,
2233 tx_ring->total_bytes);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002234 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
Joe Perchese8e9f692010-09-07 21:34:53 +00002235 q_vector->rx_itr,
2236 rx_ring->total_packets,
2237 rx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002238
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002239 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002240
2241 switch (current_itr) {
2242 /* counts and packets in update_itr are dependent on these numbers */
2243 case lowest_latency:
2244 new_itr = 100000;
2245 break;
2246 case low_latency:
2247 new_itr = 20000; /* aka hwitr = ~200 */
2248 break;
2249 case bulk_latency:
2250 new_itr = 8000;
2251 break;
2252 default:
2253 break;
2254 }
2255
2256 if (new_itr != q_vector->eitr) {
Alexander Duyckfe49f042009-06-04 16:00:09 +00002257 /* do an exponential smoothing */
2258 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
Jesse Brandeburg509ee932009-03-13 22:13:28 +00002259
2260 /* save the algorithm value here, not the smoothed one */
2261 q_vector->eitr = new_itr;
Alexander Duyckfe49f042009-06-04 16:00:09 +00002262
2263 ixgbe_write_eitr(q_vector);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002264 }
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002265}
2266
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002267/**
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002268 * ixgbe_irq_enable - Enable default interrupt generation settings
2269 * @adapter: board private structure
2270 **/
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002271static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2272 bool flush)
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002273{
2274 u32 mask;
Nelson, Shannon835462f2009-04-27 22:42:54 +00002275
2276 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07002277 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2278 mask |= IXGBE_EIMS_GPI_SDP0;
David S. Miller6ab33d52008-11-20 16:44:00 -08002279 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2280 mask |= IXGBE_EIMS_GPI_SDP1;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002281 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00002282 mask |= IXGBE_EIMS_ECC;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002283 mask |= IXGBE_EIMS_GPI_SDP1;
2284 mask |= IXGBE_EIMS_GPI_SDP2;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002285 if (adapter->num_vfs)
2286 mask |= IXGBE_EIMS_MAILBOX;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002287 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002288 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2289 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2290 mask |= IXGBE_EIMS_FLOW_DIR;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002291
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002292 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002293 if (queues)
2294 ixgbe_irq_enable_queues(adapter, ~0);
2295 if (flush)
2296 IXGBE_WRITE_FLUSH(&adapter->hw);
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002297
2298 if (adapter->num_vfs > 32) {
2299 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2300 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2301 }
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08002302}
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002303
2304/**
2305 * ixgbe_intr - legacy mode Interrupt Handler
Auke Kok9a799d72007-09-15 14:07:45 -07002306 * @irq: interrupt number
2307 * @data: pointer to a network interface device structure
Auke Kok9a799d72007-09-15 14:07:45 -07002308 **/
2309static irqreturn_t ixgbe_intr(int irq, void *data)
2310{
2311 struct net_device *netdev = data;
2312 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2313 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck7a921c92009-05-06 10:43:28 +00002314 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9a799d72007-09-15 14:07:45 -07002315 u32 eicr;
2316
Don Skidmore54037502009-02-21 15:42:56 -08002317 /*
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002318 * Workaround for silicon errata on 82598. Mask the interrupts
Don Skidmore54037502009-02-21 15:42:56 -08002319 * before the read of EICR.
2320 */
2321 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2322
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002323 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
 2324 * therefore no explicit interrupt disable is necessary */
2325 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002326 if (!eicr) {
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002327 /*
 2328 * Shared interrupt alert!
 2329 * Make sure interrupts are enabled because the read will
 2330 * have disabled interrupts due to EIAM.
 2331 * Finish the workaround for the silicon errata on 82598: unmask
2332 * the interrupt that we masked before the EICR read.
2333 */
2334 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2335 ixgbe_irq_enable(adapter, true, true);
Auke Kok9a799d72007-09-15 14:07:45 -07002336 return IRQ_NONE; /* Not our interrupt */
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07002337 }
Auke Kok9a799d72007-09-15 14:07:45 -07002338
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002339 if (eicr & IXGBE_EICR_LSC)
2340 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002341
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002342 if (hw->mac.type == ixgbe_mac_82599EB)
2343 ixgbe_check_sfp_event(adapter, eicr);
2344
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002345 ixgbe_check_fan_failure(adapter, eicr);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07002346 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2347 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2348 schedule_work(&adapter->check_overtemp_task);
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002349
Alexander Duyck7a921c92009-05-06 10:43:28 +00002350 if (napi_schedule_prep(&(q_vector->napi))) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002351 adapter->tx_ring[0]->total_packets = 0;
2352 adapter->tx_ring[0]->total_bytes = 0;
2353 adapter->rx_ring[0]->total_packets = 0;
2354 adapter->rx_ring[0]->total_bytes = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002355 /* would disable interrupts here but EIAM disabled it */
Alexander Duyck7a921c92009-05-06 10:43:28 +00002356 __napi_schedule(&(q_vector->napi));
Auke Kok9a799d72007-09-15 14:07:45 -07002357 }
2358
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00002359 /*
 2360 * re-enable link (maybe) and non-queue interrupts, with no flush;
2361 * ixgbe_poll will re-enable the queue interrupts
2362 */
2363
2364 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2365 ixgbe_irq_enable(adapter, false, false);
2366
Auke Kok9a799d72007-09-15 14:07:45 -07002367 return IRQ_HANDLED;
2368}
2369
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002370static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2371{
2372 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2373
2374 for (i = 0; i < q_vectors; i++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00002375 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002376 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2377 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2378 q_vector->rxr_count = 0;
2379 q_vector->txr_count = 0;
2380 }
2381}
2382
Auke Kok9a799d72007-09-15 14:07:45 -07002383/**
2384 * ixgbe_request_irq - initialize interrupts
2385 * @adapter: board private structure
2386 *
2387 * Attempts to configure interrupts using the best available
2388 * capabilities of the hardware and kernel.
2389 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002390static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07002391{
2392 struct net_device *netdev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002393 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002394
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002395 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2396 err = ixgbe_request_msix_irqs(adapter);
2397 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
Joe Perchesa0607fd2009-11-18 23:29:17 -08002398 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
Joe Perchese8e9f692010-09-07 21:34:53 +00002399 netdev->name, netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002400 } else {
Joe Perchesa0607fd2009-11-18 23:29:17 -08002401 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
Joe Perchese8e9f692010-09-07 21:34:53 +00002402 netdev->name, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002403 }
2404
Auke Kok9a799d72007-09-15 14:07:45 -07002405 if (err)
Emil Tantilov396e7992010-07-01 20:05:12 +00002406 e_err(probe, "request_irq failed, Error %d\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07002407
Auke Kok9a799d72007-09-15 14:07:45 -07002408 return err;
2409}
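
/*
 * Note on the fallback order above: MSI-X is tried first (one vector per
 * q_vector plus a misc vector), then plain MSI, then legacy INTx.  The
 * legacy case passes IRQF_SHARED because an INTx line may be shared with
 * other devices, whereas the MSI vector is exclusive to this adapter.
 */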
2410
2411static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2412{
2413 struct net_device *netdev = adapter->netdev;
2414
2415 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002416 int i, q_vectors;
Auke Kok9a799d72007-09-15 14:07:45 -07002417
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002418 q_vectors = adapter->num_msix_vectors;
2419
2420 i = q_vectors - 1;
Auke Kok9a799d72007-09-15 14:07:45 -07002421 free_irq(adapter->msix_entries[i].vector, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002422
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002423 i--;
2424 for (; i >= 0; i--) {
2425 free_irq(adapter->msix_entries[i].vector,
Joe Perchese8e9f692010-09-07 21:34:53 +00002426 adapter->q_vector[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002427 }
2428
2429 ixgbe_reset_q_vectors(adapter);
2430 } else {
2431 free_irq(adapter->pdev->irq, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002432 }
2433}
2434
2435/**
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002436 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2437 * @adapter: board private structure
2438 **/
2439static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2440{
Nelson, Shannon835462f2009-04-27 22:42:54 +00002441 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2442 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2443 } else {
2444 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2445 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002446 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002447 if (adapter->num_vfs > 32)
2448 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002449 }
2450 IXGBE_WRITE_FLUSH(&adapter->hw);
2451 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2452 int i;
2453 for (i = 0; i < adapter->num_msix_vectors; i++)
2454 synchronize_irq(adapter->msix_entries[i].vector);
2455 } else {
2456 synchronize_irq(adapter->pdev->irq);
2457 }
2458}
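
/*
 * Rough sketch of the mask layout assumed above: 82598 has a single 32-bit
 * EIMC, so one ~0 write masks everything.  82599 spreads its 64 queue
 * vectors across EIMC_EX(0) and EIMC_EX(1) (32 bits each), and the low
 * EIMC write of 0xFFFF0000 covers the remaining non-queue causes, hence
 * the three writes before the flush and the synchronize_irq() loop.
 */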
2459
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00002460/**
Auke Kok9a799d72007-09-15 14:07:45 -07002461 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2462 *
2463 **/
2464static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2465{
Auke Kok9a799d72007-09-15 14:07:45 -07002466 struct ixgbe_hw *hw = &adapter->hw;
2467
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002468 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
Joe Perchese8e9f692010-09-07 21:34:53 +00002469 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
Auke Kok9a799d72007-09-15 14:07:45 -07002470
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002471 ixgbe_set_ivar(adapter, 0, 0, 0);
2472 ixgbe_set_ivar(adapter, 1, 0, 0);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002473
2474 map_vector_to_rxq(adapter, 0, 0);
2475 map_vector_to_txq(adapter, 0, 0);
2476
Emil Tantilov396e7992010-07-01 20:05:12 +00002477 e_info(hw, "Legacy interrupt IVAR setup done\n");
Auke Kok9a799d72007-09-15 14:07:45 -07002478}
2479
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002480/**
2481 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2482 * @adapter: board private structure
2483 * @ring: structure containing ring specific data
2484 *
2485 * Configure the Tx descriptor ring after a reset.
2486 **/
Alexander Duyck84418e32010-08-19 13:40:54 +00002487void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2488 struct ixgbe_ring *ring)
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002489{
2490 struct ixgbe_hw *hw = &adapter->hw;
2491 u64 tdba = ring->dma;
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002492 int wait_loop = 10;
2493 u32 txdctl;
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002494 u16 reg_idx = ring->reg_idx;
2495
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002496 /* disable queue to avoid issues while updating state */
2497 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2498 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2499 txdctl & ~IXGBE_TXDCTL_ENABLE);
2500 IXGBE_WRITE_FLUSH(hw);
2501
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002502 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
Joe Perchese8e9f692010-09-07 21:34:53 +00002503 (tdba & DMA_BIT_MASK(32)));
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002504 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2505 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2506 ring->count * sizeof(union ixgbe_adv_tx_desc));
2507 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2508 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
Alexander Duyck84ea2592010-11-16 19:26:49 -08002509 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002510
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002511 /* configure fetching thresholds */
2512 if (adapter->rx_itr_setting == 0) {
2513 /* cannot set wthresh when itr==0 */
2514 txdctl &= ~0x007F0000;
2515 } else {
2516 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2517 txdctl |= (8 << 16);
2518 }
2519 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2520 /* PThresh workaround for Tx hang with DFP enabled. */
2521 txdctl |= 32;
2522 }
2523
2524 /* reinitialize flowdirector state */
2525 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
2526
2527 /* enable queue */
2528 txdctl |= IXGBE_TXDCTL_ENABLE;
2529 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2530
2531 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2532 if (hw->mac.type == ixgbe_mac_82598EB &&
2533 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2534 return;
2535
2536 /* poll to verify queue is enabled */
2537 do {
2538 msleep(1);
2539 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2540 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2541 if (!wait_loop)
2542 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002543}
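
/*
 * Worked example of the ring programming above, assuming the usual 16-byte
 * advanced Tx descriptor and a 512-entry ring:
 *
 *   TDLEN  = 512 * sizeof(union ixgbe_adv_tx_desc) = 512 * 16 = 8192 bytes
 *   TXDCTL: WTHRESH lives in bits 22:16 (the 0x007F0000 mask), so (8 << 16)
 *           requests write-back bursts of 8 descriptors, while the DCB
 *           workaround of txdctl |= 32 raises PTHRESH (bits 6:0) to 32.
 */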
2544
Alexander Duyck120ff942010-08-19 13:34:50 +00002545static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2546{
2547 struct ixgbe_hw *hw = &adapter->hw;
2548 u32 rttdcs;
2549 u32 mask;
2550
2551 if (hw->mac.type == ixgbe_mac_82598EB)
2552 return;
2553
2554 /* disable the arbiter while setting MTQC */
2555 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2556 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2557 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2558
2559 /* set transmit pool layout */
2560 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2561 switch (adapter->flags & mask) {
2562
2563 case (IXGBE_FLAG_SRIOV_ENABLED):
2564 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2565 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2566 break;
2567
2568 case (IXGBE_FLAG_DCB_ENABLED):
2569 /* We enable 8 traffic classes, DCB only */
2570 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2571 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2572 break;
2573
2574 default:
2575 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2576 break;
2577 }
2578
2579 /* re-enable the arbiter */
2580 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2581 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2582}
2583
Auke Kok9a799d72007-09-15 14:07:45 -07002584/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002585 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07002586 * @adapter: board private structure
2587 *
2588 * Configure the Tx unit of the MAC after a reset.
2589 **/
2590static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2591{
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002592 struct ixgbe_hw *hw = &adapter->hw;
2593 u32 dmatxctl;
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002594 u32 i;
Auke Kok9a799d72007-09-15 14:07:45 -07002595
Alexander Duyck2f1860b2010-08-19 13:39:43 +00002596 ixgbe_setup_mtqc(adapter);
2597
2598 if (hw->mac.type != ixgbe_mac_82598EB) {
2599 /* DMATXCTL.EN must be before Tx queues are enabled */
2600 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2601 dmatxctl |= IXGBE_DMATXCTL_TE;
2602 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2603 }
2604
Auke Kok9a799d72007-09-15 14:07:45 -07002605 /* Setup the HW Tx Head and Tail descriptor pointers */
Alexander Duyck43e69bf2010-08-19 13:35:12 +00002606 for (i = 0; i < adapter->num_tx_queues; i++)
2607 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07002608}
2609
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002610#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
Auke Kok9a799d72007-09-15 14:07:45 -07002611
Yi Zoua6616b42009-08-06 13:05:23 +00002612static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00002613 struct ixgbe_ring *rx_ring)
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002614{
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002615 u32 srrctl;
Yi Zoua6616b42009-08-06 13:05:23 +00002616 int index;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002617 struct ixgbe_ring_feature *feature = adapter->ring_feature;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002618
Yi Zoua6616b42009-08-06 13:05:23 +00002619 index = rx_ring->reg_idx;
2620 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2621 unsigned long mask;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002622 mask = (unsigned long) feature[RING_F_RSS].mask;
Alexander Duyck3be1adf2008-08-30 00:29:10 -07002623 index = index & mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002624 }
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002625 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
2626
2627 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2628 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
Alexander Duyck9e10e042010-08-19 13:40:06 +00002629 if (adapter->num_vfs)
2630 srrctl |= IXGBE_SRRCTL_DROP_EN;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002631
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002632 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2633 IXGBE_SRRCTL_BSIZEHDR_MASK;
2634
Yi Zou6e455b892009-08-06 13:05:44 +00002635 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002636#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2637 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2638#else
2639 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2640#endif
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002641 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002642 } else {
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002643 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2644 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002645 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002646 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002647
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002648 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2649}
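
/*
 * The SRRCTL size fields written above are in hardware granules: BSIZEPKT
 * is in 1KB units (hence IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10), and BSIZEHDR
 * sits at bit 8 in 64-byte units, which appears to be why a plain << 2
 * (that is, << 8 for the field position, >> 6 for the units) is enough to
 * place the header size.  For example, a 2048-byte rx_buf_len programs
 * BSIZEPKT = 2, i.e. a 2KB packet buffer.
 */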
2650
Alexander Duyck05abb122010-08-19 13:35:41 +00002651static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002652{
Alexander Duyck05abb122010-08-19 13:35:41 +00002653 struct ixgbe_hw *hw = &adapter->hw;
2654 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
Joe Perchese8e9f692010-09-07 21:34:53 +00002655 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2656 0x6A3E67EA, 0x14364D17, 0x3BED200D};
Alexander Duyck05abb122010-08-19 13:35:41 +00002657 u32 mrqc = 0, reta = 0;
2658 u32 rxcsum;
2659 int i, j;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002660 int mask;
2661
Alexander Duyck05abb122010-08-19 13:35:41 +00002662 /* Fill out hash function seeds */
2663 for (i = 0; i < 10; i++)
2664 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002665
Alexander Duyck05abb122010-08-19 13:35:41 +00002666 /* Fill out redirection table */
2667 for (i = 0, j = 0; i < 128; i++, j++) {
2668 if (j == adapter->ring_feature[RING_F_RSS].indices)
2669 j = 0;
2670 /* reta = 4-byte sliding window of
2671 * 0x00..(indices-1)(indices-1)00..etc. */
2672 reta = (reta << 8) | (j * 0x11);
2673 if ((i & 3) == 3)
2674 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2675 }
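
/*
 * Worked example of the window above with 4 RSS queues (indices = 4):
 * the values fed in repeat 0x00, 0x11, 0x22, 0x33, so each of the 32
 * RETA registers ends up holding 0x00112233.  Writing the queue index
 * into both nibbles of each byte keeps the entry correct however many
 * of its low bits the hardware actually consumes.
 */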
2676
2677 /* Disable indicating checksum in descriptor, enables RSS hash */
2678 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2679 rxcsum |= IXGBE_RXCSUM_PCSD;
2680 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2681
2682 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2683 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2684 else
2685 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002686#ifdef CONFIG_IXGBE_DCB
Alexander Duyck05abb122010-08-19 13:35:41 +00002687 | IXGBE_FLAG_DCB_ENABLED
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002688#endif
Alexander Duyck05abb122010-08-19 13:35:41 +00002689 | IXGBE_FLAG_SRIOV_ENABLED
2690 );
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002691
2692 switch (mask) {
2693 case (IXGBE_FLAG_RSS_ENABLED):
2694 mrqc = IXGBE_MRQC_RSSEN;
2695 break;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002696 case (IXGBE_FLAG_SRIOV_ENABLED):
2697 mrqc = IXGBE_MRQC_VMDQEN;
2698 break;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002699#ifdef CONFIG_IXGBE_DCB
2700 case (IXGBE_FLAG_DCB_ENABLED):
2701 mrqc = IXGBE_MRQC_RT8TCEN;
2702 break;
2703#endif /* CONFIG_IXGBE_DCB */
2704 default:
2705 break;
2706 }
2707
Alexander Duyck05abb122010-08-19 13:35:41 +00002708 /* Perform hash on these packet types */
2709 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2710 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2711 | IXGBE_MRQC_RSS_FIELD_IPV6
2712 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2713
2714 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002715}
2716
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002717/**
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002718 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2719 * @adapter: address of board private structure
2720 * @index: index of ring to set
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002721 **/
Alexander Duyck73670962010-08-19 13:38:34 +00002722static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2723 struct ixgbe_ring *ring)
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002724{
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002725 struct ixgbe_hw *hw = &adapter->hw;
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002726 u32 rscctrl;
Mallikarjuna R Chilakalaedd2ea52009-11-23 10:45:11 -08002727 int rx_buf_len;
Alexander Duyck73670962010-08-19 13:38:34 +00002728 u16 reg_idx = ring->reg_idx;
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002729
Alexander Duyck73670962010-08-19 13:38:34 +00002730 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
2731 return;
2732
2733 rx_buf_len = ring->rx_buf_len;
2734 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002735 rscctrl |= IXGBE_RSCCTL_RSCEN;
2736 /*
2737 * we must limit the number of descriptors so that the
2738 * total size of max desc * buf_len is not greater
2739 * than 65535
2740 */
Alexander Duyck73670962010-08-19 13:38:34 +00002741 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002742#if (MAX_SKB_FRAGS > 16)
2743 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2744#elif (MAX_SKB_FRAGS > 8)
2745 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2746#elif (MAX_SKB_FRAGS > 4)
2747 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2748#else
2749 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2750#endif
2751 } else {
2752 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2753 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2754 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2755 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2756 else
2757 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2758 }
Alexander Duyck73670962010-08-19 13:38:34 +00002759 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002760}
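
/*
 * The MAXDESC choice above keeps coalesced_descs * rx_buf_len under the
 * 64KB limit noted in the comment.  For example, 2KB buffers allow the
 * full 16 descriptors (16 * 2048 = 32768), while 4KB buffers must drop to
 * 8 (8 * 4096 = 32768), since 16 * 4096 = 65536 would exceed 65535.
 */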
2761
Alexander Duyck9e10e042010-08-19 13:40:06 +00002762/**
2763 * ixgbe_set_uta - Set unicast filter table address
2764 * @adapter: board private structure
2765 *
2766 * The unicast table address is a register array of 32-bit registers.
 2767 * The table is meant to be used in a way similar to how the MTA is used;
 2768 * however, due to certain limitations in the hardware it is necessary to
 2769 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 2770 * enable bit to allow VLAN tag stripping when promiscuous mode is enabled.
2771 **/
2772static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2773{
2774 struct ixgbe_hw *hw = &adapter->hw;
2775 int i;
2776
2777 /* The UTA table only exists on 82599 hardware and newer */
2778 if (hw->mac.type < ixgbe_mac_82599EB)
2779 return;
2780
2781 /* we only need to do this if VMDq is enabled */
2782 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2783 return;
2784
2785 for (i = 0; i < 128; i++)
2786 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2787}
2788
2789#define IXGBE_MAX_RX_DESC_POLL 10
2790static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2791 struct ixgbe_ring *ring)
2792{
2793 struct ixgbe_hw *hw = &adapter->hw;
2794 int reg_idx = ring->reg_idx;
2795 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2796 u32 rxdctl;
2797
2798 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2799 if (hw->mac.type == ixgbe_mac_82598EB &&
2800 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2801 return;
2802
2803 do {
2804 msleep(1);
2805 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2806 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
2807
2808 if (!wait_loop) {
2809 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
2810 "the polling period\n", reg_idx);
2811 }
2812}
2813
Alexander Duyck84418e32010-08-19 13:40:54 +00002814void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2815 struct ixgbe_ring *ring)
Alexander Duyckacd37172010-08-19 13:36:05 +00002816{
2817 struct ixgbe_hw *hw = &adapter->hw;
2818 u64 rdba = ring->dma;
Alexander Duyck9e10e042010-08-19 13:40:06 +00002819 u32 rxdctl;
Alexander Duyckacd37172010-08-19 13:36:05 +00002820 u16 reg_idx = ring->reg_idx;
2821
Alexander Duyck9e10e042010-08-19 13:40:06 +00002822 /* disable queue to avoid issues while updating state */
2823 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2824 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
2825 rxdctl & ~IXGBE_RXDCTL_ENABLE);
2826 IXGBE_WRITE_FLUSH(hw);
2827
Alexander Duyckacd37172010-08-19 13:36:05 +00002828 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2829 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
2830 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
2831 ring->count * sizeof(union ixgbe_adv_rx_desc));
2832 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2833 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
Alexander Duyck84ea2592010-11-16 19:26:49 -08002834 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
Alexander Duyck9e10e042010-08-19 13:40:06 +00002835
2836 ixgbe_configure_srrctl(adapter, ring);
2837 ixgbe_configure_rscctl(adapter, ring);
2838
2839 if (hw->mac.type == ixgbe_mac_82598EB) {
2840 /*
2841 * enable cache line friendly hardware writes:
2842 * PTHRESH=32 descriptors (half the internal cache),
2843 * this also removes ugly rx_no_buffer_count increment
2844 * HTHRESH=4 descriptors (to minimize latency on fetch)
2845 * WTHRESH=8 burst writeback up to two cache lines
2846 */
2847 rxdctl &= ~0x3FFFFF;
2848 rxdctl |= 0x080420;
2849 }
2850
2851 /* enable receive descriptor ring */
2852 rxdctl |= IXGBE_RXDCTL_ENABLE;
2853 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2854
2855 ixgbe_rx_desc_queue_enable(adapter, ring);
Alexander Duyckfc77dc32010-11-16 19:26:51 -08002856 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
Alexander Duyckacd37172010-08-19 13:36:05 +00002857}
2858
Alexander Duyck48654522010-08-19 13:36:27 +00002859static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2860{
2861 struct ixgbe_hw *hw = &adapter->hw;
2862 int p;
2863
 2864 /* PSRTYPE must be initialized in non-82598 adapters */
2865 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
Joe Perchese8e9f692010-09-07 21:34:53 +00002866 IXGBE_PSRTYPE_UDPHDR |
2867 IXGBE_PSRTYPE_IPV4HDR |
Alexander Duyck48654522010-08-19 13:36:27 +00002868 IXGBE_PSRTYPE_L2HDR |
Joe Perchese8e9f692010-09-07 21:34:53 +00002869 IXGBE_PSRTYPE_IPV6HDR;
Alexander Duyck48654522010-08-19 13:36:27 +00002870
2871 if (hw->mac.type == ixgbe_mac_82598EB)
2872 return;
2873
2874 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2875 psrtype |= (adapter->num_rx_queues_per_pool << 29);
2876
2877 for (p = 0; p < adapter->num_rx_pools; p++)
2878 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
2879 psrtype);
2880}
2881
Alexander Duyckf5b4a522010-08-19 13:38:57 +00002882static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2883{
2884 struct ixgbe_hw *hw = &adapter->hw;
2885 u32 gcr_ext;
2886 u32 vt_reg_bits;
2887 u32 reg_offset, vf_shift;
2888 u32 vmdctl;
2889
2890 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2891 return;
2892
2893 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2894 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2895 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2896 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2897
2898 vf_shift = adapter->num_vfs % 32;
2899 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
2900
2901 /* Enable only the PF's pool for Tx/Rx */
2902 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2903 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2904 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2905 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
2906 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2907
2908 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2909 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2910
2911 /*
2912 * Set up VF register offsets for selected VT Mode,
2913 * i.e. 32 or 64 VFs for SR-IOV
2914 */
2915 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2916 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2917 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2918 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2919
2920 /* enable Tx loopback for VF/PF communication */
2921 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2922}
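
/*
 * Sketch of the pool math above, assuming 7 VFs as an example: the PF owns
 * the pool directly after the VFs, so vf_shift = 7 % 32 = 7 and
 * reg_offset = 0; bit 7 of VFRE(0)/VFTE(0) then enables Rx/Tx for the PF
 * pool while the companion VFRE(1)/VFTE(1) registers are cleared.
 */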
2923
Alexander Duyck477de6e2010-08-19 13:38:11 +00002924static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07002925{
Auke Kok9a799d72007-09-15 14:07:45 -07002926 struct ixgbe_hw *hw = &adapter->hw;
2927 struct net_device *netdev = adapter->netdev;
2928 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002929 int rx_buf_len;
Alexander Duyck477de6e2010-08-19 13:38:11 +00002930 struct ixgbe_ring *rx_ring;
2931 int i;
2932 u32 mhadd, hlreg0;
Alexander Duyck48654522010-08-19 13:36:27 +00002933
Auke Kok9a799d72007-09-15 14:07:45 -07002934 /* Decide whether to use packet split mode or not */
Greg Rose1cdd1ec2010-01-09 02:26:46 +00002935 /* Do not use packet split if we're in SR-IOV Mode */
2936 if (!adapter->num_vfs)
2937 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07002938
2939 /* Set the RX buffer length according to the mode */
2940 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002941 rx_buf_len = IXGBE_RX_HDR_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07002942 } else {
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00002943 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
Alexander Duyckf8212f92009-04-27 22:42:37 +00002944 (netdev->mtu <= ETH_DATA_LEN))
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002945 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07002946 else
Alexander Duyck477de6e2010-08-19 13:38:11 +00002947 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2948 }
2949
2950#ifdef IXGBE_FCOE
2951 /* adjust max frame to be able to do baby jumbo for FCoE */
2952 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2953 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2954 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2955
2956#endif /* IXGBE_FCOE */
2957 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2958 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2959 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2960 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2961
2962 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
Auke Kok9a799d72007-09-15 14:07:45 -07002963 }
2964
Auke Kok9a799d72007-09-15 14:07:45 -07002965 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
Alexander Duyck477de6e2010-08-19 13:38:11 +00002966 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2967 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
Auke Kok9a799d72007-09-15 14:07:45 -07002968 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2969
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002970 /*
2971 * Setup the HW Rx Head and Tail Descriptor Pointers and
2972 * the Base and Length of the Rx Descriptor Ring
2973 */
Auke Kok9a799d72007-09-15 14:07:45 -07002974 for (i = 0; i < adapter->num_rx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00002975 rx_ring = adapter->rx_ring[i];
Yi Zoua6616b42009-08-06 13:05:23 +00002976 rx_ring->rx_buf_len = rx_buf_len;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002977
Yi Zou6e455b892009-08-06 13:05:44 +00002978 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2979 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
Peter P Waskiewicz Jr1b3ff022009-09-14 07:47:27 +00002980 else
2981 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002982
Yi Zou63f39bd2009-05-17 12:34:35 +00002983#ifdef IXGBE_FCOE
Joe Perchese8e9f692010-09-07 21:34:53 +00002984 if (netdev->features & NETIF_F_FCOE_MTU) {
Yi Zou63f39bd2009-05-17 12:34:35 +00002985 struct ixgbe_ring_feature *f;
2986 f = &adapter->ring_feature[RING_F_FCOE];
Yi Zou6e455b892009-08-06 13:05:44 +00002987 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2988 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2989 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2990 rx_ring->rx_buf_len =
Joe Perchese8e9f692010-09-07 21:34:53 +00002991 IXGBE_FCOE_JUMBO_FRAME_SIZE;
Yi Zou6e455b892009-08-06 13:05:44 +00002992 }
Yi Zou63f39bd2009-05-17 12:34:35 +00002993 }
Yi Zou63f39bd2009-05-17 12:34:35 +00002994#endif /* IXGBE_FCOE */
Alexander Duyck477de6e2010-08-19 13:38:11 +00002995 }
2996
2997}
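
/*
 * Frame-size arithmetic used above, for two representative MTUs:
 *   MTU 1500 -> max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518
 *   MTU 9000 -> max_frame = 9018; when packet split is not in use the
 *   per-buffer length becomes ALIGN(9018 + VLAN_HLEN (4), 1024) = 9216.
 */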
2998
Alexander Duyck73670962010-08-19 13:38:34 +00002999static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3000{
3001 struct ixgbe_hw *hw = &adapter->hw;
3002 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3003
3004 switch (hw->mac.type) {
3005 case ixgbe_mac_82598EB:
3006 /*
3007 * For VMDq support of different descriptor types or
3008 * buffer sizes through the use of multiple SRRCTL
3009 * registers, RDRXCTL.MVMEN must be set to 1
3010 *
3011 * also, the manual doesn't mention it clearly but DCA hints
3012 * will only use queue 0's tags unless this bit is set. Side
3013 * effects of setting this bit are only that SRRCTL must be
3014 * fully programmed [0..15]
3015 */
3016 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3017 break;
3018 case ixgbe_mac_82599EB:
3019 /* Disable RSC for ACK packets */
3020 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3021 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3022 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3023 /* hardware requires some bits to be set by default */
3024 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3025 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3026 break;
3027 default:
3028 /* We should do nothing since we don't know this hardware */
3029 return;
3030 }
3031
3032 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3033}
3034
Alexander Duyck477de6e2010-08-19 13:38:11 +00003035/**
3036 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3037 * @adapter: board private structure
3038 *
3039 * Configure the Rx unit of the MAC after a reset.
3040 **/
3041static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3042{
3043 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003044 int i;
3045 u32 rxctrl;
Alexander Duyck477de6e2010-08-19 13:38:11 +00003046
3047 /* disable receives while setting up the descriptors */
3048 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3049 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3050
3051 ixgbe_setup_psrtype(adapter);
Alexander Duyck73670962010-08-19 13:38:34 +00003052 ixgbe_setup_rdrxctl(adapter);
Alexander Duyck477de6e2010-08-19 13:38:11 +00003053
Alexander Duyck9e10e042010-08-19 13:40:06 +00003054 /* Program registers for the distribution of queues */
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003055 ixgbe_setup_mrqc(adapter);
Alexander Duyckf5b4a522010-08-19 13:38:57 +00003056
Alexander Duyck9e10e042010-08-19 13:40:06 +00003057 ixgbe_set_uta(adapter);
3058
Alexander Duyck477de6e2010-08-19 13:38:11 +00003059 /* set_rx_buffer_len must be called before ring initialization */
3060 ixgbe_set_rx_buffer_len(adapter);
3061
3062 /*
3063 * Setup the HW Rx Head and Tail Descriptor Pointers and
3064 * the Base and Length of the Rx Descriptor Ring
3065 */
Alexander Duyck9e10e042010-08-19 13:40:06 +00003066 for (i = 0; i < adapter->num_rx_queues; i++)
3067 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003068
Alexander Duyck9e10e042010-08-19 13:40:06 +00003069 /* disable drop enable for 82598 parts */
3070 if (hw->mac.type == ixgbe_mac_82598EB)
3071 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3072
3073 /* enable all receives */
3074 rxctrl |= IXGBE_RXCTRL_RXEN;
3075 hw->mac.ops.enable_rx_dma(hw, rxctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07003076}
3077
Auke Kok9a799d72007-09-15 14:07:45 -07003078static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3079{
3080 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003081 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose1ada1b12010-01-22 22:45:43 +00003082 int pool_ndx = adapter->num_vfs;
Auke Kok9a799d72007-09-15 14:07:45 -07003083
3084 /* add VID to filter table */
Greg Rose1ada1b12010-01-22 22:45:43 +00003085 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003086 set_bit(vid, adapter->active_vlans);
Auke Kok9a799d72007-09-15 14:07:45 -07003087}
3088
3089static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3090{
3091 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003092 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose1ada1b12010-01-22 22:45:43 +00003093 int pool_ndx = adapter->num_vfs;
Auke Kok9a799d72007-09-15 14:07:45 -07003094
Auke Kok9a799d72007-09-15 14:07:45 -07003095 /* remove VID from filter table */
Greg Rose1ada1b12010-01-22 22:45:43 +00003096 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003097 clear_bit(vid, adapter->active_vlans);
Auke Kok9a799d72007-09-15 14:07:45 -07003098}
3099
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003100/**
3101 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3102 * @adapter: driver data
3103 */
3104static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3105{
3106 struct ixgbe_hw *hw = &adapter->hw;
Jesse Grossf62bbb52010-10-20 13:56:10 +00003107 u32 vlnctrl;
3108
3109 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3110 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3111 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3112}
3113
3114/**
3115 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3116 * @adapter: driver data
3117 */
3118static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3119{
3120 struct ixgbe_hw *hw = &adapter->hw;
3121 u32 vlnctrl;
3122
3123 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3124 vlnctrl |= IXGBE_VLNCTRL_VFE;
3125 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3126 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3127}
3128
3129/**
3130 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3131 * @adapter: driver data
3132 */
3133static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3134{
3135 struct ixgbe_hw *hw = &adapter->hw;
3136 u32 vlnctrl;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003137 int i, j;
3138
3139 switch (hw->mac.type) {
3140 case ixgbe_mac_82598EB:
Jesse Grossf62bbb52010-10-20 13:56:10 +00003141 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3142 vlnctrl &= ~IXGBE_VLNCTRL_VME;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003143 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3144 break;
3145 case ixgbe_mac_82599EB:
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003146 for (i = 0; i < adapter->num_rx_queues; i++) {
3147 j = adapter->rx_ring[i]->reg_idx;
3148 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3149 vlnctrl &= ~IXGBE_RXDCTL_VME;
3150 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3151 }
3152 break;
3153 default:
3154 break;
3155 }
3156}
3157
3158/**
Jesse Grossf62bbb52010-10-20 13:56:10 +00003159 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003160 * @adapter: driver data
3161 */
Jesse Grossf62bbb52010-10-20 13:56:10 +00003162static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003163{
3164 struct ixgbe_hw *hw = &adapter->hw;
Jesse Grossf62bbb52010-10-20 13:56:10 +00003165 u32 vlnctrl;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003166 int i, j;
3167
3168 switch (hw->mac.type) {
3169 case ixgbe_mac_82598EB:
Jesse Grossf62bbb52010-10-20 13:56:10 +00003170 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3171 vlnctrl |= IXGBE_VLNCTRL_VME;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003172 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3173 break;
3174 case ixgbe_mac_82599EB:
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003175 for (i = 0; i < adapter->num_rx_queues; i++) {
3176 j = adapter->rx_ring[i]->reg_idx;
3177 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3178 vlnctrl |= IXGBE_RXDCTL_VME;
3179 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3180 }
3181 break;
3182 default:
3183 break;
3184 }
3185}
3186
Auke Kok9a799d72007-09-15 14:07:45 -07003187static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3188{
Jesse Grossf62bbb52010-10-20 13:56:10 +00003189 u16 vid;
Auke Kok9a799d72007-09-15 14:07:45 -07003190
Jesse Grossf62bbb52010-10-20 13:56:10 +00003191 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3192
3193 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3194 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
Auke Kok9a799d72007-09-15 14:07:45 -07003195}
3196
3197/**
Alexander Duyck28500622010-06-15 09:25:48 +00003198 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3199 * @netdev: network interface device structure
3200 *
3201 * Writes unicast address list to the RAR table.
3202 * Returns: -ENOMEM on failure/insufficient address space
3203 * 0 on no addresses written
3204 * X on writing X addresses to the RAR table
3205 **/
3206static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3207{
3208 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3209 struct ixgbe_hw *hw = &adapter->hw;
3210 unsigned int vfn = adapter->num_vfs;
3211 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3212 int count = 0;
3213
3214 /* return ENOMEM indicating insufficient memory for addresses */
3215 if (netdev_uc_count(netdev) > rar_entries)
3216 return -ENOMEM;
3217
3218 if (!netdev_uc_empty(netdev) && rar_entries) {
3219 struct netdev_hw_addr *ha;
3220 /* return error if we do not support writing to RAR table */
3221 if (!hw->mac.ops.set_rar)
3222 return -ENOMEM;
3223
3224 netdev_for_each_uc_addr(ha, netdev) {
3225 if (!rar_entries)
3226 break;
3227 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3228 vfn, IXGBE_RAH_AV);
3229 count++;
3230 }
3231 }
3232 /* write the addresses in reverse order to avoid write combining */
3233 for (; rar_entries > 0 ; rar_entries--)
3234 hw->mac.ops.clear_rar(hw, rar_entries);
3235
3236 return count;
3237}
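
/*
 * Example of the RAR budget above, assuming a part with 128 receive address
 * registers and 8 VFs in use: one entry is reserved for the PF MAC plus one
 * per VF, so rar_entries = 128 - (8 + 1) = 119 slots remain for secondary
 * unicast addresses before the caller falls back to unicast promiscuous mode.
 */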
3238
3239/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07003240 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07003241 * @netdev: network interface device structure
3242 *
Christopher Leech2c5645c2008-08-26 04:27:02 -07003243 * The set_rx_mode entry point is called whenever the unicast/multicast
3244 * address list or the network interface flags are updated. This routine is
3245 * responsible for configuring the hardware for proper unicast, multicast and
3246 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07003247 **/
Greg Rose7f870472010-01-09 02:25:29 +00003248void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07003249{
3250 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3251 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck28500622010-06-15 09:25:48 +00003252 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3253 int count;
Auke Kok9a799d72007-09-15 14:07:45 -07003254
3255 /* Check for Promiscuous and All Multicast modes */
3256
3257 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3258
Alexander Duyckf5dc4422010-08-19 13:36:49 +00003259 /* set all bits that we expect to always be set */
3260 fctrl |= IXGBE_FCTRL_BAM;
3261 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3262 fctrl |= IXGBE_FCTRL_PMCF;
3263
Alexander Duyck28500622010-06-15 09:25:48 +00003264 /* clear the bits we are changing the status of */
3265 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3266
Auke Kok9a799d72007-09-15 14:07:45 -07003267 if (netdev->flags & IFF_PROMISC) {
Emil Tantilove433ea12010-05-13 17:33:00 +00003268 hw->addr_ctrl.user_set_promisc = true;
Auke Kok9a799d72007-09-15 14:07:45 -07003269 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck28500622010-06-15 09:25:48 +00003270 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003271 /* don't hardware filter vlans in promisc mode */
3272 ixgbe_vlan_filter_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003273 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07003274 if (netdev->flags & IFF_ALLMULTI) {
3275 fctrl |= IXGBE_FCTRL_MPE;
Alexander Duyck28500622010-06-15 09:25:48 +00003276 vmolr |= IXGBE_VMOLR_MPE;
3277 } else {
3278 /*
 3279 * Write addresses to the MTA; if the attempt fails
 3280 * then we should just turn on promiscuous mode so
3281 * that we can at least receive multicast traffic
3282 */
3283 hw->mac.ops.update_mc_addr_list(hw, netdev);
3284 vmolr |= IXGBE_VMOLR_ROMPE;
Patrick McHardy746b9f02008-07-16 20:15:45 -07003285 }
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003286 ixgbe_vlan_filter_enable(adapter);
Emil Tantilove433ea12010-05-13 17:33:00 +00003287 hw->addr_ctrl.user_set_promisc = false;
Alexander Duyck28500622010-06-15 09:25:48 +00003288 /*
 3289 * Write addresses to available RAR registers; if there is not
 3290 * sufficient space to store all the addresses then enable
 3291 * unicast promiscuous mode
3292 */
3293 count = ixgbe_write_uc_addr_list(netdev);
3294 if (count < 0) {
3295 fctrl |= IXGBE_FCTRL_UPE;
3296 vmolr |= IXGBE_VMOLR_ROPE;
3297 }
3298 }
3299
3300 if (adapter->num_vfs) {
3301 ixgbe_restore_vf_multicasts(adapter);
3302 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3303 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3304 IXGBE_VMOLR_ROPE);
3305 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
Auke Kok9a799d72007-09-15 14:07:45 -07003306 }
3307
3308 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Jesse Grossf62bbb52010-10-20 13:56:10 +00003309
3310 if (netdev->features & NETIF_F_HW_VLAN_RX)
3311 ixgbe_vlan_strip_enable(adapter);
3312 else
3313 ixgbe_vlan_strip_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003314}
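
/*
 * Summary of the filtering decisions made above:
 *   IFF_PROMISC  -> FCTRL gets UPE | MPE and VLAN filtering is disabled
 *   IFF_ALLMULTI -> FCTRL gets MPE only
 *   otherwise    -> multicast list written to the MTA and unicast list to
 *                   the RARs, with UPE set only if the RAR table overflows
 */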
3315
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003316static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3317{
3318 int q_idx;
3319 struct ixgbe_q_vector *q_vector;
3320 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3321
3322 /* legacy and MSI only use one vector */
3323 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3324 q_vectors = 1;
3325
3326 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Jesse Brandeburgf0848272008-09-11 19:59:42 -07003327 struct napi_struct *napi;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003328 q_vector = adapter->q_vector[q_idx];
Jesse Brandeburgf0848272008-09-11 19:59:42 -07003329 napi = &q_vector->napi;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003330 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3331 if (!q_vector->rxr_count || !q_vector->txr_count) {
3332 if (q_vector->txr_count == 1)
3333 napi->poll = &ixgbe_clean_txonly;
3334 else if (q_vector->rxr_count == 1)
3335 napi->poll = &ixgbe_clean_rxonly;
3336 }
3337 }
Jesse Brandeburgf0848272008-09-11 19:59:42 -07003338
3339 napi_enable(napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003340 }
3341}
3342
3343static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3344{
3345 int q_idx;
3346 struct ixgbe_q_vector *q_vector;
3347 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3348
3349 /* legacy and MSI only use one vector */
3350 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3351 q_vectors = 1;
3352
3353 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00003354 q_vector = adapter->q_vector[q_idx];
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003355 napi_disable(&q_vector->napi);
3356 }
3357}
3358
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003359#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08003360/*
3361 * ixgbe_configure_dcb - Configure DCB hardware
3362 * @adapter: ixgbe adapter struct
3363 *
3364 * This is called by the driver on open to configure the DCB hardware.
3365 * This is also called by the gennetlink interface when reconfiguring
3366 * the DCB state.
3367 */
3368static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3369{
3370 struct ixgbe_hw *hw = &adapter->hw;
John Fastabend98063072010-10-28 00:59:57 +00003371 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003372
Alexander Duyck67ebd792010-08-19 13:34:04 +00003373 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3374 if (hw->mac.type == ixgbe_mac_82598EB)
3375 netif_set_gso_max_size(adapter->netdev, 65536);
3376 return;
3377 }
3378
3379 if (hw->mac.type == ixgbe_mac_82598EB)
3380 netif_set_gso_max_size(adapter->netdev, 32768);
3381
John Fastabend98063072010-10-28 00:59:57 +00003382#ifdef CONFIG_FCOE
3383 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3384 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3385#endif
3386
John Fastabend80ab1932010-11-16 19:26:45 -08003387 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
John Fastabend98063072010-10-28 00:59:57 +00003388 DCB_TX_CONFIG);
John Fastabend80ab1932010-11-16 19:26:45 -08003389 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
John Fastabend98063072010-10-28 00:59:57 +00003390 DCB_RX_CONFIG);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003391
Alexander Duyck2f90b862008-11-20 20:52:10 -08003392 /* Enable VLAN tag insert/strip */
Jesse Grossf62bbb52010-10-20 13:56:10 +00003393 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
Jesse Brandeburg5f6c0182010-04-14 16:04:23 -07003394
Alexander Duyck2f90b862008-11-20 20:52:10 -08003395 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
Alexander Duyck01fa7d92010-11-16 19:26:53 -08003396
3397 /* reconfigure the hardware */
3398 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003399}
3400
3401#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003402static void ixgbe_configure(struct ixgbe_adapter *adapter)
3403{
3404 struct net_device *netdev = adapter->netdev;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003405 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003406 int i;
3407
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003408#ifdef CONFIG_IXGBE_DCB
Alexander Duyck67ebd792010-08-19 13:34:04 +00003409 ixgbe_configure_dcb(adapter);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003410#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003411
Jesse Grossf62bbb52010-10-20 13:56:10 +00003412 ixgbe_set_rx_mode(netdev);
3413 ixgbe_restore_vlan(adapter);
3414
Yi Zoueacd73f2009-05-13 13:11:06 +00003415#ifdef IXGBE_FCOE
3416 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3417 ixgbe_configure_fcoe(adapter);
3418
3419#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003420 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3421 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00003422 adapter->tx_ring[i]->atr_sample_rate =
Joe Perchese8e9f692010-09-07 21:34:53 +00003423 adapter->atr_sample_rate;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003424 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3425 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3426 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3427 }
Alexander Duyck933d41f2010-09-07 21:34:29 +00003428 ixgbe_configure_virtualization(adapter);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003429
Auke Kok9a799d72007-09-15 14:07:45 -07003430 ixgbe_configure_tx(adapter);
3431 ixgbe_configure_rx(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003432}
3433
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003434static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3435{
3436 switch (hw->phy.type) {
3437 case ixgbe_phy_sfp_avago:
3438 case ixgbe_phy_sfp_ftl:
3439 case ixgbe_phy_sfp_intel:
3440 case ixgbe_phy_sfp_unknown:
Don Skidmoreea0a04d2010-05-18 16:00:13 +00003441 case ixgbe_phy_sfp_passive_tyco:
3442 case ixgbe_phy_sfp_passive_unknown:
3443 case ixgbe_phy_sfp_active_unknown:
3444 case ixgbe_phy_sfp_ftl_active:
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003445 return true;
3446 default:
3447 return false;
3448 }
3449}
3450
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003451/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003452 * ixgbe_sfp_link_config - set up SFP+ link
3453 * @adapter: pointer to private adapter struct
3454 **/
3455static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3456{
3457 struct ixgbe_hw *hw = &adapter->hw;
3458
3459 if (hw->phy.multispeed_fiber) {
3460 /*
3461 * In multispeed fiber setups, the device may not have
3462 * had a physical connection when the driver loaded.
3463 * If that's the case, the initial link configuration
3464 * couldn't get the MAC into 10G or 1G mode, so we'll
3465 * never have a link status change interrupt fire.
3466 * We need to try and force an autonegotiation
3467 * session, then bring up link.
3468 */
3469 hw->mac.ops.setup_sfp(hw);
3470 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3471 schedule_work(&adapter->multispeed_fiber_task);
3472 } else {
3473 /*
3474 * Direct Attach Cu and non-multispeed fiber modules
3475 * still need to be configured properly prior to
3476 * attempting link.
3477 */
3478 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3479 schedule_work(&adapter->sfp_config_module_task);
3480 }
3481}
3482
3483/**
3484 * ixgbe_non_sfp_link_config - set up non-SFP+ link
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003485 * @hw: pointer to private hardware struct
3486 *
3487 * Returns 0 on success, negative on failure
3488 **/
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003489static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003490{
3491 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00003492 bool negotiation, link_up = false;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003493 u32 ret = IXGBE_ERR_LINK_SETUP;
3494
3495 if (hw->mac.ops.check_link)
3496 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3497
3498 if (ret)
3499 goto link_cfg_out;
3500
3501 if (hw->mac.ops.get_link_capabilities)
Joe Perchese8e9f692010-09-07 21:34:53 +00003502 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3503 &negotiation);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003504 if (ret)
3505 goto link_cfg_out;
3506
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00003507 if (hw->mac.ops.setup_link)
3508 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003509link_cfg_out:
3510 return ret;
3511}
3512
Alexander Duycka34bcff2010-08-19 13:39:20 +00003513static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003514{
Auke Kok9a799d72007-09-15 14:07:45 -07003515 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duycka34bcff2010-08-19 13:39:20 +00003516 u32 gpie = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003517
Jesse Brandeburg9b471442009-12-03 11:33:54 +00003518 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Alexander Duycka34bcff2010-08-19 13:39:20 +00003519 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3520 IXGBE_GPIE_OCD;
3521 gpie |= IXGBE_GPIE_EIAME;
Jesse Brandeburg9b471442009-12-03 11:33:54 +00003522 /*
3523 * use EIAM to auto-mask when MSI-X interrupt is asserted
3524 * this saves a register write for every interrupt
3525 */
3526 switch (hw->mac.type) {
3527 case ixgbe_mac_82598EB:
3528 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3529 break;
3530 default:
3531 case ixgbe_mac_82599EB:
3532 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3533 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3534 break;
3535 }
3536 } else {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003537 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3538 * specifically only auto mask tx and rx interrupts */
3539 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07003540 }
3541
Alexander Duycka34bcff2010-08-19 13:39:20 +00003542 /* XXX: to interrupt immediately for EICS writes, enable this */
3543 /* gpie |= IXGBE_GPIE_EIMEN; */
3544
3545 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3546 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3547 gpie |= IXGBE_GPIE_VTMODE_64;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07003548 }
3549
Alexander Duycka34bcff2010-08-19 13:39:20 +00003550 /* Enable fan failure interrupt */
3551 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07003552 gpie |= IXGBE_SDP1_GPIEN;
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07003553
Alexander Duycka34bcff2010-08-19 13:39:20 +00003554 if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003555 gpie |= IXGBE_SDP1_GPIEN;
 3556 gpie |= IXGBE_SDP2_GPIEN;
 }
Alexander Duycka34bcff2010-08-19 13:39:20 +00003557
3558 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3559}
3560
3561static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3562{
3563 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duycka34bcff2010-08-19 13:39:20 +00003564 int err;
Alexander Duycka34bcff2010-08-19 13:39:20 +00003565 u32 ctrl_ext;
3566
3567 ixgbe_get_hw_control(adapter);
3568 ixgbe_setup_gpie(adapter);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003569
Auke Kok9a799d72007-09-15 14:07:45 -07003570 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3571 ixgbe_configure_msix(adapter);
3572 else
3573 ixgbe_configure_msi_and_legacy(adapter);
3574
Peter Waskiewicz61fac742010-04-27 00:38:15 +00003575 /* enable the optics */
3576 if (hw->phy.multispeed_fiber)
3577 hw->mac.ops.enable_tx_laser(hw);
3578
Auke Kok9a799d72007-09-15 14:07:45 -07003579 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003580 ixgbe_napi_enable_all(adapter);
3581
3582 /* clear any pending interrupts, may auto mask */
3583 IXGBE_READ_REG(hw, IXGBE_EICR);
Emil Tantilov6af3b9e2010-09-29 21:35:23 +00003584 ixgbe_irq_enable(adapter, true, true);
Auke Kok9a799d72007-09-15 14:07:45 -07003585
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003586 /*
Don Skidmorebf069c92009-05-07 10:39:54 +00003587 * If this adapter has a fan, check to see if we had a failure
3588 * before we enabled the interrupt.
3589 */
3590 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3591 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3592 if (esdp & IXGBE_ESDP_SDP1)
Emil Tantilov396e7992010-07-01 20:05:12 +00003593 e_crit(drv, "Fan has stopped, replace the adapter\n");
Don Skidmorebf069c92009-05-07 10:39:54 +00003594 }
3595
3596 /*
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003597 * For hot-pluggable SFP+ devices, a new SFP+ module may have
Don Skidmore19343de2009-07-02 12:50:31 +00003598 * arrived before interrupts were enabled but after probe. Such
3599 * devices wouldn't have their type identified yet. We need to
3600 * kick off the SFP+ module setup first, then try to bring up link.
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003601 * If we're not hot-pluggable SFP+, we just need to configure link
3602 * and bring it up.
3603 */
Don Skidmore19343de2009-07-02 12:50:31 +00003604 if (hw->phy.type == ixgbe_phy_unknown) {
3605 err = hw->phy.ops.identify(hw);
3606 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore5da43c12009-07-02 12:50:52 +00003607 /*
3608 * Take the device down and schedule the sfp tasklet
3609 * which will unregister_netdev and log it.
3610 */
Don Skidmore19343de2009-07-02 12:50:31 +00003611 ixgbe_down(adapter);
Don Skidmore5da43c12009-07-02 12:50:52 +00003612 schedule_work(&adapter->sfp_config_module_task);
Don Skidmore19343de2009-07-02 12:50:31 +00003613 return err;
3614 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003615 }
3616
3617 if (ixgbe_is_sfp(hw)) {
3618 ixgbe_sfp_link_config(adapter);
3619 } else {
3620 err = ixgbe_non_sfp_link_config(hw);
3621 if (err)
Emil Tantilov396e7992010-07-01 20:05:12 +00003622 e_err(probe, "link_config FAILED %d\n", err);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003623 }
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08003624
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08003625 /* enable transmits */
Alexander Duyck477de6e2010-08-19 13:38:11 +00003626 netif_tx_start_all_queues(adapter->netdev);
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08003627
Auke Kok9a799d72007-09-15 14:07:45 -07003628 /* bring the link up in the watchdog; this could race with our first
 3629 * link-up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003630 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3631 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07003632 mod_timer(&adapter->watchdog_timer, jiffies);
Greg Rosec9205692010-01-22 22:46:22 +00003633
3634 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3635 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3636 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3637 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3638
Auke Kok9a799d72007-09-15 14:07:45 -07003639 return 0;
3640}
3641
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003642void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3643{
3644 WARN_ON(in_interrupt());
3645 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3646 msleep(1);
3647 ixgbe_down(adapter);
Greg Rose5809a1a2010-03-24 09:36:08 +00003648 /*
3649 * If SR-IOV enabled then wait a bit before bringing the adapter
3650 * back up to give the VFs time to respond to the reset. The
3651 * two second wait is based upon the watchdog timer cycle in
3652 * the VF driver.
3653 */
3654 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3655 msleep(2000);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003656 ixgbe_up(adapter);
3657 clear_bit(__IXGBE_RESETTING, &adapter->state);
3658}
3659
Auke Kok9a799d72007-09-15 14:07:45 -07003660int ixgbe_up(struct ixgbe_adapter *adapter)
3661{
3662 /* hardware has been reset, we need to reload some things */
3663 ixgbe_configure(adapter);
3664
3665 return ixgbe_up_complete(adapter);
3666}
3667
3668void ixgbe_reset(struct ixgbe_adapter *adapter)
3669{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003670 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore8ca783a2009-05-26 20:40:47 -07003671 int err;
3672
3673 err = hw->mac.ops.init_hw(hw);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003674 switch (err) {
3675 case 0:
3676 case IXGBE_ERR_SFP_NOT_PRESENT:
3677 break;
3678 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
Emil Tantilov849c4542010-06-03 16:53:41 +00003679 e_dev_err("master disable timed out\n");
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003680 break;
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00003681 case IXGBE_ERR_EEPROM_VERSION:
3682 /* We are running on a pre-production device, log a warning */
Emil Tantilov849c4542010-06-03 16:53:41 +00003683 e_dev_warn("This device is a pre-production adapter/LOM. "
3684 "Please be aware there may be issuesassociated with "
3685 "your hardware. If you are experiencing problems "
3686 "please contact your Intel or hardware "
3687 "representative who provided you with this "
3688 "hardware.\n");
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00003689 break;
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003690 default:
Emil Tantilov849c4542010-06-03 16:53:41 +00003691 e_dev_err("Hardware Error: %d\n", err);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00003692 }
Auke Kok9a799d72007-09-15 14:07:45 -07003693
3694 /* reprogram the RAR[0] in case user changed it. */
Greg Rose1cdd1ec2010-01-09 02:26:46 +00003695 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3696 IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07003697}
3698
Auke Kok9a799d72007-09-15 14:07:45 -07003699/**
3700 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
Auke Kok9a799d72007-09-15 14:07:45 -07003701 * @rx_ring: ring to free buffers from
3702 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003703static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003704{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003705 struct device *dev = rx_ring->dev;
Auke Kok9a799d72007-09-15 14:07:45 -07003706 unsigned long size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003707 u16 i;
Auke Kok9a799d72007-09-15 14:07:45 -07003708
Alexander Duyck84418e32010-08-19 13:40:54 +00003709 /* ring already cleared, nothing to do */
3710 if (!rx_ring->rx_buffer_info)
3711 return;
Auke Kok9a799d72007-09-15 14:07:45 -07003712
Alexander Duyck84418e32010-08-19 13:40:54 +00003713 /* Free all the Rx ring sk_buffs */
Auke Kok9a799d72007-09-15 14:07:45 -07003714 for (i = 0; i < rx_ring->count; i++) {
3715 struct ixgbe_rx_buffer *rx_buffer_info;
3716
3717 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3718 if (rx_buffer_info->dma) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003719 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
Joe Perchese8e9f692010-09-07 21:34:53 +00003720 rx_ring->rx_buf_len,
Nick Nunley1b507732010-04-27 13:10:27 +00003721 DMA_FROM_DEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07003722 rx_buffer_info->dma = 0;
3723 }
3724 if (rx_buffer_info->skb) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00003725 struct sk_buff *skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07003726 rx_buffer_info->skb = NULL;
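			/*
			 * RSC (receive side coalescing) chains the buffers of
			 * one aggregation together via skb->prev; walk that
			 * chain and free each skb, unmapping its DMA address
			 * only if the unmap was deferred (delay_unmap set).
			 */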
Alexander Duyckf8212f92009-04-27 22:42:37 +00003727 do {
3728 struct sk_buff *this = skb;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00003729 if (IXGBE_RSC_CB(this)->delay_unmap) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003730 dma_unmap_single(dev,
Nick Nunley1b507732010-04-27 13:10:27 +00003731 IXGBE_RSC_CB(this)->dma,
Joe Perchese8e9f692010-09-07 21:34:53 +00003732 rx_ring->rx_buf_len,
Nick Nunley1b507732010-04-27 13:10:27 +00003733 DMA_FROM_DEVICE);
Mallikarjuna R Chilakalafd3686a2010-03-19 04:41:33 +00003734 IXGBE_RSC_CB(this)->dma = 0;
Mallikarjuna R Chilakalae8171aa2010-05-13 17:33:21 +00003735 IXGBE_RSC_CB(skb)->delay_unmap = false;
Mallikarjuna R Chilakalafd3686a2010-03-19 04:41:33 +00003736 }
Alexander Duyckf8212f92009-04-27 22:42:37 +00003737 skb = skb->prev;
3738 dev_kfree_skb(this);
3739 } while (skb);
Auke Kok9a799d72007-09-15 14:07:45 -07003740 }
3741 if (!rx_buffer_info->page)
3742 continue;
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00003743 if (rx_buffer_info->page_dma) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003744 dma_unmap_page(dev, rx_buffer_info->page_dma,
Nick Nunley1b507732010-04-27 13:10:27 +00003745 PAGE_SIZE / 2, DMA_FROM_DEVICE);
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00003746 rx_buffer_info->page_dma = 0;
3747 }
Auke Kok9a799d72007-09-15 14:07:45 -07003748 put_page(rx_buffer_info->page);
3749 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07003750 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003751 }
3752
3753 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3754 memset(rx_ring->rx_buffer_info, 0, size);
3755
3756 /* Zero out the descriptor ring */
3757 memset(rx_ring->desc, 0, rx_ring->size);
3758
3759 rx_ring->next_to_clean = 0;
3760 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003761}
3762
3763/**
3764 * ixgbe_clean_tx_ring - Free Tx Buffers
Auke Kok9a799d72007-09-15 14:07:45 -07003765 * @tx_ring: ring to be cleaned
3766 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003767static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003768{
3769 struct ixgbe_tx_buffer *tx_buffer_info;
3770 unsigned long size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003771 u16 i;
Auke Kok9a799d72007-09-15 14:07:45 -07003772
Alexander Duyck84418e32010-08-19 13:40:54 +00003773 /* ring already cleared, nothing to do */
3774 if (!tx_ring->tx_buffer_info)
3775 return;
Auke Kok9a799d72007-09-15 14:07:45 -07003776
Alexander Duyck84418e32010-08-19 13:40:54 +00003777 /* Free all the Tx ring sk_buffs */
Auke Kok9a799d72007-09-15 14:07:45 -07003778 for (i = 0; i < tx_ring->count; i++) {
3779 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003780 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Auke Kok9a799d72007-09-15 14:07:45 -07003781 }
3782
3783 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3784 memset(tx_ring->tx_buffer_info, 0, size);
3785
3786 /* Zero out the descriptor ring */
3787 memset(tx_ring->desc, 0, tx_ring->size);
3788
3789 tx_ring->next_to_use = 0;
3790 tx_ring->next_to_clean = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003791}
3792
3793/**
Auke Kok9a799d72007-09-15 14:07:45 -07003794 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
3795 * @adapter: board private structure
3796 **/
3797static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3798{
3799 int i;
3800
3801 for (i = 0; i < adapter->num_rx_queues; i++)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003802 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07003803}
3804
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003805/**
3806 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
3807 * @adapter: board private structure
3808 **/
3809static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3810{
3811 int i;
3812
3813 for (i = 0; i < adapter->num_tx_queues; i++)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08003814 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003815}
3816
Auke Kok9a799d72007-09-15 14:07:45 -07003817void ixgbe_down(struct ixgbe_adapter *adapter)
3818{
3819 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003820 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003821 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003822 u32 txdctl;
3823 int i, j;
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00003824 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Auke Kok9a799d72007-09-15 14:07:45 -07003825
3826 /* signal that we are down to the interrupt handler */
3827 set_bit(__IXGBE_DOWN, &adapter->state);
3828
Greg Rose767081a2010-01-22 22:46:40 +00003829 /* disable receive for all VFs and wait one second */
3830 if (adapter->num_vfs) {
Greg Rose767081a2010-01-22 22:46:40 +00003831 /* ping all the active vfs to let them know we are going down */
3832 ixgbe_ping_all_vfs(adapter);
Greg Rose581d1aa2010-03-24 09:36:27 +00003833
Greg Rose767081a2010-01-22 22:46:40 +00003834 /* Disable all VFTE/VFRE TX/RX */
3835 ixgbe_disable_tx_rx(adapter);
Greg Rose581d1aa2010-03-24 09:36:27 +00003836
3837 /* Mark all the VFs as inactive */
3838 for (i = 0 ; i < adapter->num_vfs; i++)
3839 adapter->vfinfo[i].clear_to_send = 0;
Greg Rose767081a2010-01-22 22:46:40 +00003840 }
3841
Auke Kok9a799d72007-09-15 14:07:45 -07003842 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003843 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3844 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07003845
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003846 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07003847 msleep(10);
3848
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003849 netif_tx_stop_all_queues(netdev);
3850
Don Skidmore0a1f87c2009-09-18 09:45:43 +00003851 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3852 del_timer_sync(&adapter->sfp_timer);
Auke Kok9a799d72007-09-15 14:07:45 -07003853 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003854 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07003855
John Fastabendc0dfb902010-04-27 02:13:39 +00003856 netif_carrier_off(netdev);
3857 netif_tx_disable(netdev);
3858
3859 ixgbe_irq_disable(adapter);
3860
3861 ixgbe_napi_disable_all(adapter);
3862
Peter Waskiewiczb25ebfd2010-10-05 01:27:49 +00003863 /* Cleanup the affinity_hint CPU mask memory and callback */
3864 for (i = 0; i < num_q_vectors; i++) {
3865 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
3866 /* clear the affinity_mask in the IRQ descriptor */
                3867		irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
3868 /* release the CPU mask memory */
3869 free_cpumask_var(q_vector->affinity_mask);
3870 }
3871
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003872 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3873 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3874 cancel_work_sync(&adapter->fdir_reinit_task);
3875
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07003876 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3877 cancel_work_sync(&adapter->check_overtemp_task);
3878
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003879 /* disable transmits in the hardware now that interrupts are off */
3880 for (i = 0; i < adapter->num_tx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00003881 j = adapter->tx_ring[i]->reg_idx;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003882 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3883 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
Joe Perchese8e9f692010-09-07 21:34:53 +00003884 (txdctl & ~IXGBE_TXDCTL_ENABLE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003885 }
PJ Waskiewicz88512532009-03-13 22:15:10 +00003886 /* Disable the Tx DMA engine on 82599 */
3887 if (hw->mac.type == ixgbe_mac_82599EB)
3888 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
Joe Perchese8e9f692010-09-07 21:34:53 +00003889 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3890 ~IXGBE_DMATXCTL_TE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003891
John Fastabend9f756f02010-06-29 18:28:36 +00003892 /* power down the optics */
3893 if (hw->phy.multispeed_fiber)
3894 hw->mac.ops.disable_tx_laser(hw);
3895
Peter Waskiewicz9a713e72010-02-10 16:07:54 +00003896 /* clear n-tuple filters that are cached */
3897 ethtool_ntuple_flush(netdev);
3898
Paul Larson6f4a0e42008-06-24 17:00:56 -07003899 if (!pci_channel_offline(adapter->pdev))
3900 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003901 ixgbe_clean_all_tx_rings(adapter);
3902 ixgbe_clean_all_rx_rings(adapter);
3903
Jeff Garzik5dd2d332008-10-16 05:09:31 -04003904#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07003905 /* since we reset the hardware DCA settings were cleared */
Alexander Duycke35ec122009-05-21 13:07:12 +00003906 ixgbe_setup_dca(adapter);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07003907#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003908}
3909
Auke Kok9a799d72007-09-15 14:07:45 -07003910/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003911 * ixgbe_poll - NAPI Rx polling callback
3912 * @napi: structure for representing this polling device
3913 * @budget: how many packets driver is allowed to clean
3914 *
                3915 * This function is the NAPI poll routine used in legacy and MSI (non-MSI-X) mode
Auke Kok9a799d72007-09-15 14:07:45 -07003916 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003917static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07003918{
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003919 struct ixgbe_q_vector *q_vector =
Joe Perchese8e9f692010-09-07 21:34:53 +00003920 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003921 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003922 int tx_clean_complete, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003923
Jeff Garzik5dd2d332008-10-16 05:09:31 -04003924#ifdef CONFIG_IXGBE_DCA
Alexander Duyck33cf09c2010-11-16 19:26:55 -08003925 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3926 ixgbe_update_dca(q_vector);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08003927#endif
3928
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00003929 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3930 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07003931
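	/*
	 * If Tx cleanup is not yet complete, claim the full budget so the
	 * NAPI core keeps polling rather than completing NAPI and
	 * re-enabling interrupts below.
	 */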
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003932 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003933 work_done = budget;
3934
David S. Miller53e52c72008-01-07 21:06:12 -08003935 /* If budget not fully consumed, exit the polling mode */
3936 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08003937 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003938 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08003939 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003940 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00003941 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07003942 }
Auke Kok9a799d72007-09-15 14:07:45 -07003943 return work_done;
3944}
3945
3946/**
3947 * ixgbe_tx_timeout - Respond to a Tx Hang
3948 * @netdev: network interface device structure
3949 **/
3950static void ixgbe_tx_timeout(struct net_device *netdev)
3951{
3952 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3953
3954 /* Do the reset outside of interrupt context */
3955 schedule_work(&adapter->reset_task);
3956}
3957
3958static void ixgbe_reset_task(struct work_struct *work)
3959{
3960 struct ixgbe_adapter *adapter;
3961 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3962
Alexander Duyck2f90b862008-11-20 20:52:10 -08003963 /* If we're already down or resetting, just bail */
3964 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3965 test_bit(__IXGBE_RESETTING, &adapter->state))
3966 return;
3967
Auke Kok9a799d72007-09-15 14:07:45 -07003968 adapter->tx_timeout_count++;
3969
Taku Izumidcd79ae2010-04-27 14:39:53 +00003970 ixgbe_dump(adapter);
3971 netdev_err(adapter->netdev, "Reset adapter\n");
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003972 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003973}
3974
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003975#ifdef CONFIG_IXGBE_DCB
3976static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003977{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003978 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003979 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003980
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003981 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3982 return ret;
3983
3984 f->mask = 0x7 << 3;
3985 adapter->num_rx_queues = f->indices;
3986 adapter->num_tx_queues = f->indices;
3987 ret = true;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003988
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003989 return ret;
3990}
3991#endif
3992
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003993/**
3994 * ixgbe_set_rss_queues: Allocate queues for RSS
3995 * @adapter: board private structure to initialize
3996 *
3997 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3998 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3999 *
4000 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004001static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
4002{
4003 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00004004 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004005
4006 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00004007 f->mask = 0xF;
4008 adapter->num_rx_queues = f->indices;
4009 adapter->num_tx_queues = f->indices;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004010 ret = true;
4011 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004012 ret = false;
4013 }
4014
4015 return ret;
4016}
4017
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004018/**
4019 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
4020 * @adapter: board private structure to initialize
4021 *
4022 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
4023 * to the original CPU that initiated the Tx session. This runs in addition
4024 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
4025 * Rx load across CPUs using RSS.
4026 *
4027 **/
Joe Perchese8e9f692010-09-07 21:34:53 +00004028static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004029{
4030 bool ret = false;
4031 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4032
4033 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4034 f_fdir->mask = 0;
4035
4036 /* Flow Director must have RSS enabled */
4037 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4038 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4039 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4040 adapter->num_tx_queues = f_fdir->indices;
4041 adapter->num_rx_queues = f_fdir->indices;
4042 ret = true;
4043 } else {
4044 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4045 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4046 }
4047 return ret;
4048}
4049
Yi Zou0331a832009-05-17 12:33:52 +00004050#ifdef IXGBE_FCOE
4051/**
4052 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
4053 * @adapter: board private structure to initialize
4054 *
4055 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4056 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
4057 * rx queues out of the max number of rx queues, instead, it is used as the
                4058 * rx queues out of the max number of rx queues; instead, it is used as the
4059 *
4060 **/
4061static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4062{
4063 bool ret = false;
4064 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4065
4066 f->indices = min((int)num_online_cpus(), f->indices);
4067 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00004068 adapter->num_rx_queues = 1;
4069 adapter->num_tx_queues = 1;
Yi Zou0331a832009-05-17 12:33:52 +00004070#ifdef CONFIG_IXGBE_DCB
4071 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Emil Tantilov396e7992010-07-01 20:05:12 +00004072 e_info(probe, "FCoE enabled with DCB\n");
Yi Zou0331a832009-05-17 12:33:52 +00004073 ixgbe_set_dcb_queues(adapter);
4074 }
4075#endif
4076 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Emil Tantilov396e7992010-07-01 20:05:12 +00004077 e_info(probe, "FCoE enabled with RSS\n");
Yi Zou8faa2a72009-07-09 02:29:50 +00004078 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4079 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4080 ixgbe_set_fdir_queues(adapter);
4081 else
4082 ixgbe_set_rss_queues(adapter);
Yi Zou0331a832009-05-17 12:33:52 +00004083 }
4084 /* adding FCoE rx rings to the end */
4085 f->mask = adapter->num_rx_queues;
4086 adapter->num_rx_queues += f->indices;
Yi Zou8de8b2e2009-09-03 14:55:50 +00004087 adapter->num_tx_queues += f->indices;
Yi Zou0331a832009-05-17 12:33:52 +00004088
4089 ret = true;
4090 }
4091
4092 return ret;
4093}
4094
4095#endif /* IXGBE_FCOE */
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004096/**
4097 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4098 * @adapter: board private structure to initialize
4099 *
4100 * IOV doesn't actually use anything, so just NAK the
4101 * request for now and let the other queue routines
4102 * figure out what to do.
4103 */
4104static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4105{
4106 return false;
4107}
4108
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004109/*
                4110 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4111 * @adapter: board private structure to initialize
4112 *
4113 * This is the top level queue allocation routine. The order here is very
4114 * important, starting with the "most" number of features turned on at once,
4115 * and ending with the smallest set of features. This way large combinations
4116 * can be allocated if they're turned on, and smaller combinations are the
4117 * fallthrough conditions.
4118 *
4119 **/
Ben Hutchings847f53f2010-09-27 08:28:56 +00004120static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004121{
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004122 /* Start with base case */
4123 adapter->num_rx_queues = 1;
4124 adapter->num_tx_queues = 1;
4125 adapter->num_rx_pools = adapter->num_rx_queues;
4126 adapter->num_rx_queues_per_pool = 1;
4127
4128 if (ixgbe_set_sriov_queues(adapter))
Ben Hutchings847f53f2010-09-27 08:28:56 +00004129 goto done;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004130
Yi Zou0331a832009-05-17 12:33:52 +00004131#ifdef IXGBE_FCOE
4132 if (ixgbe_set_fcoe_queues(adapter))
4133 goto done;
4134
4135#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004136#ifdef CONFIG_IXGBE_DCB
4137 if (ixgbe_set_dcb_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07004138 goto done;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004139
4140#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004141 if (ixgbe_set_fdir_queues(adapter))
4142 goto done;
4143
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004144 if (ixgbe_set_rss_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07004145 goto done;
4146
4147 /* fallback to base case */
4148 adapter->num_rx_queues = 1;
4149 adapter->num_tx_queues = 1;
4150
4151done:
Ben Hutchings847f53f2010-09-27 08:28:56 +00004152 /* Notify the stack of the (possibly) reduced queue counts. */
John Fastabendf0796d52010-07-01 13:21:57 +00004153 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
Ben Hutchings847f53f2010-09-27 08:28:56 +00004154 return netif_set_real_num_rx_queues(adapter->netdev,
4155 adapter->num_rx_queues);
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004156}
4157
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004158static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00004159 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004160{
4161 int err, vector_threshold;
4162
4163 /* We'll want at least 3 (vector_threshold):
4164 * 1) TxQ[0] Cleanup
4165 * 2) RxQ[0] Cleanup
4166 * 3) Other (Link Status Change, etc.)
4167 * 4) TCP Timer (optional)
4168 */
4169 vector_threshold = MIN_MSIX_COUNT;
4170
4171 /* The more we get, the more we will assign to Tx/Rx Cleanup
4172 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4173 * Right now, we simply care about how many we'll get; we'll
4174 * set them up later while requesting irq's.
4175 */
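	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on a
	 * hard failure, or the number of vectors that could have been
	 * allocated; in that last case we simply retry with the smaller
	 * count until we drop below vector_threshold.
	 */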
4176 while (vectors >= vector_threshold) {
4177 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Joe Perchese8e9f692010-09-07 21:34:53 +00004178 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004179 if (!err) /* Success in acquiring all requested vectors. */
4180 break;
4181 else if (err < 0)
4182 vectors = 0; /* Nasty failure, quit now */
4183 else /* err == number of vectors we should try again with */
4184 vectors = err;
4185 }
4186
4187 if (vectors < vector_threshold) {
4188 /* Can't allocate enough MSI-X interrupts? Oh well.
4189 * This just means we'll go with either a single MSI
4190 * vector or fall back to legacy interrupts.
4191 */
Emil Tantilov849c4542010-06-03 16:53:41 +00004192 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4193 "Unable to allocate MSI-X interrupts\n");
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004194 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4195 kfree(adapter->msix_entries);
4196 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004197 } else {
4198 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
Peter P Waskiewicz Jreb7f1392009-02-01 01:18:58 -08004199 /*
                4200		 * Adjust for only the vectors we'll use, which is the minimum
                4201		 * of max_msix_q_vectors + NON_Q_VECTORS and the number of
4202 * vectors we were allocated.
4203 */
4204 adapter->num_msix_vectors = min(vectors,
Joe Perchese8e9f692010-09-07 21:34:53 +00004205 adapter->max_msix_q_vectors + NON_Q_VECTORS);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004206 }
4207}
4208
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004209/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004210 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004211 * @adapter: board private structure to initialize
4212 *
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004213 * Cache the descriptor ring offsets for RSS to the assigned rings.
4214 *
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004215 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004216static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004217{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004218 int i;
4219 bool ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004220
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004221 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4222 for (i = 0; i < adapter->num_rx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004223 adapter->rx_ring[i]->reg_idx = i;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004224 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004225 adapter->tx_ring[i]->reg_idx = i;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004226 ret = true;
4227 } else {
4228 ret = false;
4229 }
4230
4231 return ret;
4232}
4233
4234#ifdef CONFIG_IXGBE_DCB
4235/**
4236 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4237 * @adapter: board private structure to initialize
4238 *
4239 * Cache the descriptor ring offsets for DCB to the assigned rings.
4240 *
4241 **/
4242static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4243{
4244 int i;
4245 bool ret = false;
4246 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4247
4248 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4249 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08004250 /* the number of queues is assumed to be symmetric */
4251 for (i = 0; i < dcb_i; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004252 adapter->rx_ring[i]->reg_idx = i << 3;
4253 adapter->tx_ring[i]->reg_idx = i << 2;
Alexander Duyck2f90b862008-11-20 20:52:10 -08004254 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004255 ret = true;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004256 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00004257 if (dcb_i == 8) {
4258 /*
4259 * Tx TC0 starts at: descriptor queue 0
4260 * Tx TC1 starts at: descriptor queue 32
4261 * Tx TC2 starts at: descriptor queue 64
4262 * Tx TC3 starts at: descriptor queue 80
4263 * Tx TC4 starts at: descriptor queue 96
4264 * Tx TC5 starts at: descriptor queue 104
4265 * Tx TC6 starts at: descriptor queue 112
4266 * Tx TC7 starts at: descriptor queue 120
4267 *
4268 * Rx TC0-TC7 are offset by 16 queues each
4269 */
4270 for (i = 0; i < 3; i++) {
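				/*
				 * The shifts below reproduce that table, e.g.
				 * i = 1 -> Tx reg_idx 1 << 5 = 32,
				 * i = 4 -> Tx reg_idx (4 + 2) << 4 = 96,
				 * i = 7 -> Tx reg_idx (7 + 8) << 3 = 120,
				 * while every Rx TC starts at i << 4.
				 */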
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004271 adapter->tx_ring[i]->reg_idx = i << 5;
4272 adapter->rx_ring[i]->reg_idx = i << 4;
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00004273 }
4274 for ( ; i < 5; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004275 adapter->tx_ring[i]->reg_idx =
Joe Perchese8e9f692010-09-07 21:34:53 +00004276 ((i + 2) << 4);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004277 adapter->rx_ring[i]->reg_idx = i << 4;
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00004278 }
4279 for ( ; i < dcb_i; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004280 adapter->tx_ring[i]->reg_idx =
Joe Perchese8e9f692010-09-07 21:34:53 +00004281 ((i + 8) << 3);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004282 adapter->rx_ring[i]->reg_idx = i << 4;
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00004283 }
4284
4285 ret = true;
4286 } else if (dcb_i == 4) {
4287 /*
4288 * Tx TC0 starts at: descriptor queue 0
4289 * Tx TC1 starts at: descriptor queue 64
4290 * Tx TC2 starts at: descriptor queue 96
4291 * Tx TC3 starts at: descriptor queue 112
4292 *
4293 * Rx TC0-TC3 are offset by 32 queues each
4294 */
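				/*
				 * Rx reg_idx is i << 5, i.e. 0, 32, 64 and 96
				 * for TC0-TC3, matching the 32-queue offsets
				 * noted above.
				 */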
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004295 adapter->tx_ring[0]->reg_idx = 0;
4296 adapter->tx_ring[1]->reg_idx = 64;
4297 adapter->tx_ring[2]->reg_idx = 96;
4298 adapter->tx_ring[3]->reg_idx = 112;
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00004299 for (i = 0 ; i < dcb_i; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004300 adapter->rx_ring[i]->reg_idx = i << 5;
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00004301
4302 ret = true;
4303 } else {
4304 ret = false;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004305 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004306 } else {
4307 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004308 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004309 } else {
4310 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004311 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004312
4313 return ret;
4314}
4315#endif
4316
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004317/**
4318 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4319 * @adapter: board private structure to initialize
4320 *
4321 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4322 *
4323 **/
Joe Perchese8e9f692010-09-07 21:34:53 +00004324static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004325{
4326 int i;
4327 bool ret = false;
4328
4329 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4330 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4331 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4332 for (i = 0; i < adapter->num_rx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004333 adapter->rx_ring[i]->reg_idx = i;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004334 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004335 adapter->tx_ring[i]->reg_idx = i;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004336 ret = true;
4337 }
4338
4339 return ret;
4340}
4341
Yi Zou0331a832009-05-17 12:33:52 +00004342#ifdef IXGBE_FCOE
4343/**
4344 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4345 * @adapter: board private structure to initialize
4346 *
4347 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4348 *
4349 */
4350static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4351{
Yi Zou8de8b2e2009-09-03 14:55:50 +00004352 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
Yi Zou0331a832009-05-17 12:33:52 +00004353 bool ret = false;
4354 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4355
4356 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4357#ifdef CONFIG_IXGBE_DCB
4358 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00004359 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4360
Yi Zou0331a832009-05-17 12:33:52 +00004361 ixgbe_cache_ring_dcb(adapter);
Yi Zou8de8b2e2009-09-03 14:55:50 +00004362 /* find out queues in TC for FCoE */
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004363 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4364 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
Yi Zou8de8b2e2009-09-03 14:55:50 +00004365 /*
4366 * In 82599, the number of Tx queues for each traffic
4367 * class for both 8-TC and 4-TC modes are:
4368 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4369 * 8 TCs: 32 32 16 16 8 8 8 8
4370 * 4 TCs: 64 64 32 32
                4371				 * We have max 8 queues for FCoE, where 8 is the
4372 * FCoE redirection table size. If TC for FCoE is
4373 * less than or equal to TC3, we have enough queues
4374 * to add max of 8 queues for FCoE, so we start FCoE
4375 * tx descriptor from the next one, i.e., reg_idx + 1.
4376 * If TC for FCoE is above TC3, implying 8 TC mode,
4377 * and we need 8 for FCoE, we have to take all queues
4378 * in that traffic class for FCoE.
4379 */
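			/*
			 * e.g. with 8 TCs and fcoe->tc == 4, TC4 owns Tx
			 * queues 96-103 only, so fcoe_tx_i steps back from
			 * reg_idx + 1 to reg_idx in order to use all 8.
			 */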
4380 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4381 fcoe_tx_i--;
Yi Zou0331a832009-05-17 12:33:52 +00004382 }
4383#endif /* CONFIG_IXGBE_DCB */
4384 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8faa2a72009-07-09 02:29:50 +00004385 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4386 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4387 ixgbe_cache_ring_fdir(adapter);
4388 else
4389 ixgbe_cache_ring_rss(adapter);
4390
Yi Zou8de8b2e2009-09-03 14:55:50 +00004391 fcoe_rx_i = f->mask;
4392 fcoe_tx_i = f->mask;
Yi Zou0331a832009-05-17 12:33:52 +00004393 }
Yi Zou8de8b2e2009-09-03 14:55:50 +00004394 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004395 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4396 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
Yi Zou8de8b2e2009-09-03 14:55:50 +00004397 }
Yi Zou0331a832009-05-17 12:33:52 +00004398 ret = true;
4399 }
4400 return ret;
4401}
4402
4403#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004404/**
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004405 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4406 * @adapter: board private structure to initialize
4407 *
4408 * SR-IOV doesn't use any descriptor rings but changes the default if
4409 * no other mapping is used.
4410 *
4411 */
4412static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4413{
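	/*
	 * The PF takes the pool that follows the last VF; assuming two
	 * descriptor queues per pool in this mode, its first queue pair
	 * therefore starts at index num_vfs * 2.
	 */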
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004414 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4415 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004416 if (adapter->num_vfs)
4417 return true;
4418 else
4419 return false;
4420}
4421
4422/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004423 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4424 * @adapter: board private structure to initialize
4425 *
4426 * Once we know the feature-set enabled for the device, we'll cache
4427 * the register offset the descriptor ring is assigned to.
4428 *
                4429 * Note, the order of the various feature calls is important. It must start with
4430 * the "most" features enabled at the same time, then trickle down to the
4431 * least amount of features turned on at once.
4432 **/
4433static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4434{
4435 /* start with default case */
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004436 adapter->rx_ring[0]->reg_idx = 0;
4437 adapter->tx_ring[0]->reg_idx = 0;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004438
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004439 if (ixgbe_cache_ring_sriov(adapter))
4440 return;
4441
Yi Zou0331a832009-05-17 12:33:52 +00004442#ifdef IXGBE_FCOE
4443 if (ixgbe_cache_ring_fcoe(adapter))
4444 return;
4445
4446#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004447#ifdef CONFIG_IXGBE_DCB
4448 if (ixgbe_cache_ring_dcb(adapter))
4449 return;
4450
4451#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004452 if (ixgbe_cache_ring_fdir(adapter))
4453 return;
4454
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08004455 if (ixgbe_cache_ring_rss(adapter))
4456 return;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004457}
4458
Auke Kok9a799d72007-09-15 14:07:45 -07004459/**
4460 * ixgbe_alloc_queues - Allocate memory for all rings
4461 * @adapter: board private structure to initialize
4462 *
4463 * We allocate one ring per queue at run-time since we don't know the
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004464 * number of queues at compile-time. The polling_netdev array is
4465 * intended for Multiqueue, but should work fine with a single queue.
Auke Kok9a799d72007-09-15 14:07:45 -07004466 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08004467static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07004468{
4469 int i;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004470 int rx_count;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004471 int orig_node = adapter->node;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004472
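	/*
	 * With no NUMA node pinned (orig_node == -1), walk the online nodes
	 * round-robin so ring memory is spread across the system, and fall
	 * back to a plain kzalloc() if the node-local allocation fails.
	 */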
4473 for (i = 0; i < adapter->num_tx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004474 struct ixgbe_ring *ring = adapter->tx_ring[i];
4475 if (orig_node == -1) {
4476 int cur_node = next_online_node(adapter->node);
4477 if (cur_node == MAX_NUMNODES)
4478 cur_node = first_online_node;
4479 adapter->node = cur_node;
4480 }
4481 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
Joe Perchese8e9f692010-09-07 21:34:53 +00004482 adapter->node);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004483 if (!ring)
4484 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4485 if (!ring)
4486 goto err_tx_ring_allocation;
4487 ring->count = adapter->tx_ring_count;
4488 ring->queue_index = i;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004489 ring->dev = &adapter->pdev->dev;
Alexander Duyckfc77dc32010-11-16 19:26:51 -08004490 ring->netdev = adapter->netdev;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004491 ring->numa_node = adapter->node;
4492
4493 adapter->tx_ring[i] = ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004494 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07004495
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004496 /* Restore the adapter's original node */
4497 adapter->node = orig_node;
4498
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004499 rx_count = adapter->rx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004500 for (i = 0; i < adapter->num_rx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004501 struct ixgbe_ring *ring = adapter->rx_ring[i];
4502 if (orig_node == -1) {
4503 int cur_node = next_online_node(adapter->node);
4504 if (cur_node == MAX_NUMNODES)
4505 cur_node = first_online_node;
4506 adapter->node = cur_node;
4507 }
4508 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
Joe Perchese8e9f692010-09-07 21:34:53 +00004509 adapter->node);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004510 if (!ring)
4511 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4512 if (!ring)
4513 goto err_rx_ring_allocation;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004514 ring->count = rx_count;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004515 ring->queue_index = i;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004516 ring->dev = &adapter->pdev->dev;
Alexander Duyckfc77dc32010-11-16 19:26:51 -08004517 ring->netdev = adapter->netdev;
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004518 ring->numa_node = adapter->node;
4519
4520 adapter->rx_ring[i] = ring;
Auke Kok9a799d72007-09-15 14:07:45 -07004521 }
4522
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004523 /* Restore the adapter's original node */
4524 adapter->node = orig_node;
4525
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004526 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004527
4528 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004529
4530err_rx_ring_allocation:
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004531 for (i = 0; i < adapter->num_tx_queues; i++)
4532 kfree(adapter->tx_ring[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004533err_tx_ring_allocation:
4534 return -ENOMEM;
4535}
4536
4537/**
4538 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4539 * @adapter: board private structure to initialize
4540 *
4541 * Attempt to configure the interrupts using the best available
4542 * capabilities of the hardware and the kernel.
4543 **/
Al Virofeea6a52008-11-27 15:34:07 -08004544static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004545{
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00004546 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004547 int err = 0;
4548 int vector, v_budget;
4549
4550 /*
4551 * It's easy to be greedy for MSI-X vectors, but it really
4552 * doesn't do us much good if we have a lot more vectors
4553 * than CPU's. So let's be conservative and only ask for
PJ Waskiewicz342bde12009-11-12 23:50:43 +00004554 * (roughly) the same number of vectors as there are CPU's.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004555 */
4556 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Joe Perchese8e9f692010-09-07 21:34:53 +00004557 (int)num_online_cpus()) + NON_Q_VECTORS;
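	/*
	 * e.g. 16 online CPUs with 16 Rx and 16 Tx queues yields
	 * min(32, 16) + NON_Q_VECTORS vectors here, before the
	 * hardware cap below is applied.
	 */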
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004558
4559 /*
4560 * At the same time, hardware can only support a maximum of
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00004561 * hw.mac->max_msix_vectors vectors. With features
4562 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4563 * descriptor queues supported by our device. Thus, we cap it off in
                4564	 * those rare cases where the CPU count also exceeds our vector limit.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004565 */
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00004566 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004567
4568 /* A failure in MSI-X entry allocation isn't fatal, but it does
4569 * mean we disable MSI-X capabilities of the adapter. */
4570 adapter->msix_entries = kcalloc(v_budget,
Joe Perchese8e9f692010-09-07 21:34:53 +00004571 sizeof(struct msix_entry), GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004572 if (adapter->msix_entries) {
4573 for (vector = 0; vector < v_budget; vector++)
4574 adapter->msix_entries[vector].entry = vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004575
Alexander Duyck7a921c92009-05-06 10:43:28 +00004576 ixgbe_acquire_msix_vectors(adapter, v_budget);
4577
4578 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4579 goto out;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004580 }
David S. Miller26d27842010-05-03 15:18:22 -07004581
Alexander Duyck7a921c92009-05-06 10:43:28 +00004582 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4583 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004584 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4585 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4586 adapter->atr_sample_rate = 0;
Greg Rose1cdd1ec2010-01-09 02:26:46 +00004587 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4588 ixgbe_disable_sriov(adapter);
4589
Ben Hutchings847f53f2010-09-27 08:28:56 +00004590 err = ixgbe_set_num_queues(adapter);
4591 if (err)
4592 return err;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004593
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004594 err = pci_enable_msi(adapter->pdev);
4595 if (!err) {
4596 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4597 } else {
Emil Tantilov849c4542010-06-03 16:53:41 +00004598 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4599 "Unable to allocate MSI interrupt, "
4600 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004601 /* reset err */
4602 err = 0;
4603 }
4604
4605out:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004606 return err;
4607}
4608
Alexander Duyck7a921c92009-05-06 10:43:28 +00004609/**
4610 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4611 * @adapter: board private structure to initialize
4612 *
4613 * We allocate one q_vector per queue interrupt. If allocation fails we
4614 * return -ENOMEM.
4615 **/
4616static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4617{
4618 int q_idx, num_q_vectors;
4619 struct ixgbe_q_vector *q_vector;
4620 int napi_vectors;
4621 int (*poll)(struct napi_struct *, int);
4622
4623 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4624 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4625 napi_vectors = adapter->num_rx_queues;
Alexander Duyck91281fd2009-06-04 16:00:27 +00004626 poll = &ixgbe_clean_rxtx_many;
Alexander Duyck7a921c92009-05-06 10:43:28 +00004627 } else {
4628 num_q_vectors = 1;
4629 napi_vectors = 1;
4630 poll = &ixgbe_poll;
4631 }
4632
4633 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004634 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
Joe Perchese8e9f692010-09-07 21:34:53 +00004635 GFP_KERNEL, adapter->node);
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004636 if (!q_vector)
4637 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
Joe Perchese8e9f692010-09-07 21:34:53 +00004638 GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004639 if (!q_vector)
4640 goto err_out;
4641 q_vector->adapter = adapter;
Nelson, Shannonf7554a22009-09-18 09:46:06 +00004642 if (q_vector->txr_count && !q_vector->rxr_count)
4643 q_vector->eitr = adapter->tx_eitr_param;
4644 else
4645 q_vector->eitr = adapter->rx_eitr_param;
Alexander Duyckfe49f042009-06-04 16:00:09 +00004646 q_vector->v_idx = q_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00004647 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004648 adapter->q_vector[q_idx] = q_vector;
4649 }
4650
4651 return 0;
4652
4653err_out:
4654 while (q_idx) {
4655 q_idx--;
4656 q_vector = adapter->q_vector[q_idx];
4657 netif_napi_del(&q_vector->napi);
4658 kfree(q_vector);
4659 adapter->q_vector[q_idx] = NULL;
4660 }
4661 return -ENOMEM;
4662}
4663
4664/**
4665 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4666 * @adapter: board private structure to initialize
4667 *
4668 * This function frees the memory allocated to the q_vectors. In addition if
4669 * NAPI is enabled it will delete any references to the NAPI struct prior
4670 * to freeing the q_vector.
4671 **/
4672static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4673{
4674 int q_idx, num_q_vectors;
Alexander Duyck7a921c92009-05-06 10:43:28 +00004675
Alexander Duyck91281fd2009-06-04 16:00:27 +00004676 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
Alexander Duyck7a921c92009-05-06 10:43:28 +00004677 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Alexander Duyck91281fd2009-06-04 16:00:27 +00004678 else
Alexander Duyck7a921c92009-05-06 10:43:28 +00004679 num_q_vectors = 1;
Alexander Duyck7a921c92009-05-06 10:43:28 +00004680
4681 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4682 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
Alexander Duyck7a921c92009-05-06 10:43:28 +00004683 adapter->q_vector[q_idx] = NULL;
Alexander Duyck91281fd2009-06-04 16:00:27 +00004684 netif_napi_del(&q_vector->napi);
Alexander Duyck7a921c92009-05-06 10:43:28 +00004685 kfree(q_vector);
4686 }
4687}
4688
Don Skidmore7b25cdb2009-08-25 04:47:32 +00004689static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004690{
4691 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4692 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4693 pci_disable_msix(adapter->pdev);
4694 kfree(adapter->msix_entries);
4695 adapter->msix_entries = NULL;
4696 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4697 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4698 pci_disable_msi(adapter->pdev);
4699 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004700}
4701
4702/**
4703 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
4704 * @adapter: board private structure to initialize
4705 *
4706 * We determine which interrupt scheme to use based on...
4707 * - Kernel support (MSI, MSI-X)
4708 * - which can be user-defined (via MODULE_PARAM)
4709 * - Hardware queue count (num_*_queues)
4710 * - defined by miscellaneous hardware support/features (RSS, etc.)
4711 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08004712int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004713{
4714 int err;
4715
4716 /* Number of supported queues */
Ben Hutchings847f53f2010-09-27 08:28:56 +00004717 err = ixgbe_set_num_queues(adapter);
4718 if (err)
4719 return err;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004720
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004721 err = ixgbe_set_interrupt_capability(adapter);
4722 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004723 e_dev_err("Unable to setup interrupt capabilities\n");
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004724 goto err_set_interrupt;
4725 }
4726
Alexander Duyck7a921c92009-05-06 10:43:28 +00004727 err = ixgbe_alloc_q_vectors(adapter);
4728 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004729 e_dev_err("Unable to allocate memory for queue vectors\n");
Alexander Duyck7a921c92009-05-06 10:43:28 +00004730 goto err_alloc_q_vectors;
4731 }
4732
4733 err = ixgbe_alloc_queues(adapter);
4734 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004735 e_dev_err("Unable to allocate memory for queues\n");
Alexander Duyck7a921c92009-05-06 10:43:28 +00004736 goto err_alloc_queues;
4737 }
4738
Emil Tantilov849c4542010-06-03 16:53:41 +00004739 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
Emil Tantilov396e7992010-07-01 20:05:12 +00004740 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4741 adapter->num_rx_queues, adapter->num_tx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004742
4743 set_bit(__IXGBE_DOWN, &adapter->state);
4744
4745 return 0;
4746
Alexander Duyck7a921c92009-05-06 10:43:28 +00004747err_alloc_queues:
4748 ixgbe_free_q_vectors(adapter);
4749err_alloc_q_vectors:
4750 ixgbe_reset_interrupt_capability(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004751err_set_interrupt:
Alexander Duyck7a921c92009-05-06 10:43:28 +00004752 return err;
4753}
4754
Eric Dumazet1a515022010-11-16 19:26:42 -08004755static void ring_free_rcu(struct rcu_head *head)
4756{
4757 kfree(container_of(head, struct ixgbe_ring, rcu));
4758}
4759
Alexander Duyck7a921c92009-05-06 10:43:28 +00004760/**
4761 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4762 * @adapter: board private structure to clear interrupt scheme on
4763 *
4764 * We go through and clear interrupt specific resources and reset the structure
4765 * to pre-load conditions
4766 **/
4767void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4768{
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004769 int i;
4770
4771 for (i = 0; i < adapter->num_tx_queues; i++) {
4772 kfree(adapter->tx_ring[i]);
4773 adapter->tx_ring[i] = NULL;
4774 }
4775 for (i = 0; i < adapter->num_rx_queues; i++) {
Eric Dumazet1a515022010-11-16 19:26:42 -08004776 struct ixgbe_ring *ring = adapter->rx_ring[i];
4777
4778 /* ixgbe_get_stats64() might access this ring, we must wait
4779 * a grace period before freeing it.
4780 */
4781 call_rcu(&ring->rcu, ring_free_rcu);
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004782 adapter->rx_ring[i] = NULL;
4783 }
Alexander Duyck7a921c92009-05-06 10:43:28 +00004784
4785 ixgbe_free_q_vectors(adapter);
4786 ixgbe_reset_interrupt_capability(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004787}
4788
4789/**
Donald Skidmorec4900be2008-11-20 21:11:42 -08004790 * ixgbe_sfp_timer - timer that schedules the worker looking for a missing module
4791 * @data: pointer to our adapter struct
4792 **/
4793static void ixgbe_sfp_timer(unsigned long data)
4794{
4795 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4796
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004797 /*
4798 * Do the sfp_timer outside of interrupt context due to the
Donald Skidmorec4900be2008-11-20 21:11:42 -08004799 * delays that sfp+ detection requires
4800 */
4801 schedule_work(&adapter->sfp_task);
4802}
4803
4804/**
4805 * ixgbe_sfp_task - worker thread to find a missing module
4806 * @work: pointer to work_struct containing our data
4807 **/
4808static void ixgbe_sfp_task(struct work_struct *work)
4809{
4810 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00004811 struct ixgbe_adapter,
4812 sfp_task);
Donald Skidmorec4900be2008-11-20 21:11:42 -08004813 struct ixgbe_hw *hw = &adapter->hw;
4814
4815 if ((hw->phy.type == ixgbe_phy_nl) &&
4816 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4817 s32 ret = hw->phy.ops.identify_sfp(hw);
Don Skidmore63d6e1d2009-07-02 12:50:12 +00004818 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
Donald Skidmorec4900be2008-11-20 21:11:42 -08004819 goto reschedule;
4820 ret = hw->phy.ops.reset(hw);
4821 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004822 e_dev_err("failed to initialize because an unsupported "
4823 "SFP+ module type was detected.\n");
4824 e_dev_err("Reload the driver after installing a "
4825 "supported module.\n");
Donald Skidmorec4900be2008-11-20 21:11:42 -08004826 unregister_netdev(adapter->netdev);
4827 } else {
Emil Tantilov396e7992010-07-01 20:05:12 +00004828 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
Donald Skidmorec4900be2008-11-20 21:11:42 -08004829 }
4830 /* don't need this routine any more */
4831 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4832 }
4833 return;
4834reschedule:
4835 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4836 mod_timer(&adapter->sfp_timer,
Joe Perchese8e9f692010-09-07 21:34:53 +00004837 round_jiffies(jiffies + (2 * HZ)));
Donald Skidmorec4900be2008-11-20 21:11:42 -08004838}
4839
4840/**
Auke Kok9a799d72007-09-15 14:07:45 -07004841 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4842 * @adapter: board private structure to initialize
4843 *
4844 * ixgbe_sw_init initializes the Adapter private data structure.
4845 * Fields are initialized based on PCI device information and
4846 * OS network device settings (MTU size).
4847 **/
4848static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4849{
4850 struct ixgbe_hw *hw = &adapter->hw;
4851 struct pci_dev *pdev = adapter->pdev;
Peter Waskiewicz9a713e72010-02-10 16:07:54 +00004852 struct net_device *dev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004853 unsigned int rss;
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08004854#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08004855 int j;
4856 struct tc_configuration *tc;
4857#endif
John Fastabend16b61be2010-11-16 19:26:44 -08004858 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004859
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004860 /* PCI config space info */
4861
4862 hw->vendor_id = pdev->vendor;
4863 hw->device_id = pdev->device;
4864 hw->revision_id = pdev->revision;
4865 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4866 hw->subsystem_device_id = pdev->subsystem_device;
4867
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004868 /* Set capability flags */
4869 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
4870 adapter->ring_feature[RING_F_RSS].indices = rss;
4871 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
Alexander Duyck2f90b862008-11-20 20:52:10 -08004872 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
Don Skidmorebf069c92009-05-07 10:39:54 +00004873 if (hw->mac.type == ixgbe_mac_82598EB) {
4874 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4875 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004876 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
Don Skidmorebf069c92009-05-07 10:39:54 +00004877 } else if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004878 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00004879 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4880 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07004881 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4882 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
Peter Waskiewicz9a713e72010-02-10 16:07:54 +00004883 if (dev->features & NETIF_F_NTUPLE) {
4884 /* Flow Director perfect filter enabled */
4885 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4886 adapter->atr_sample_rate = 0;
4887 spin_lock_init(&adapter->fdir_perfect_lock);
4888 } else {
4889 /* Flow Director hash filters enabled */
4890 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4891 adapter->atr_sample_rate = 20;
4892 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004893 adapter->ring_feature[RING_F_FDIR].indices =
Joe Perchese8e9f692010-09-07 21:34:53 +00004894 IXGBE_MAX_FDIR_INDICES;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004895 adapter->fdir_pballoc = 0;
Yi Zoueacd73f2009-05-13 13:11:06 +00004896#ifdef IXGBE_FCOE
Yi Zou0d551582009-07-22 14:07:12 +00004897 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4898 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4899 adapter->ring_feature[RING_F_FCOE].indices = 0;
Yi Zou61a0f422009-12-03 11:32:22 +00004900#ifdef CONFIG_IXGBE_DCB
Yi Zou6ee16522009-08-31 12:34:28 +00004901 /* Default traffic class to use for FCoE */
4902 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
John Fastabend56075a92010-07-26 20:41:31 +00004903 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
Yi Zou61a0f422009-12-03 11:32:22 +00004904#endif
Yi Zoueacd73f2009-05-13 13:11:06 +00004905#endif /* IXGBE_FCOE */
Alexander Duyckf8212f92009-04-27 22:42:37 +00004906 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08004907
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08004908#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08004909 /* Configure DCB traffic classes */
4910 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4911 tc = &adapter->dcb_cfg.tc_config[j];
4912 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4913 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4914 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4915 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4916 tc->dcb_pfc = pfc_disabled;
4917 }
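/* Worked example (assuming MAX_TRAFFIC_CLASS is 8): the per-class
 * percentages alternate 12, 13, 12, 13, ... so the bandwidth group sums to
 * 4 * 12 + 4 * 13 = 100, matching the 100% group assignment just below.
 */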
4918 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4919 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4920 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004921 adapter->dcb_cfg.pfc_mode_enable = false;
Alexander Duyck2f90b862008-11-20 20:52:10 -08004922 adapter->dcb_cfg.round_robin_enable = false;
4923 adapter->dcb_set_bitmap = 0x00;
4924 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
Joe Perchese8e9f692010-09-07 21:34:53 +00004925 adapter->ring_feature[RING_F_DCB].indices);
Alexander Duyck2f90b862008-11-20 20:52:10 -08004926
4927#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004928
4929 /* default flow control settings */
Don Skidmorecd7664f2009-03-31 21:33:44 +00004930 hw->fc.requested_mode = ixgbe_fc_full;
Don Skidmore71fd5702009-03-31 21:35:05 +00004931 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004932#ifdef CONFIG_DCB
4933 adapter->last_lfc_mode = hw->fc.current_mode;
4934#endif
John Fastabend16b61be2010-11-16 19:26:44 -08004935 hw->fc.high_water = FC_HIGH_WATER(max_frame);
4936 hw->fc.low_water = FC_LOW_WATER(max_frame);
Jesse Brandeburg2b9ade92008-08-26 04:27:10 -07004937 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4938 hw->fc.send_xon = true;
Don Skidmore71fd5702009-03-31 21:35:05 +00004939 hw->fc.disable_fc_autoneg = false;
Auke Kok9a799d72007-09-15 14:07:45 -07004940
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07004941 /* enable itr by default in dynamic mode */
Nelson, Shannonf7554a22009-09-18 09:46:06 +00004942 adapter->rx_itr_setting = 1;
4943 adapter->rx_eitr_param = 20000;
4944 adapter->tx_itr_setting = 1;
4945 adapter->tx_eitr_param = 10000;
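/* Reader's note (driver-internal convention, not spelled out here): an
 * *_itr_setting of 1 appears to select dynamic ITR, and the *_eitr_param
 * values look like target interrupt rates in interrupts/sec (20000 Rx,
 * 10000 Tx) used to seed the EITR registers.
 */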
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07004946
4947 /* set defaults for eitr in MegaBytes */
4948 adapter->eitr_low = 10;
4949 adapter->eitr_high = 20;
4950
4951 /* set default ring sizes */
4952 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4953 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4954
Auke Kok9a799d72007-09-15 14:07:45 -07004955 /* initialize eeprom parameters */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004956 if (ixgbe_init_eeprom_params_generic(hw)) {
Emil Tantilov849c4542010-06-03 16:53:41 +00004957 e_dev_err("EEPROM initialization failed\n");
Auke Kok9a799d72007-09-15 14:07:45 -07004958 return -EIO;
4959 }
4960
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004961 /* enable rx csum by default */
Auke Kok9a799d72007-09-15 14:07:45 -07004962 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
4963
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004964 /* get assigned NUMA node */
4965 adapter->node = dev_to_node(&pdev->dev);
4966
Auke Kok9a799d72007-09-15 14:07:45 -07004967 set_bit(__IXGBE_DOWN, &adapter->state);
4968
4969 return 0;
4970}
4971
4972/**
4973 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004974 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07004975 *
4976 * Return 0 on success, negative on failure
4977 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004978int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004979{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004980 struct device *dev = tx_ring->dev;
Auke Kok9a799d72007-09-15 14:07:45 -07004981 int size;
4982
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004983 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
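/* Allocation strategy: try memory local to the ring's NUMA node first,
 * then fall back to a plain vmalloc() from any node below.
 */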
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00004984 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00004985 if (!tx_ring->tx_buffer_info)
4986 tx_ring->tx_buffer_info = vmalloc(size);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004987 if (!tx_ring->tx_buffer_info)
4988 goto err;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004989 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07004990
4991 /* round up to nearest 4K */
Peter P Waskiewicz Jr12207e42009-02-06 21:47:24 -08004992 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004993 tx_ring->size = ALIGN(tx_ring->size, 4096);
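/* Rough arithmetic: an advanced Tx descriptor is 16 bytes, so a
 * power-of-two ring count is already 4K aligned (512 * 16 = 8192); the
 * ALIGN() covers odd counts, e.g. 300 * 16 = 4800 rounds up to 8192.
 */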
Auke Kok9a799d72007-09-15 14:07:45 -07004994
Alexander Duyckb6ec8952010-11-16 19:26:49 -08004995 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
Nick Nunley1b507732010-04-27 13:10:27 +00004996 &tx_ring->dma, GFP_KERNEL);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004997 if (!tx_ring->desc)
4998 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07004999
Jesse Brandeburg3a581072008-08-26 04:27:08 -07005000 tx_ring->next_to_use = 0;
5001 tx_ring->next_to_clean = 0;
5002 tx_ring->work_limit = tx_ring->count;
Auke Kok9a799d72007-09-15 14:07:45 -07005003 return 0;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07005004
5005err:
5006 vfree(tx_ring->tx_buffer_info);
5007 tx_ring->tx_buffer_info = NULL;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005008 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07005009 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07005010}
5011
5012/**
Alexander Duyck69888672008-09-11 20:05:39 -07005013 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5014 * @adapter: board private structure
5015 *
5016 * If this function returns with an error, then it's possible one or
5017 * more of the rings is populated (while the rest are not). It is the
 5018 * caller's duty to clean those orphaned rings.
5019 *
5020 * Return 0 on success, negative on failure
5021 **/
5022static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5023{
5024 int i, err = 0;
5025
5026 for (i = 0; i < adapter->num_tx_queues; i++) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005027 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
Alexander Duyck69888672008-09-11 20:05:39 -07005028 if (!err)
5029 continue;
Emil Tantilov396e7992010-07-01 20:05:12 +00005030 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Alexander Duyck69888672008-09-11 20:05:39 -07005031 break;
5032 }
5033
5034 return err;
5035}
5036
5037/**
Auke Kok9a799d72007-09-15 14:07:45 -07005038 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
Jesse Brandeburg3a581072008-08-26 04:27:08 -07005039 * @rx_ring: rx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07005040 *
5041 * Returns 0 on success, negative on failure
5042 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005043int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07005044{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005045 struct device *dev = rx_ring->dev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005046 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07005047
Jesse Brandeburg3a581072008-08-26 04:27:08 -07005048 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005049 rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
Jesse Brandeburg1a6c14a2010-02-03 14:18:50 +00005050 if (!rx_ring->rx_buffer_info)
5051 rx_ring->rx_buffer_info = vmalloc(size);
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005052 if (!rx_ring->rx_buffer_info)
5053 goto err;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07005054 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07005055
Auke Kok9a799d72007-09-15 14:07:45 -07005056 /* Round up to nearest 4K */
Jesse Brandeburg3a581072008-08-26 04:27:08 -07005057 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5058 rx_ring->size = ALIGN(rx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07005059
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005060 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
Nick Nunley1b507732010-04-27 13:10:27 +00005061 &rx_ring->dma, GFP_KERNEL);
Auke Kok9a799d72007-09-15 14:07:45 -07005062
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005063 if (!rx_ring->desc)
5064 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07005065
Jesse Brandeburg3a581072008-08-26 04:27:08 -07005066 rx_ring->next_to_clean = 0;
5067 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005068
5069 return 0;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005070err:
5071 vfree(rx_ring->rx_buffer_info);
5072 rx_ring->rx_buffer_info = NULL;
5073 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07005074 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07005075}
5076
5077/**
Alexander Duyck69888672008-09-11 20:05:39 -07005078 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5079 * @adapter: board private structure
5080 *
5081 * If this function returns with an error, then it's possible one or
5082 * more of the rings is populated (while the rest are not). It is the
 5083 * caller's duty to clean those orphaned rings.
5084 *
5085 * Return 0 on success, negative on failure
5086 **/
Alexander Duyck69888672008-09-11 20:05:39 -07005087static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5088{
5089 int i, err = 0;
5090
5091 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005092 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
Alexander Duyck69888672008-09-11 20:05:39 -07005093 if (!err)
5094 continue;
Emil Tantilov396e7992010-07-01 20:05:12 +00005095 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Alexander Duyck69888672008-09-11 20:05:39 -07005096 break;
5097 }
5098
5099 return err;
5100}
5101
5102/**
Auke Kok9a799d72007-09-15 14:07:45 -07005103 * ixgbe_free_tx_resources - Free Tx Resources per Queue
Auke Kok9a799d72007-09-15 14:07:45 -07005104 * @tx_ring: Tx descriptor ring for a specific queue
5105 *
5106 * Free all transmit software resources
5107 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005108void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07005109{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005110 ixgbe_clean_tx_ring(tx_ring);
Auke Kok9a799d72007-09-15 14:07:45 -07005111
5112 vfree(tx_ring->tx_buffer_info);
5113 tx_ring->tx_buffer_info = NULL;
5114
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005115 /* if not set, then don't free */
5116 if (!tx_ring->desc)
5117 return;
5118
5119 dma_free_coherent(tx_ring->dev, tx_ring->size,
5120 tx_ring->desc, tx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07005121
5122 tx_ring->desc = NULL;
5123}
5124
5125/**
5126 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5127 * @adapter: board private structure
5128 *
5129 * Free all transmit software resources
5130 **/
5131static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5132{
5133 int i;
5134
5135 for (i = 0; i < adapter->num_tx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00005136 if (adapter->tx_ring[i]->desc)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005137 ixgbe_free_tx_resources(adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07005138}
5139
5140/**
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005141 * ixgbe_free_rx_resources - Free Rx Resources
Auke Kok9a799d72007-09-15 14:07:45 -07005142 * @rx_ring: ring to clean the resources from
5143 *
5144 * Free all receive software resources
5145 **/
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005146void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07005147{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005148 ixgbe_clean_rx_ring(rx_ring);
Auke Kok9a799d72007-09-15 14:07:45 -07005149
5150 vfree(rx_ring->rx_buffer_info);
5151 rx_ring->rx_buffer_info = NULL;
5152
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005153 /* if not set, then don't free */
5154 if (!rx_ring->desc)
5155 return;
5156
5157 dma_free_coherent(rx_ring->dev, rx_ring->size,
5158 rx_ring->desc, rx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07005159
5160 rx_ring->desc = NULL;
5161}
5162
5163/**
5164 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5165 * @adapter: board private structure
5166 *
5167 * Free all receive software resources
5168 **/
5169static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5170{
5171 int i;
5172
5173 for (i = 0; i < adapter->num_rx_queues; i++)
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00005174 if (adapter->rx_ring[i]->desc)
Alexander Duyckb6ec8952010-11-16 19:26:49 -08005175 ixgbe_free_rx_resources(adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07005176}
5177
5178/**
Auke Kok9a799d72007-09-15 14:07:45 -07005179 * ixgbe_change_mtu - Change the Maximum Transfer Unit
5180 * @netdev: network interface device structure
5181 * @new_mtu: new value for maximum frame size
5182 *
5183 * Returns 0 on success, negative on failure
5184 **/
5185static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5186{
5187 struct ixgbe_adapter *adapter = netdev_priv(netdev);
John Fastabend16b61be2010-11-16 19:26:44 -08005188 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07005189 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5190
Jesse Brandeburg42c783c2008-09-11 19:56:28 -07005191 /* MTU < 68 is an error and causes problems on some kernels */
5192 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
Auke Kok9a799d72007-09-15 14:07:45 -07005193 return -EINVAL;
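/* Taken together these bounds imply 68 <= new_mtu <=
 * IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN), i.e. the jumbo
 * limit minus 18 bytes of L2 overhead.
 */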
5194
Emil Tantilov396e7992010-07-01 20:05:12 +00005195 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005196 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07005197 netdev->mtu = new_mtu;
5198
John Fastabend16b61be2010-11-16 19:26:44 -08005199 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5200 hw->fc.low_water = FC_LOW_WATER(max_frame);
5201
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08005202 if (netif_running(netdev))
5203 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005204
5205 return 0;
5206}
5207
5208/**
5209 * ixgbe_open - Called when a network interface is made active
5210 * @netdev: network interface device structure
5211 *
5212 * Returns 0 on success, negative value on failure
5213 *
5214 * The open entry point is called when a network interface is made
5215 * active by the system (IFF_UP). At this point all resources needed
5216 * for transmit and receive operations are allocated, the interrupt
5217 * handler is registered with the OS, the watchdog timer is started,
5218 * and the stack is notified that the interface is ready.
5219 **/
5220static int ixgbe_open(struct net_device *netdev)
5221{
5222 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5223 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07005224
Auke Kok4bebfaa2008-02-11 09:26:01 -08005225 /* disallow open during test */
5226 if (test_bit(__IXGBE_TESTING, &adapter->state))
5227 return -EBUSY;
5228
Jesse Brandeburg54386462009-04-17 20:44:27 +00005229 netif_carrier_off(netdev);
5230
Auke Kok9a799d72007-09-15 14:07:45 -07005231 /* allocate transmit descriptors */
5232 err = ixgbe_setup_all_tx_resources(adapter);
5233 if (err)
5234 goto err_setup_tx;
5235
Auke Kok9a799d72007-09-15 14:07:45 -07005236 /* allocate receive descriptors */
5237 err = ixgbe_setup_all_rx_resources(adapter);
5238 if (err)
5239 goto err_setup_rx;
5240
5241 ixgbe_configure(adapter);
5242
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005243 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005244 if (err)
5245 goto err_req_irq;
5246
Auke Kok9a799d72007-09-15 14:07:45 -07005247 err = ixgbe_up_complete(adapter);
5248 if (err)
5249 goto err_up;
5250
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07005251 netif_tx_start_all_queues(netdev);
5252
Auke Kok9a799d72007-09-15 14:07:45 -07005253 return 0;
5254
5255err_up:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08005256 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005257 ixgbe_free_irq(adapter);
5258err_req_irq:
Auke Kok9a799d72007-09-15 14:07:45 -07005259err_setup_rx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00005260 ixgbe_free_all_rx_resources(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005261err_setup_tx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00005262 ixgbe_free_all_tx_resources(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005263 ixgbe_reset(adapter);
5264
5265 return err;
5266}
5267
5268/**
5269 * ixgbe_close - Disables a network interface
5270 * @netdev: network interface device structure
5271 *
 5272 * Returns 0; this is not allowed to fail
5273 *
5274 * The close entry point is called when an interface is de-activated
 5275 * by the OS. The hardware is still under the driver's control, but
5276 * needs to be disabled. A global MAC reset is issued to stop the
5277 * hardware, and all transmit and receive resources are freed.
5278 **/
5279static int ixgbe_close(struct net_device *netdev)
5280{
5281 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005282
5283 ixgbe_down(adapter);
5284 ixgbe_free_irq(adapter);
5285
5286 ixgbe_free_all_tx_resources(adapter);
5287 ixgbe_free_all_rx_resources(adapter);
5288
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08005289 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005290
5291 return 0;
5292}
5293
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005294#ifdef CONFIG_PM
5295static int ixgbe_resume(struct pci_dev *pdev)
5296{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08005297 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5298 struct net_device *netdev = adapter->netdev;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005299 u32 err;
5300
5301 pci_set_power_state(pdev, PCI_D0);
5302 pci_restore_state(pdev);
Don Skidmore656ab812009-12-23 21:19:19 -08005303 /*
5304 * pci_restore_state clears dev->state_saved so call
5305 * pci_save_state to restore it.
5306 */
5307 pci_save_state(pdev);
gouji-new9ce77662009-05-06 10:44:45 +00005308
5309 err = pci_enable_device_mem(pdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005310 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00005311 e_dev_err("Cannot enable PCI device from suspend\n");
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005312 return err;
5313 }
5314 pci_set_master(pdev);
5315
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07005316 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005317
5318 err = ixgbe_init_interrupt_scheme(adapter);
5319 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00005320 e_dev_err("Cannot initialize interrupts for device\n");
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005321 return err;
5322 }
5323
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005324 ixgbe_reset(adapter);
5325
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00005326 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5327
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005328 if (netif_running(netdev)) {
Alexander Duyckc60fbb02010-11-16 19:26:54 -08005329 err = ixgbe_open(netdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005330 if (err)
5331 return err;
5332 }
5333
5334 netif_device_attach(netdev);
5335
5336 return 0;
5337}
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005338#endif /* CONFIG_PM */
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005339
5340static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005341{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08005342 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5343 struct net_device *netdev = adapter->netdev;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005344 struct ixgbe_hw *hw = &adapter->hw;
5345 u32 ctrl, fctrl;
5346 u32 wufc = adapter->wol;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005347#ifdef CONFIG_PM
5348 int retval = 0;
5349#endif
5350
5351 netif_device_detach(netdev);
5352
5353 if (netif_running(netdev)) {
5354 ixgbe_down(adapter);
5355 ixgbe_free_irq(adapter);
5356 ixgbe_free_all_tx_resources(adapter);
5357 ixgbe_free_all_rx_resources(adapter);
5358 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005359
Alexander Duyck5f5ae6f2010-11-16 19:26:52 -08005360 ixgbe_clear_interrupt_scheme(adapter);
5361
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005362#ifdef CONFIG_PM
5363 retval = pci_save_state(pdev);
5364 if (retval)
5365 return retval;
Jesse Brandeburg4df10462009-03-13 22:15:31 +00005366
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005367#endif
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005368 if (wufc) {
5369 ixgbe_set_rx_mode(netdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005370
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005371 /* turn on all-multi mode if wake on multicast is enabled */
5372 if (wufc & IXGBE_WUFC_MC) {
5373 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5374 fctrl |= IXGBE_FCTRL_MPE;
5375 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5376 }
5377
5378 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5379 ctrl |= IXGBE_CTRL_GIO_DIS;
5380 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5381
5382 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5383 } else {
5384 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5385 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5386 }
5387
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07005388 if (wufc && hw->mac.type == ixgbe_mac_82599EB)
5389 pci_wake_from_d3(pdev, true);
5390 else
5391 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005392
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005393 *enable_wake = !!wufc;
5394
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005395 ixgbe_release_hw_control(adapter);
5396
5397 pci_disable_device(pdev);
5398
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005399 return 0;
5400}
5401
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005402#ifdef CONFIG_PM
5403static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5404{
5405 int retval;
5406 bool wake;
5407
5408 retval = __ixgbe_shutdown(pdev, &wake);
5409 if (retval)
5410 return retval;
5411
5412 if (wake) {
5413 pci_prepare_to_sleep(pdev);
5414 } else {
5415 pci_wake_from_d3(pdev, false);
5416 pci_set_power_state(pdev, PCI_D3hot);
5417 }
5418
5419 return 0;
5420}
5421#endif /* CONFIG_PM */
5422
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005423static void ixgbe_shutdown(struct pci_dev *pdev)
5424{
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00005425 bool wake;
5426
5427 __ixgbe_shutdown(pdev, &wake);
5428
5429 if (system_state == SYSTEM_POWER_OFF) {
5430 pci_wake_from_d3(pdev, wake);
5431 pci_set_power_state(pdev, PCI_D3hot);
5432 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07005433}
5434
5435/**
Auke Kok9a799d72007-09-15 14:07:45 -07005436 * ixgbe_update_stats - Update the board statistics counters.
5437 * @adapter: board private structure
5438 **/
5439void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5440{
Ajit Khaparde2d86f132009-10-07 02:43:49 +00005441 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07005442 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005443 struct ixgbe_hw_stats *hwstats = &adapter->stats;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005444 u64 total_mpc = 0;
5445 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005446 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5447 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5448 u64 bytes = 0, packets = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005449
Don Skidmored08935c2010-06-11 13:20:29 +00005450 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5451 test_bit(__IXGBE_RESETTING, &adapter->state))
5452 return;
5453
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005454 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00005455 u64 rsc_count = 0;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005456 u64 rsc_flush = 0;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00005457 for (i = 0; i < 16; i++)
5458 adapter->hw_rx_no_dma_resources +=
Joe Perches7ca647b2010-09-07 21:35:40 +00005459 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005460 for (i = 0; i < adapter->num_rx_queues; i++) {
Alexander Duyck5b7da512010-11-16 19:26:50 -08005461 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5462 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
Mallikarjuna R Chilakala94b982b2009-11-23 06:32:06 +00005463 }
5464 adapter->rsc_total_count = rsc_count;
5465 adapter->rsc_total_flush = rsc_flush;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00005466 }
5467
Alexander Duyck5b7da512010-11-16 19:26:50 -08005468 for (i = 0; i < adapter->num_rx_queues; i++) {
5469 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5470 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5471 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5472 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5473 bytes += rx_ring->stats.bytes;
5474 packets += rx_ring->stats.packets;
5475 }
Mallikarjuna R Chilakalaeb985f02009-12-15 11:56:59 +00005476 adapter->non_eop_descs = non_eop_descs;
Alexander Duyck5b7da512010-11-16 19:26:50 -08005477 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5478 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5479 netdev->stats.rx_bytes = bytes;
5480 netdev->stats.rx_packets = packets;
5481
5482 bytes = 0;
5483 packets = 0;
5484 /* gather some stats to the adapter struct that are per queue */
5485 for (i = 0; i < adapter->num_tx_queues; i++) {
5486 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5487 restart_queue += tx_ring->tx_stats.restart_queue;
5488 tx_busy += tx_ring->tx_stats.tx_busy;
5489 bytes += tx_ring->stats.bytes;
5490 packets += tx_ring->stats.packets;
5491 }
5492 adapter->restart_queue = restart_queue;
5493 adapter->tx_busy = tx_busy;
5494 netdev->stats.tx_bytes = bytes;
5495 netdev->stats.tx_packets = packets;
Jesse Brandeburg7ca3bc52009-12-03 11:33:29 +00005496
Joe Perches7ca647b2010-09-07 21:35:40 +00005497 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005498 for (i = 0; i < 8; i++) {
5499 /* for packet buffers not used, the register should read 0 */
5500 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5501 missed_rx += mpc;
Joe Perches7ca647b2010-09-07 21:35:40 +00005502 hwstats->mpc[i] += mpc;
5503 total_mpc += hwstats->mpc[i];
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005504 if (hw->mac.type == ixgbe_mac_82598EB)
Joe Perches7ca647b2010-09-07 21:35:40 +00005505 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5506 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5507 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5508 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5509 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005510 if (hw->mac.type == ixgbe_mac_82599EB) {
Joe Perches7ca647b2010-09-07 21:35:40 +00005511 hwstats->pxonrxc[i] +=
5512 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5513 hwstats->pxoffrxc[i] +=
5514 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5515 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005516 } else {
Joe Perches7ca647b2010-09-07 21:35:40 +00005517 hwstats->pxonrxc[i] +=
5518 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5519 hwstats->pxoffrxc[i] +=
5520 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005521 }
Joe Perches7ca647b2010-09-07 21:35:40 +00005522 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5523 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005524 }
Joe Perches7ca647b2010-09-07 21:35:40 +00005525 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005526 /* work around hardware counting issue */
Joe Perches7ca647b2010-09-07 21:35:40 +00005527 hwstats->gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07005528
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005529 /* 82598 hardware only has a 32 bit counter in the high register */
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005530 if (hw->mac.type == ixgbe_mac_82599EB) {
Ben Greearaad71912009-09-30 12:08:16 +00005531 u64 tmp;
Joe Perches7ca647b2010-09-07 21:35:40 +00005532 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
Joe Perchese8e9f692010-09-07 21:34:53 +00005533 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
5534 /* 4 high bits of GORC */
Joe Perches7ca647b2010-09-07 21:35:40 +00005535 hwstats->gorc += (tmp << 32);
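/* i.e. on 82599 the good-octets counters are 36 bits wide: 32 bits from
 * the low register plus the 4 high bits shifted into bits 35:32.  GOTC
 * below is widened the same way; TORH is read only to clear it.
 */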
5536 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
Joe Perchese8e9f692010-09-07 21:34:53 +00005537 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
5538 /* 4 high bits of GOTC */
Joe Perches7ca647b2010-09-07 21:35:40 +00005539 hwstats->gotc += (tmp << 32);
5540 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
Joe Perchese8e9f692010-09-07 21:34:53 +00005541 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
Joe Perches7ca647b2010-09-07 21:35:40 +00005542 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5543 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5544 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5545 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
Yi Zou6d455222009-05-13 13:12:16 +00005546#ifdef IXGBE_FCOE
Joe Perches7ca647b2010-09-07 21:35:40 +00005547 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5548 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5549 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5550 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5551 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5552 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
Yi Zou6d455222009-05-13 13:12:16 +00005553#endif /* IXGBE_FCOE */
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005554 } else {
Joe Perches7ca647b2010-09-07 21:35:40 +00005555 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5556 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
5557 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5558 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5559 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005560 }
Auke Kok9a799d72007-09-15 14:07:45 -07005561 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
Joe Perches7ca647b2010-09-07 21:35:40 +00005562 hwstats->bprc += bprc;
5563 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005564 if (hw->mac.type == ixgbe_mac_82598EB)
Joe Perches7ca647b2010-09-07 21:35:40 +00005565 hwstats->mprc -= bprc;
5566 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5567 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5568 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5569 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5570 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5571 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5572 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5573 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005574 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
Joe Perches7ca647b2010-09-07 21:35:40 +00005575 hwstats->lxontxc += lxon;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005576 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
Joe Perches7ca647b2010-09-07 21:35:40 +00005577 hwstats->lxofftxc += lxoff;
5578 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5579 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5580 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08005581 /*
5582 * 82598 errata - tx of flow control packets is included in tx counters
5583 */
5584 xon_off_tot = lxon + lxoff;
Joe Perches7ca647b2010-09-07 21:35:40 +00005585 hwstats->gptc -= xon_off_tot;
5586 hwstats->mptc -= xon_off_tot;
5587 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
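/* Worked example of the errata fixup: a pause frame is a minimum-size
 * frame of ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes, so N transmitted
 * XON/XOFF frames inflated gptc/mptc by N packets and gotc by 64 * N
 * bytes; both are backed out here.
 */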
5588 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5589 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5590 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5591 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5592 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5593 hwstats->ptc64 -= xon_off_tot;
5594 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5595 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5596 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5597 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5598 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5599 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
Auke Kok9a799d72007-09-15 14:07:45 -07005600
5601 /* Fill out the OS statistics structure */
Joe Perches7ca647b2010-09-07 21:35:40 +00005602 netdev->stats.multicast = hwstats->mprc;
Auke Kok9a799d72007-09-15 14:07:45 -07005603
5604 /* Rx Errors */
Joe Perches7ca647b2010-09-07 21:35:40 +00005605 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
Ajit Khaparde2d86f132009-10-07 02:43:49 +00005606 netdev->stats.rx_dropped = 0;
Joe Perches7ca647b2010-09-07 21:35:40 +00005607 netdev->stats.rx_length_errors = hwstats->rlec;
5608 netdev->stats.rx_crc_errors = hwstats->crcerrs;
Ajit Khaparde2d86f132009-10-07 02:43:49 +00005609 netdev->stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07005610}
5611
5612/**
5613 * ixgbe_watchdog - Timer Call-back
5614 * @data: pointer to adapter cast into an unsigned long
5615 **/
5616static void ixgbe_watchdog(unsigned long data)
5617{
5618 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005619 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfe49f042009-06-04 16:00:09 +00005620 u64 eics = 0;
5621 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07005622
Alexander Duyckfe49f042009-06-04 16:00:09 +00005623 /*
5624 * Do the watchdog outside of interrupt context due to the lovely
5625 * delays that some of the newer hardware requires
5626 */
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00005627
Alexander Duyckfe49f042009-06-04 16:00:09 +00005628 if (test_bit(__IXGBE_DOWN, &adapter->state))
5629 goto watchdog_short_circuit;
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00005630
Alexander Duyckfe49f042009-06-04 16:00:09 +00005631 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5632 /*
5633 * for legacy and MSI interrupts don't set any bits
5634 * that are enabled for EIAM, because this operation
5635 * would set *both* EIMS and EICS for any bit in EIAM
5636 */
5637 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5638 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5639 goto watchdog_reschedule;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005640 }
5641
Alexander Duyckfe49f042009-06-04 16:00:09 +00005642 /* get one bit for every active tx/rx interrupt vector */
5643 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5644 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5645 if (qv->rxr_count || qv->txr_count)
5646 eics |= ((u64)1 << i);
5647 }
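/* Example: with MSI-X and four active queue vectors (0-3), eics ends up
 * 0xf; writing it to EICS below raises a software interrupt on each of
 * those vectors so their rings get serviced.
 */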
5648
5649 /* Cause software interrupt to ensure rx rings are cleaned */
5650 ixgbe_irq_rearm_queues(adapter, eics);
5651
5652watchdog_reschedule:
5653 /* Reset the timer */
5654 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
5655
5656watchdog_short_circuit:
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005657 schedule_work(&adapter->watchdog_task);
5658}
5659
5660/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005661 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
5662 * @work: pointer to work_struct containing our data
5663 **/
5664static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5665{
5666 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00005667 struct ixgbe_adapter,
5668 multispeed_fiber_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005669 struct ixgbe_hw *hw = &adapter->hw;
5670 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00005671 bool negotiation;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005672
5673 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
Mallikarjuna R Chilakalaa1f25322009-06-30 11:44:36 +00005674 autoneg = hw->phy.autoneg_advertised;
5675 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00005676 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
Mallikarjuna R Chilakala1097cd12010-03-18 14:34:52 +00005677 hw->mac.autotry_restart = false;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00005678 if (hw->mac.ops.setup_link)
5679 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005680 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5681 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
5682}
5683
5684/**
5685 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
5686 * @work: pointer to work_struct containing our data
5687 **/
5688static void ixgbe_sfp_config_module_task(struct work_struct *work)
5689{
5690 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00005691 struct ixgbe_adapter,
5692 sfp_config_module_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005693 struct ixgbe_hw *hw = &adapter->hw;
5694 u32 err;
5695
5696 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
Don Skidmore63d6e1d2009-07-02 12:50:12 +00005697
5698 /* Time for electrical oscillations to settle down */
5699 msleep(100);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005700 err = hw->phy.ops.identify_sfp(hw);
Don Skidmore63d6e1d2009-07-02 12:50:12 +00005701
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005702 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Emil Tantilov849c4542010-06-03 16:53:41 +00005703 e_dev_err("failed to initialize because an unsupported SFP+ "
5704 "module type was detected.\n");
5705 e_dev_err("Reload the driver after installing a supported "
5706 "module.\n");
Don Skidmore63d6e1d2009-07-02 12:50:12 +00005707 unregister_netdev(adapter->netdev);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005708 return;
5709 }
5710 hw->mac.ops.setup_sfp(hw);
5711
Tony Breeds8d1c3c02009-04-09 22:29:10 +00005712 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005713 /* This will also work for DA Twinax connections */
5714 schedule_work(&adapter->multispeed_fiber_task);
5715 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
5716}
5717
5718/**
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005719 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
5720 * @work: pointer to work_struct containing our data
5721 **/
5722static void ixgbe_fdir_reinit_task(struct work_struct *work)
5723{
5724 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00005725 struct ixgbe_adapter,
5726 fdir_reinit_task);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005727 struct ixgbe_hw *hw = &adapter->hw;
5728 int i;
5729
5730 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5731 for (i = 0; i < adapter->num_tx_queues; i++)
5732 set_bit(__IXGBE_FDIR_INIT_DONE,
Joe Perchese8e9f692010-09-07 21:34:53 +00005733 &(adapter->tx_ring[i]->reinit_state));
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005734 } else {
Emil Tantilov396e7992010-07-01 20:05:12 +00005735 e_err(probe, "failed to finish FDIR re-initialization, "
Emil Tantilov849c4542010-06-03 16:53:41 +00005736 "ignored adding FDIR ATR filters\n");
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005737 }
5738 /* Done FDIR Re-initialization, enable transmits */
5739 netif_tx_start_all_queues(adapter->netdev);
5740}
5741
John Fastabend10eec952010-02-03 14:23:32 +00005742static DEFINE_MUTEX(ixgbe_watchdog_lock);
5743
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005744/**
Alexander Duyck69888672008-09-11 20:05:39 -07005745 * ixgbe_watchdog_task - worker thread to bring link up
5746 * @work: pointer to work_struct containing our data
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005747 **/
5748static void ixgbe_watchdog_task(struct work_struct *work)
5749{
5750 struct ixgbe_adapter *adapter = container_of(work,
Joe Perchese8e9f692010-09-07 21:34:53 +00005751 struct ixgbe_adapter,
5752 watchdog_task);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005753 struct net_device *netdev = adapter->netdev;
5754 struct ixgbe_hw *hw = &adapter->hw;
John Fastabend10eec952010-02-03 14:23:32 +00005755 u32 link_speed;
5756 bool link_up;
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00005757 int i;
5758 struct ixgbe_ring *tx_ring;
5759 int some_tx_pending = 0;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005760
John Fastabend10eec952010-02-03 14:23:32 +00005761 mutex_lock(&ixgbe_watchdog_lock);
5762
5763 link_up = adapter->link_up;
5764 link_speed = adapter->link_speed;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005765
5766 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
5767 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00005768 if (link_up) {
5769#ifdef CONFIG_DCB
5770 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5771 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
Mallikarjuna R Chilakala620fa032009-06-04 11:11:13 +00005772 hw->mac.ops.fc_enable(hw, i);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00005773 } else {
Mallikarjuna R Chilakala620fa032009-06-04 11:11:13 +00005774 hw->mac.ops.fc_enable(hw, 0);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00005775 }
5776#else
Mallikarjuna R Chilakala620fa032009-06-04 11:11:13 +00005777 hw->mac.ops.fc_enable(hw, 0);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00005778#endif
5779 }
5780
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005781 if (link_up ||
5782 time_after(jiffies, (adapter->link_check_timeout +
Joe Perchese8e9f692010-09-07 21:34:53 +00005783 IXGBE_TRY_LINK_TIMEOUT))) {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005784 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00005785 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005786 }
5787 adapter->link_up = link_up;
5788 adapter->link_speed = link_speed;
5789 }
Auke Kok9a799d72007-09-15 14:07:45 -07005790
5791 if (link_up) {
5792 if (!netif_carrier_ok(netdev)) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005793 bool flow_rx, flow_tx;
5794
5795 if (hw->mac.type == ixgbe_mac_82599EB) {
5796 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5797 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
Peter P Waskiewicz Jr078788b2009-07-16 15:50:32 +00005798 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5799 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005800 } else {
5801 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5802 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
Peter P Waskiewicz Jr078788b2009-07-16 15:50:32 +00005803 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5804 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005805 }
5806
Emil Tantilov396e7992010-07-01 20:05:12 +00005807 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
Jeff Kirshera46e5342008-11-27 00:22:21 -08005808 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
Emil Tantilov849c4542010-06-03 16:53:41 +00005809 "10 Gbps" :
5810 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5811 "1 Gbps" : "unknown speed")),
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005812 ((flow_rx && flow_tx) ? "RX/TX" :
Emil Tantilov849c4542010-06-03 16:53:41 +00005813 (flow_rx ? "RX" :
5814 (flow_tx ? "TX" : "None"))));
Auke Kok9a799d72007-09-15 14:07:45 -07005815
5816 netif_carrier_on(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005817 } else {
5818 /* Force detection of hung controller */
5819 adapter->detect_tx_hung = true;
5820 }
5821 } else {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005822 adapter->link_up = false;
5823 adapter->link_speed = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005824 if (netif_carrier_ok(netdev)) {
Emil Tantilov396e7992010-07-01 20:05:12 +00005825 e_info(drv, "NIC Link is Down\n");
Auke Kok9a799d72007-09-15 14:07:45 -07005826 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005827 }
5828 }
5829
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00005830 if (!netif_carrier_ok(netdev)) {
5831 for (i = 0; i < adapter->num_tx_queues; i++) {
PJ Waskiewicz4a0b9ca2010-02-03 14:19:12 +00005832 tx_ring = adapter->tx_ring[i];
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00005833 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5834 some_tx_pending = 1;
5835 break;
5836 }
5837 }
5838
5839 if (some_tx_pending) {
5840 /* We've lost link, so the controller stops DMA,
5841 * but we've got queued Tx work that's never going
5842 * to get done, so reset controller to flush Tx.
5843 * (Do the reset outside of interrupt context).
5844 */
5845 schedule_work(&adapter->reset_task);
5846 }
5847 }
5848
Auke Kok9a799d72007-09-15 14:07:45 -07005849 ixgbe_update_stats(adapter);
John Fastabend10eec952010-02-03 14:23:32 +00005850 mutex_unlock(&ixgbe_watchdog_lock);
Auke Kok9a799d72007-09-15 14:07:45 -07005851}
5852
Auke Kok9a799d72007-09-15 14:07:45 -07005853static int ixgbe_tso(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00005854 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
Hao Zheng5e09a102010-11-11 13:47:59 +00005855 u32 tx_flags, u8 *hdr_len, __be16 protocol)
Auke Kok9a799d72007-09-15 14:07:45 -07005856{
5857 struct ixgbe_adv_tx_context_desc *context_desc;
5858 unsigned int i;
5859 int err;
5860 struct ixgbe_tx_buffer *tx_buffer_info;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005861 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
5862 u32 mss_l4len_idx, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07005863
5864 if (skb_is_gso(skb)) {
5865 if (skb_header_cloned(skb)) {
5866 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5867 if (err)
5868 return err;
5869 }
5870 l4len = tcp_hdrlen(skb);
5871 *hdr_len += l4len;
5872
Hao Zheng5e09a102010-11-11 13:47:59 +00005873 if (protocol == htons(ETH_P_IP)) {
Auke Kok9a799d72007-09-15 14:07:45 -07005874 struct iphdr *iph = ip_hdr(skb);
5875 iph->tot_len = 0;
5876 iph->check = 0;
5877 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
Joe Perchese8e9f692010-09-07 21:34:53 +00005878 iph->daddr, 0,
5879 IPPROTO_TCP,
5880 0);
Sridhar Samudrala8e1e8a42010-01-23 02:02:21 -08005881 } else if (skb_is_gso_v6(skb)) {
Auke Kok9a799d72007-09-15 14:07:45 -07005882 ipv6_hdr(skb)->payload_len = 0;
5883 tcp_hdr(skb)->check =
5884 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
Joe Perchese8e9f692010-09-07 21:34:53 +00005885 &ipv6_hdr(skb)->daddr,
5886 0, IPPROTO_TCP, 0);
Auke Kok9a799d72007-09-15 14:07:45 -07005887 }
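/* Above, the IP total/payload length is zeroed and the TCP checksum is
 * seeded with just the pseudo-header; the hardware recomputes the
 * per-segment lengths and checksums when it splits the TSO payload.
 */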
5888
5889 i = tx_ring->next_to_use;
5890
5891 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck31f05a22010-08-19 13:40:31 +00005892 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07005893
5894 /* VLAN MACLEN IPLEN */
5895 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5896 vlan_macip_lens |=
5897 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5898 vlan_macip_lens |= ((skb_network_offset(skb)) <<
Joe Perchese8e9f692010-09-07 21:34:53 +00005899 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07005900 *hdr_len += skb_network_offset(skb);
5901 vlan_macip_lens |=
5902 (skb_transport_header(skb) - skb_network_header(skb));
5903 *hdr_len +=
5904 (skb_transport_header(skb) - skb_network_header(skb));
5905 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5906 context_desc->seqnum_seed = 0;
5907
5908 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005909 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
Joe Perchese8e9f692010-09-07 21:34:53 +00005910 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07005911
Hao Zheng5e09a102010-11-11 13:47:59 +00005912 if (protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07005913 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5914 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5915 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5916
5917 /* MSS L4LEN IDX */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005918 mss_l4len_idx =
Auke Kok9a799d72007-09-15 14:07:45 -07005919 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
5920 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07005921 /* use index 1 for TSO */
5922 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07005923 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5924
5925 tx_buffer_info->time_stamp = jiffies;
5926 tx_buffer_info->next_to_watch = i;
5927
5928 i++;
5929 if (i == tx_ring->count)
5930 i = 0;
5931 tx_ring->next_to_use = i;
5932
5933 return true;
5934 }
5935 return false;
5936}
5937
Hao Zheng5e09a102010-11-11 13:47:59 +00005938static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5939 __be16 protocol)
Joe Perches7ca647b2010-09-07 21:35:40 +00005940{
5941 u32 rtn = 0;
Joe Perches7ca647b2010-09-07 21:35:40 +00005942
5943 switch (protocol) {
5944 case cpu_to_be16(ETH_P_IP):
5945 rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
5946 switch (ip_hdr(skb)->protocol) {
5947 case IPPROTO_TCP:
5948 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5949 break;
5950 case IPPROTO_SCTP:
5951 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5952 break;
5953 }
5954 break;
5955 case cpu_to_be16(ETH_P_IPV6):
5956 /* XXX what about other V6 headers?? */
5957 switch (ipv6_hdr(skb)->nexthdr) {
5958 case IPPROTO_TCP:
5959 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5960 break;
5961 case IPPROTO_SCTP:
5962 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5963 break;
5964 }
5965 break;
5966 default:
5967 if (unlikely(net_ratelimit()))
5968 e_warn(probe, "partial checksum but proto=%x!\n",
Hao Zheng5e09a102010-11-11 13:47:59 +00005969 protocol);
Joe Perches7ca647b2010-09-07 21:35:40 +00005970 break;
5971 }
5972
5973 return rtn;
5974}
5975
Auke Kok9a799d72007-09-15 14:07:45 -07005976static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00005977 struct ixgbe_ring *tx_ring,
Hao Zheng5e09a102010-11-11 13:47:59 +00005978 struct sk_buff *skb, u32 tx_flags,
5979 __be16 protocol)
Auke Kok9a799d72007-09-15 14:07:45 -07005980{
5981 struct ixgbe_adv_tx_context_desc *context_desc;
5982 unsigned int i;
5983 struct ixgbe_tx_buffer *tx_buffer_info;
5984 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
5985
5986 if (skb->ip_summed == CHECKSUM_PARTIAL ||
5987 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
5988 i = tx_ring->next_to_use;
5989 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck31f05a22010-08-19 13:40:31 +00005990 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07005991
5992 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5993 vlan_macip_lens |=
5994 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5995 vlan_macip_lens |= (skb_network_offset(skb) <<
Joe Perchese8e9f692010-09-07 21:34:53 +00005996 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07005997 if (skb->ip_summed == CHECKSUM_PARTIAL)
5998 vlan_macip_lens |= (skb_transport_header(skb) -
Joe Perchese8e9f692010-09-07 21:34:53 +00005999 skb_network_header(skb));
Auke Kok9a799d72007-09-15 14:07:45 -07006000
6001 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
6002 context_desc->seqnum_seed = 0;
6003
6004 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
Joe Perchese8e9f692010-09-07 21:34:53 +00006005 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07006006
Joe Perches7ca647b2010-09-07 21:35:40 +00006007 if (skb->ip_summed == CHECKSUM_PARTIAL)
Hao Zheng5e09a102010-11-11 13:47:59 +00006008 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
Auke Kok9a799d72007-09-15 14:07:45 -07006009
6010 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07006011 /* use index zero for tx checksum offload */
Auke Kok9a799d72007-09-15 14:07:45 -07006012 context_desc->mss_l4len_idx = 0;
6013
6014 tx_buffer_info->time_stamp = jiffies;
6015 tx_buffer_info->next_to_watch = i;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006016
Auke Kok9a799d72007-09-15 14:07:45 -07006017 i++;
6018 if (i == tx_ring->count)
6019 i = 0;
6020 tx_ring->next_to_use = i;
6021
6022 return true;
6023 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006024
Auke Kok9a799d72007-09-15 14:07:45 -07006025 return false;
6026}
6027
6028static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
Joe Perchese8e9f692010-09-07 21:34:53 +00006029 struct ixgbe_ring *tx_ring,
6030 struct sk_buff *skb, u32 tx_flags,
Alexander Duyck8ad494b2010-11-16 19:26:47 -08006031 unsigned int first, const u8 hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07006032{
Alexander Duyckb6ec8952010-11-16 19:26:49 -08006033 struct device *dev = tx_ring->dev;
Auke Kok9a799d72007-09-15 14:07:45 -07006034 struct ixgbe_tx_buffer *tx_buffer_info;
Yi Zoueacd73f2009-05-13 13:11:06 +00006035 unsigned int len;
6036 unsigned int total = skb->len;
Auke Kok9a799d72007-09-15 14:07:45 -07006037 unsigned int offset = 0, size, count = 0, i;
6038 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
6039 unsigned int f;
Alexander Duyck8ad494b2010-11-16 19:26:47 -08006040 unsigned int bytecount = skb->len;
6041 u16 gso_segs = 1;
Auke Kok9a799d72007-09-15 14:07:45 -07006042
6043 i = tx_ring->next_to_use;
6044
Yi Zoueacd73f2009-05-13 13:11:06 +00006045 if (tx_flags & IXGBE_TX_FLAGS_FCOE)
6046 /* excluding fcoe_crc_eof for FCoE */
6047 total -= sizeof(struct fcoe_crc_eof);
6048
6049 len = min(skb_headlen(skb), total);
Auke Kok9a799d72007-09-15 14:07:45 -07006050 while (len) {
6051 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6052 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6053
6054 tx_buffer_info->length = size;
Alexander Duycke5a43542009-12-02 16:46:56 +00006055 tx_buffer_info->mapped_as_page = false;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08006056 tx_buffer_info->dma = dma_map_single(dev,
Alexander Duycke5a43542009-12-02 16:46:56 +00006057 skb->data + offset,
Nick Nunley1b507732010-04-27 13:10:27 +00006058 size, DMA_TO_DEVICE);
Alexander Duyckb6ec8952010-11-16 19:26:49 -08006059 if (dma_mapping_error(dev, tx_buffer_info->dma))
Alexander Duycke5a43542009-12-02 16:46:56 +00006060 goto dma_error;
Auke Kok9a799d72007-09-15 14:07:45 -07006061 tx_buffer_info->time_stamp = jiffies;
6062 tx_buffer_info->next_to_watch = i;
6063
6064 len -= size;
Yi Zoueacd73f2009-05-13 13:11:06 +00006065 total -= size;
Auke Kok9a799d72007-09-15 14:07:45 -07006066 offset += size;
6067 count++;
Alexander Duyck44df32c2009-03-31 21:34:23 +00006068
6069 if (len) {
6070 i++;
6071 if (i == tx_ring->count)
6072 i = 0;
6073 }
Auke Kok9a799d72007-09-15 14:07:45 -07006074 }
6075
6076 for (f = 0; f < nr_frags; f++) {
6077 struct skb_frag_struct *frag;
6078
6079 frag = &skb_shinfo(skb)->frags[f];
Yi Zoueacd73f2009-05-13 13:11:06 +00006080 len = min((unsigned int)frag->size, total);
Alexander Duycke5a43542009-12-02 16:46:56 +00006081 offset = frag->page_offset;
Auke Kok9a799d72007-09-15 14:07:45 -07006082
6083 while (len) {
Alexander Duyck44df32c2009-03-31 21:34:23 +00006084 i++;
6085 if (i == tx_ring->count)
6086 i = 0;
6087
Auke Kok9a799d72007-09-15 14:07:45 -07006088 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6089 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6090
6091 tx_buffer_info->length = size;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08006092 tx_buffer_info->dma = dma_map_page(dev,
Alexander Duycke5a43542009-12-02 16:46:56 +00006093 frag->page,
6094 offset, size,
Nick Nunley1b507732010-04-27 13:10:27 +00006095 DMA_TO_DEVICE);
Alexander Duycke5a43542009-12-02 16:46:56 +00006096 tx_buffer_info->mapped_as_page = true;
Alexander Duyckb6ec8952010-11-16 19:26:49 -08006097 if (dma_mapping_error(dev, tx_buffer_info->dma))
Alexander Duycke5a43542009-12-02 16:46:56 +00006098 goto dma_error;
Auke Kok9a799d72007-09-15 14:07:45 -07006099 tx_buffer_info->time_stamp = jiffies;
6100 tx_buffer_info->next_to_watch = i;
6101
6102 len -= size;
Yi Zoueacd73f2009-05-13 13:11:06 +00006103 total -= size;
Auke Kok9a799d72007-09-15 14:07:45 -07006104 offset += size;
6105 count++;
Auke Kok9a799d72007-09-15 14:07:45 -07006106 }
Yi Zoueacd73f2009-05-13 13:11:06 +00006107 if (total == 0)
6108 break;
Auke Kok9a799d72007-09-15 14:07:45 -07006109 }
Alexander Duyck44df32c2009-03-31 21:34:23 +00006110
Alexander Duyck8ad494b2010-11-16 19:26:47 -08006111 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6112 gso_segs = skb_shinfo(skb)->gso_segs;
6113#ifdef IXGBE_FCOE
6114 /* adjust for FCoE Sequence Offload */
6115 else if (tx_flags & IXGBE_TX_FLAGS_FSO)
6116 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
6117 skb_shinfo(skb)->gso_size);
6118#endif /* IXGBE_FCOE */
6119 bytecount += (gso_segs - 1) * hdr_len;
6120
6121 /* multiply data chunks by size of headers */
6122 tx_ring->tx_buffer_info[i].bytecount = bytecount;
6123 tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
Auke Kok9a799d72007-09-15 14:07:45 -07006124 tx_ring->tx_buffer_info[i].skb = skb;
6125 tx_ring->tx_buffer_info[first].next_to_watch = i;
6126
6127 return count;
Alexander Duycke5a43542009-12-02 16:46:56 +00006128
6129dma_error:
Emil Tantilov849c4542010-06-03 16:53:41 +00006130 e_dev_err("TX DMA map failed\n");
Alexander Duycke5a43542009-12-02 16:46:56 +00006131
6132 /* clear timestamp and dma mappings for failed tx_buffer_info map */
6133 tx_buffer_info->dma = 0;
6134 tx_buffer_info->time_stamp = 0;
6135 tx_buffer_info->next_to_watch = 0;
Roel Kluinc1fa3472010-01-19 14:21:45 +00006136 if (count)
6137 count--;
Alexander Duycke5a43542009-12-02 16:46:56 +00006138
6139 /* clear timestamp and dma mappings for remaining portion of packet */
Roel Kluinc1fa3472010-01-19 14:21:45 +00006140 while (count--) {
Joe Perchese8e9f692010-09-07 21:34:53 +00006141 if (i == 0)
Alexander Duycke5a43542009-12-02 16:46:56 +00006142 i += tx_ring->count;
Roel Kluinc1fa3472010-01-19 14:21:45 +00006143 i--;
Alexander Duycke5a43542009-12-02 16:46:56 +00006144 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyckb6ec8952010-11-16 19:26:49 -08006145 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Alexander Duycke5a43542009-12-02 16:46:56 +00006146 }
6147
Anton Blancharde44d38e2010-02-03 13:12:51 +00006148 return 0;
Auke Kok9a799d72007-09-15 14:07:45 -07006149}
6150
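/*
 * ixgbe_tx_queue - write the advanced transmit descriptors for the buffers
 * mapped by ixgbe_tx_map().  The cmd_type and olinfo bits are derived from
 * tx_flags (VLAN insertion, TSO, checksum offload, FCoE), EOP/RS are set on
 * the last descriptor, and after a write barrier next_to_use and the ring
 * tail register are updated so the hardware starts fetching descriptors.
 */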
Alexander Duyck84ea2592010-11-16 19:26:49 -08006151static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
Joe Perchese8e9f692010-09-07 21:34:53 +00006152 int tx_flags, int count, u32 paylen, u8 hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07006153{
6154 union ixgbe_adv_tx_desc *tx_desc = NULL;
6155 struct ixgbe_tx_buffer *tx_buffer_info;
6156 u32 olinfo_status = 0, cmd_type_len = 0;
6157 unsigned int i;
6158 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
6159
6160 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
6161
6162 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
6163
6164 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
6165 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
6166
6167 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
6168 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6169
6170 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Joe Perchese8e9f692010-09-07 21:34:53 +00006171 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07006172
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07006173 /* use index 1 context for tso */
6174 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07006175 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6176 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
Joe Perchese8e9f692010-09-07 21:34:53 +00006177 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07006178
6179 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6180 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Joe Perchese8e9f692010-09-07 21:34:53 +00006181 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07006182
Yi Zoueacd73f2009-05-13 13:11:06 +00006183 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6184 olinfo_status |= IXGBE_ADVTXD_CC;
6185 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6186 if (tx_flags & IXGBE_TX_FLAGS_FSO)
6187 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6188 }
6189
Auke Kok9a799d72007-09-15 14:07:45 -07006190 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
6191
6192 i = tx_ring->next_to_use;
6193 while (count--) {
6194 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck31f05a22010-08-19 13:40:31 +00006195 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
Auke Kok9a799d72007-09-15 14:07:45 -07006196 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6197 tx_desc->read.cmd_type_len =
Joe Perchese8e9f692010-09-07 21:34:53 +00006198 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
Auke Kok9a799d72007-09-15 14:07:45 -07006199 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Auke Kok9a799d72007-09-15 14:07:45 -07006200 i++;
6201 if (i == tx_ring->count)
6202 i = 0;
6203 }
6204
6205 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
6206
6207 /*
6208 * Force memory writes to complete before letting h/w
6209 * know there are new descriptors to fetch. (Only
6210 * applicable for weak-ordered memory model archs,
6211 * such as IA-64).
6212 */
6213 wmb();
6214
6215 tx_ring->next_to_use = i;
Alexander Duyck84ea2592010-11-16 19:26:49 -08006216 writel(i, tx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07006217}
6218
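/*
 * ixgbe_atr - sample an outgoing TCP/IPv4 flow and program an 82599 Flow
 * Director signature filter for it on the given queue, so that receive
 * traffic belonging to the same flow is steered to that queue.  Source and
 * destination fields are swapped because the filter must match the flow as
 * the receiver sees it; non-IPv4 and non-TCP frames are ignored.
 */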
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006219static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
Hao Zheng5e09a102010-11-11 13:47:59 +00006220 int queue, u32 tx_flags, __be16 protocol)
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006221{
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006222 struct ixgbe_atr_input atr_input;
6223 struct tcphdr *th;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006224 struct iphdr *iph = ip_hdr(skb);
6225 struct ethhdr *eth = (struct ethhdr *)skb->data;
6226 u16 vlan_id, src_port, dst_port, flex_bytes;
6227 u32 src_ipv4_addr, dst_ipv4_addr;
6228 u8 l4type = 0;
6229
Guillaume Gaudonvilled3ead242010-06-29 18:29:00 +00006230 /* Right now, we support IPv4 only */
Hao Zheng5e09a102010-11-11 13:47:59 +00006231 if (protocol != htons(ETH_P_IP))
Guillaume Gaudonvilled3ead242010-06-29 18:29:00 +00006232 return;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006233 /* check if we're UDP or TCP */
6234 if (iph->protocol == IPPROTO_TCP) {
6235 th = tcp_hdr(skb);
6236 src_port = th->source;
6237 dst_port = th->dest;
6238 l4type |= IXGBE_ATR_L4TYPE_TCP;
6239 /* l4type IPv4 type is 0, no need to assign */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006240 } else {
6241 /* Unsupported L4 header, just bail here */
6242 return;
6243 }
6244
6245 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6246
6247 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
Joe Perchese8e9f692010-09-07 21:34:53 +00006248 IXGBE_TX_FLAGS_VLAN_SHIFT;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006249 src_ipv4_addr = iph->saddr;
6250 dst_ipv4_addr = iph->daddr;
6251 flex_bytes = eth->h_proto;
6252
6253 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
6254 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
6255 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
6256 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
6257 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
6258 /* src and dst are inverted, think how the receiver sees them */
6259 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
6260 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
6261
6262 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6263 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
6264}
6265
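/*
 * __ixgbe_maybe_stop_tx - stop the tx subqueue when the ring is running out
 * of descriptors.  After stopping, the free descriptor count is re-checked
 * (with a memory barrier in between) in case the cleanup path freed space
 * concurrently; if it did, the queue is restarted and restart_queue counted.
 */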
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006266static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006267{
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006268 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006269 /* Herbert's original patch had:
6270 * smp_mb__after_netif_stop_queue();
6271 * but since that doesn't exist yet, just open code it. */
6272 smp_mb();
6273
6274 /* We need to check again in a case another CPU has just
6275 * made room available. */
6276 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
6277 return -EBUSY;
6278
6279 /* A reprieve! - use start_queue because it doesn't call schedule */
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006280 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
Alexander Duyck5b7da512010-11-16 19:26:50 -08006281 ++tx_ring->tx_stats.restart_queue;
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006282 return 0;
6283}
6284
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006285static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006286{
6287 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6288 return 0;
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006289 return __ixgbe_maybe_stop_tx(tx_ring, size);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08006290}
6291
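/*
 * ixgbe_select_queue - choose the tx queue for an skb.  FCoE/FIP frames are
 * mapped into the FCoE ring range (or to the DCB user priority when DCB is
 * enabled), Flow Director hashing keeps flows on the transmitting CPU's
 * queue, DCB uses the frame's priority bits, and everything else falls back
 * to skb_tx_hash().
 */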
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07006292static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6293{
6294 struct ixgbe_adapter *adapter = netdev_priv(dev);
Yi Zou5f715822009-12-03 11:32:44 +00006295 int txq = smp_processor_id();
John Fastabend56075a92010-07-26 20:41:31 +00006296#ifdef IXGBE_FCOE
Hao Zheng5e09a102010-11-11 13:47:59 +00006297 __be16 protocol;
6298
6299 protocol = vlan_get_protocol(skb);
6300
6301 if ((protocol == htons(ETH_P_FCOE)) ||
6302 (protocol == htons(ETH_P_FIP))) {
John Fastabend56075a92010-07-26 20:41:31 +00006303 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6304 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6305 txq += adapter->ring_feature[RING_F_FCOE].mask;
6306 return txq;
John Fastabend4bc091d2010-08-08 15:46:15 +00006307#ifdef CONFIG_IXGBE_DCB
John Fastabend56075a92010-07-26 20:41:31 +00006308 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6309 txq = adapter->fcoe.up;
6310 return txq;
John Fastabend4bc091d2010-08-08 15:46:15 +00006311#endif
John Fastabend56075a92010-07-26 20:41:31 +00006312 }
6313 }
6314#endif
6315
Krishna Kumarfdd3d632010-02-03 13:13:10 +00006316 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
6317 while (unlikely(txq >= dev->real_num_tx_queues))
6318 txq -= dev->real_num_tx_queues;
Yi Zou5f715822009-12-03 11:32:44 +00006319 return txq;
Krishna Kumarfdd3d632010-02-03 13:13:10 +00006320 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006321
John Fastabend2ea186a2010-02-27 03:28:24 -08006322 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6323 if (skb->priority == TC_PRIO_CONTROL)
6324 txq = adapter->ring_feature[RING_F_DCB].indices-1;
6325 else
6326 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
6327 >> 13;
6328 return txq;
6329 }
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07006330
6331 return skb_tx_hash(dev, skb);
6332}
6333
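/*
 * ixgbe_xmit_frame_ring - main transmit routine for a ring: collect the
 * VLAN/DCB/FCoE tx_flags, make sure enough descriptors are free, set up
 * TSO/FSO/checksum offload context, map the skb with ixgbe_tx_map(), sample
 * the flow for ATR when the ring has a sample rate, and hand the descriptors
 * to hardware via ixgbe_tx_queue().
 */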
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006334netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
Alexander Duyck84418e32010-08-19 13:40:54 +00006335 struct ixgbe_adapter *adapter,
6336 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07006337{
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006338 struct net_device *netdev = tx_ring->netdev;
Eric Dumazet60d51132009-12-08 07:22:03 +00006339 struct netdev_queue *txq;
Auke Kok9a799d72007-09-15 14:07:45 -07006340 unsigned int first;
6341 unsigned int tx_flags = 0;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08006342 u8 hdr_len = 0;
Yi Zou5f715822009-12-03 11:32:44 +00006343 int tso;
Auke Kok9a799d72007-09-15 14:07:45 -07006344 int count = 0;
6345 unsigned int f;
Hao Zheng5e09a102010-11-11 13:47:59 +00006346 __be16 protocol;
6347
6348 protocol = vlan_get_protocol(skb);
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006349
Jesse Grosseab6d182010-10-20 13:56:03 +00006350 if (vlan_tx_tag_present(skb)) {
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006351 tx_flags |= vlan_tx_tag_get(skb);
Alexander Duyck2f90b862008-11-20 20:52:10 -08006352 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6353 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
Yi Zou5f715822009-12-03 11:32:44 +00006354 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
Alexander Duyck2f90b862008-11-20 20:52:10 -08006355 }
6356 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6357 tx_flags |= IXGBE_TX_FLAGS_VLAN;
John Fastabend33c66bd2010-05-18 16:00:11 +00006358 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6359 skb->priority != TC_PRIO_CONTROL) {
John Fastabend2ea186a2010-02-27 03:28:24 -08006360 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
6361 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6362 tx_flags |= IXGBE_TX_FLAGS_VLAN;
Auke Kok9a799d72007-09-15 14:07:45 -07006363 }
Yi Zoueacd73f2009-05-13 13:11:06 +00006364
Yi Zou09ad1cc2009-09-03 14:56:10 +00006365#ifdef IXGBE_FCOE
John Fastabend56075a92010-07-26 20:41:31 +00006366 /* for FCoE with DCB, we force the priority to what
6367 * was specified by the switch */
6368 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
Hao Zheng5e09a102010-11-11 13:47:59 +00006369 (protocol == htons(ETH_P_FCOE) ||
6370 protocol == htons(ETH_P_FIP))) {
John Fastabend4bc091d2010-08-08 15:46:15 +00006371#ifdef CONFIG_IXGBE_DCB
6372 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6373 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6374 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6375 tx_flags |= ((adapter->fcoe.up << 13)
6376 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6377 }
6378#endif
Robert Loveca77cd52010-03-24 12:45:00 +00006379 /* flag for FCoE offloads */
Hao Zheng5e09a102010-11-11 13:47:59 +00006380 if (protocol == htons(ETH_P_FCOE))
Robert Loveca77cd52010-03-24 12:45:00 +00006381 tx_flags |= IXGBE_TX_FLAGS_FCOE;
Yi Zou09ad1cc2009-09-03 14:56:10 +00006382 }
Robert Loveca77cd52010-03-24 12:45:00 +00006383#endif
6384
Yi Zoueacd73f2009-05-13 13:11:06 +00006385 /* four things can cause us to need a context descriptor */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006386 if (skb_is_gso(skb) ||
6387 (skb->ip_summed == CHECKSUM_PARTIAL) ||
Yi Zoueacd73f2009-05-13 13:11:06 +00006388 (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
6389 (tx_flags & IXGBE_TX_FLAGS_FCOE))
Auke Kok9a799d72007-09-15 14:07:45 -07006390 count++;
6391
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07006392 count += TXD_USE_COUNT(skb_headlen(skb));
6393 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
Auke Kok9a799d72007-09-15 14:07:45 -07006394 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6395
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006396 if (ixgbe_maybe_stop_tx(tx_ring, count)) {
Alexander Duyck5b7da512010-11-16 19:26:50 -08006397 tx_ring->tx_stats.tx_busy++;
Auke Kok9a799d72007-09-15 14:07:45 -07006398 return NETDEV_TX_BUSY;
6399 }
Auke Kok9a799d72007-09-15 14:07:45 -07006400
Auke Kok9a799d72007-09-15 14:07:45 -07006401 first = tx_ring->next_to_use;
Yi Zoueacd73f2009-05-13 13:11:06 +00006402 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6403#ifdef IXGBE_FCOE
6404 /* setup tx offload for FCoE */
6405 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
6406 if (tso < 0) {
6407 dev_kfree_skb_any(skb);
6408 return NETDEV_TX_OK;
6409 }
6410 if (tso)
6411 tx_flags |= IXGBE_TX_FLAGS_FSO;
6412#endif /* IXGBE_FCOE */
6413 } else {
Hao Zheng5e09a102010-11-11 13:47:59 +00006414 if (protocol == htons(ETH_P_IP))
Yi Zoueacd73f2009-05-13 13:11:06 +00006415 tx_flags |= IXGBE_TX_FLAGS_IPV4;
Hao Zheng5e09a102010-11-11 13:47:59 +00006416 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
6417 protocol);
Yi Zoueacd73f2009-05-13 13:11:06 +00006418 if (tso < 0) {
6419 dev_kfree_skb_any(skb);
6420 return NETDEV_TX_OK;
6421 }
6422
6423 if (tso)
6424 tx_flags |= IXGBE_TX_FLAGS_TSO;
Hao Zheng5e09a102010-11-11 13:47:59 +00006425 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
6426 protocol) &&
Yi Zoueacd73f2009-05-13 13:11:06 +00006427 (skb->ip_summed == CHECKSUM_PARTIAL))
6428 tx_flags |= IXGBE_TX_FLAGS_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07006429 }
6430
Alexander Duyck8ad494b2010-11-16 19:26:47 -08006431 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
Alexander Duyck44df32c2009-03-31 21:34:23 +00006432 if (count) {
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006433 /* add the ATR filter if ATR is on */
6434 if (tx_ring->atr_sample_rate) {
6435 ++tx_ring->atr_count;
6436 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
Joe Perchese8e9f692010-09-07 21:34:53 +00006437 test_bit(__IXGBE_FDIR_INIT_DONE,
6438 &tx_ring->reinit_state)) {
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006439 ixgbe_atr(adapter, skb, tx_ring->queue_index,
Hao Zheng5e09a102010-11-11 13:47:59 +00006440 tx_flags, protocol);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00006441 tx_ring->atr_count = 0;
6442 }
6443 }
Eric Dumazet60d51132009-12-08 07:22:03 +00006444 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6445 txq->tx_bytes += skb->len;
6446 txq->tx_packets++;
Alexander Duyck84ea2592010-11-16 19:26:49 -08006447 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006448 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
Auke Kok9a799d72007-09-15 14:07:45 -07006449
Alexander Duyck44df32c2009-03-31 21:34:23 +00006450 } else {
6451 dev_kfree_skb_any(skb);
6452 tx_ring->tx_buffer_info[first].time_stamp = 0;
6453 tx_ring->next_to_use = first;
6454 }
Auke Kok9a799d72007-09-15 14:07:45 -07006455
6456 return NETDEV_TX_OK;
6457}
6458
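/*
 * ndo_start_xmit entry point: look up the tx ring selected for this skb
 * (skb->queue_mapping) and pass the frame to ixgbe_xmit_frame_ring().
 */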
Alexander Duyck84418e32010-08-19 13:40:54 +00006459static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
6460{
6461 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6462 struct ixgbe_ring *tx_ring;
6463
6464 tx_ring = adapter->tx_ring[skb->queue_mapping];
Alexander Duyckfc77dc32010-11-16 19:26:51 -08006465 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
Alexander Duyck84418e32010-08-19 13:40:54 +00006466}
6467
Auke Kok9a799d72007-09-15 14:07:45 -07006468/**
Auke Kok9a799d72007-09-15 14:07:45 -07006469 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6470 * @netdev: network interface device structure
6471 * @p: pointer to an address structure
6472 *
6473 * Returns 0 on success, negative on failure
6474 **/
6475static int ixgbe_set_mac(struct net_device *netdev, void *p)
6476{
6477 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006478 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07006479 struct sockaddr *addr = p;
6480
6481 if (!is_valid_ether_addr(addr->sa_data))
6482 return -EADDRNOTAVAIL;
6483
6484 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006485 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9a799d72007-09-15 14:07:45 -07006486
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006487 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
6488 IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07006489
6490 return 0;
6491}
6492
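/*
 * MDIO accessors used via the kernel mdio helpers (see ixgbe_ioctl() and
 * mdio_mii_ioctl() below): register reads and writes are forwarded to the
 * PHY ops after checking that the request targets this device's PHY address.
 */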
Ben Hutchings6b73e102009-04-29 08:08:58 +00006493static int
6494ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6495{
6496 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6497 struct ixgbe_hw *hw = &adapter->hw;
6498 u16 value;
6499 int rc;
6500
6501 if (prtad != hw->phy.mdio.prtad)
6502 return -EINVAL;
6503 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6504 if (!rc)
6505 rc = value;
6506 return rc;
6507}
6508
6509static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6510 u16 addr, u16 value)
6511{
6512 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6513 struct ixgbe_hw *hw = &adapter->hw;
6514
6515 if (prtad != hw->phy.mdio.prtad)
6516 return -EINVAL;
6517 return hw->phy.ops.write_reg(hw, addr, devad, value);
6518}
6519
6520static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6521{
6522 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6523
6524 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6525}
6526
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006527/**
6528 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
Jiri Pirko31278e72009-06-17 01:12:19 +00006529 * netdev->dev_addrs
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006530 * @dev: network interface device structure
6531 *
6532 * Returns non-zero on failure
6533 **/
6534static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6535{
6536 int err = 0;
6537 struct ixgbe_adapter *adapter = netdev_priv(dev);
6538 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6539
6540 if (is_valid_ether_addr(mac->san_addr)) {
6541 rtnl_lock();
6542 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6543 rtnl_unlock();
6544 }
6545 return err;
6546}
6547
6548/**
6549 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
Jiri Pirko31278e72009-06-17 01:12:19 +00006550 * netdev->dev_addrs
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00006551 * @dev: network interface device structure
6552 *
6553 * Returns non-zero on failure
6554 **/
6555static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6556{
6557 int err = 0;
6558 struct ixgbe_adapter *adapter = netdev_priv(dev);
6559 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6560
6561 if (is_valid_ether_addr(mac->san_addr)) {
6562 rtnl_lock();
6563 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6564 rtnl_unlock();
6565 }
6566 return err;
6567}
6568
Auke Kok9a799d72007-09-15 14:07:45 -07006569#ifdef CONFIG_NET_POLL_CONTROLLER
6570/*
6571 * Polling 'interrupt' - used by things like netconsole to send skbs
6572 * without having to re-enable interrupts. It's not called while
6573 * the interrupt routine is executing.
6574 */
6575static void ixgbe_netpoll(struct net_device *netdev)
6576{
6577 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00006578 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07006579
Alexander Duyck1a647bd2010-01-13 01:49:13 +00006580 /* if interface is down do nothing */
6581 if (test_bit(__IXGBE_DOWN, &adapter->state))
6582 return;
6583
Auke Kok9a799d72007-09-15 14:07:45 -07006584 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00006585 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6586 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
6587 for (i = 0; i < num_q_vectors; i++) {
6588 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
6589 ixgbe_msix_clean_many(0, q_vector);
6590 }
6591 } else {
6592 ixgbe_intr(adapter->pdev->irq, netdev);
6593 }
Auke Kok9a799d72007-09-15 14:07:45 -07006594 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
Auke Kok9a799d72007-09-15 14:07:45 -07006595}
6596#endif
6597
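/*
 * ixgbe_get_stats64 - fill in 64-bit device statistics.  Tx counters are
 * folded from the netdev tx queues; rx packet and byte counts are read per
 * ring under the u64_stats seqcount, with RCU protecting against a ring
 * being freed underneath us; the remaining error counters are copied from
 * netdev->stats, which the watchdog task keeps up to date.
 */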
Eric Dumazetde1036b2010-10-20 23:00:04 +00006598static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6599 struct rtnl_link_stats64 *stats)
6600{
6601 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6602 int i;
6603
6604 /* accurate rx/tx bytes/packets stats */
6605 dev_txq_stats_fold(netdev, stats);
Eric Dumazet1a515022010-11-16 19:26:42 -08006606 rcu_read_lock();
Eric Dumazetde1036b2010-10-20 23:00:04 +00006607 for (i = 0; i < adapter->num_rx_queues; i++) {
Eric Dumazet1a515022010-11-16 19:26:42 -08006608 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
Eric Dumazetde1036b2010-10-20 23:00:04 +00006609 u64 bytes, packets;
6610 unsigned int start;
6611
Eric Dumazet1a515022010-11-16 19:26:42 -08006612 if (ring) {
6613 do {
6614 start = u64_stats_fetch_begin_bh(&ring->syncp);
6615 packets = ring->stats.packets;
6616 bytes = ring->stats.bytes;
6617 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6618 stats->rx_packets += packets;
6619 stats->rx_bytes += bytes;
6620 }
Eric Dumazetde1036b2010-10-20 23:00:04 +00006621 }
Eric Dumazet1a515022010-11-16 19:26:42 -08006622 rcu_read_unlock();
Eric Dumazetde1036b2010-10-20 23:00:04 +00006623 /* following stats updated by ixgbe_watchdog_task() */
6624 stats->multicast = netdev->stats.multicast;
6625 stats->rx_errors = netdev->stats.rx_errors;
6626 stats->rx_length_errors = netdev->stats.rx_length_errors;
6627 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6628 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6629 return stats;
6630}
6631
6632
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006633static const struct net_device_ops ixgbe_netdev_ops = {
Joe Perchese8e9f692010-09-07 21:34:53 +00006634 .ndo_open = ixgbe_open,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006635 .ndo_stop = ixgbe_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08006636 .ndo_start_xmit = ixgbe_xmit_frame,
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07006637 .ndo_select_queue = ixgbe_select_queue,
Chris Leeche90d4002009-03-10 16:00:24 +00006638 .ndo_set_rx_mode = ixgbe_set_rx_mode,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006639 .ndo_set_multicast_list = ixgbe_set_rx_mode,
6640 .ndo_validate_addr = eth_validate_addr,
6641 .ndo_set_mac_address = ixgbe_set_mac,
6642 .ndo_change_mtu = ixgbe_change_mtu,
6643 .ndo_tx_timeout = ixgbe_tx_timeout,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006644 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
6645 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
Ben Hutchings6b73e102009-04-29 08:08:58 +00006646 .ndo_do_ioctl = ixgbe_ioctl,
Greg Rose7f016482010-05-04 22:12:06 +00006647 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6648 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6649 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6650 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
Eric Dumazetde1036b2010-10-20 23:00:04 +00006651 .ndo_get_stats64 = ixgbe_get_stats64,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006652#ifdef CONFIG_NET_POLL_CONTROLLER
6653 .ndo_poll_controller = ixgbe_netpoll,
6654#endif
Yi Zou332d4a72009-05-13 13:11:53 +00006655#ifdef IXGBE_FCOE
6656 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
6657 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
Yi Zou8450ff82009-08-31 12:32:14 +00006658 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6659 .ndo_fcoe_disable = ixgbe_fcoe_disable,
Yi Zou61a1fa12009-10-28 18:24:56 +00006660 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
Yi Zou332d4a72009-05-13 13:11:53 +00006661#endif /* IXGBE_FCOE */
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006662};
6663
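/*
 * ixgbe_probe_vf - enable SR-IOV at probe time when the max_vfs module
 * parameter is set on an 82599.  The VF count is capped at 63 so the PF
 * keeps some resources for itself, per-VF state is allocated, the PF
 * mailbox is initialized, and RSC is disabled while SR-IOV is active.
 * On any failure SR-IOV is turned back off.
 */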
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006664static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6665 const struct ixgbe_info *ii)
6666{
6667#ifdef CONFIG_PCI_IOV
6668 struct ixgbe_hw *hw = &adapter->hw;
6669 int err;
6670
6671 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
6672 return;
6673
6674 /* The 82599 supports up to 64 VFs per physical function
6675 * but this implementation limits allocation to 63 so that
6676 * basic networking resources are still available to the
6677 * physical function
6678 */
6679 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
6680 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
6681 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
6682 if (err) {
Emil Tantilov396e7992010-07-01 20:05:12 +00006683 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006684 goto err_novfs;
6685 }
6686 /* If call to enable VFs succeeded then allocate memory
6687 * for per VF control structures.
6688 */
6689 adapter->vfinfo =
6690 kcalloc(adapter->num_vfs,
6691 sizeof(struct vf_data_storage), GFP_KERNEL);
6692 if (adapter->vfinfo) {
6693 /* Now that we're sure SR-IOV is enabled
6694 * and memory allocated set up the mailbox parameters
6695 */
6696 ixgbe_init_mbx_params_pf(hw);
6697 memcpy(&hw->mbx.ops, ii->mbx_ops,
6698 sizeof(hw->mbx.ops));
6699
6700 /* Disable RSC when in SR-IOV mode */
6701 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
6702 IXGBE_FLAG2_RSC_ENABLED);
6703 return;
6704 }
6705
6706 /* Oh oh */
Emil Tantilov396e7992010-07-01 20:05:12 +00006707 e_err(probe, "Unable to allocate memory for VF Data Storage - "
6708 "SRIOV disabled\n");
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006709 pci_disable_sriov(adapter->pdev);
6710
6711err_novfs:
6712 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
6713 adapter->num_vfs = 0;
6714#endif /* CONFIG_PCI_IOV */
6715}
6716
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006717/**
Auke Kok9a799d72007-09-15 14:07:45 -07006718 * ixgbe_probe - Device Initialization Routine
6719 * @pdev: PCI device information struct
6720 * @ent: entry in ixgbe_pci_tbl
6721 *
6722 * Returns 0 on success, negative on failure
6723 *
6724 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
6725 * The OS initialization, configuring of the adapter private structure,
6726 * and a hardware reset occur.
6727 **/
6728static int __devinit ixgbe_probe(struct pci_dev *pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00006729 const struct pci_device_id *ent)
Auke Kok9a799d72007-09-15 14:07:45 -07006730{
6731 struct net_device *netdev;
6732 struct ixgbe_adapter *adapter = NULL;
6733 struct ixgbe_hw *hw;
6734 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
Auke Kok9a799d72007-09-15 14:07:45 -07006735 static int cards_found;
6736 int i, err, pci_using_dac;
John Fastabendc85a2612010-02-25 23:15:21 +00006737 unsigned int indices = num_possible_cpus();
Yi Zoueacd73f2009-05-13 13:11:06 +00006738#ifdef IXGBE_FCOE
6739 u16 device_caps;
6740#endif
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006741 u32 part_num, eec;
Auke Kok9a799d72007-09-15 14:07:45 -07006742
Andy Gospodarekbded64a2010-07-21 06:40:31 +00006743 /* Catch broken hardware that put the wrong VF device ID in
6744 * the PCIe SR-IOV capability.
6745 */
6746 if (pdev->is_virtfn) {
6747 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
6748 pci_name(pdev), pdev->vendor, pdev->device);
6749 return -EINVAL;
6750 }
6751
gouji-new9ce77662009-05-06 10:44:45 +00006752 err = pci_enable_device_mem(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07006753 if (err)
6754 return err;
6755
Nick Nunley1b507732010-04-27 13:10:27 +00006756 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6757 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Auke Kok9a799d72007-09-15 14:07:45 -07006758 pci_using_dac = 1;
6759 } else {
Nick Nunley1b507732010-04-27 13:10:27 +00006760 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07006761 if (err) {
Nick Nunley1b507732010-04-27 13:10:27 +00006762 err = dma_set_coherent_mask(&pdev->dev,
6763 DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07006764 if (err) {
Dan Carpenterb8bc0422010-07-27 00:05:56 +00006765 dev_err(&pdev->dev,
6766 "No usable DMA configuration, aborting\n");
Auke Kok9a799d72007-09-15 14:07:45 -07006767 goto err_dma;
6768 }
6769 }
6770 pci_using_dac = 0;
6771 }
6772
gouji-new9ce77662009-05-06 10:44:45 +00006773 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00006774 IORESOURCE_MEM), ixgbe_driver_name);
Auke Kok9a799d72007-09-15 14:07:45 -07006775 if (err) {
Dan Carpenterb8bc0422010-07-27 00:05:56 +00006776 dev_err(&pdev->dev,
6777 "pci_request_selected_regions failed 0x%x\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07006778 goto err_pci_reg;
6779 }
6780
Frans Pop19d5afd2009-10-02 10:04:12 -07006781 pci_enable_pcie_error_reporting(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08006782
Auke Kok9a799d72007-09-15 14:07:45 -07006783 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07006784 pci_save_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07006785
John Fastabendc85a2612010-02-25 23:15:21 +00006786 if (ii->mac == ixgbe_mac_82598EB)
6787 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
6788 else
6789 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
6790
6791 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
6792#ifdef IXGBE_FCOE
6793 indices += min_t(unsigned int, num_possible_cpus(),
6794 IXGBE_MAX_FCOE_INDICES);
6795#endif
John Fastabendc85a2612010-02-25 23:15:21 +00006796 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
Auke Kok9a799d72007-09-15 14:07:45 -07006797 if (!netdev) {
6798 err = -ENOMEM;
6799 goto err_alloc_etherdev;
6800 }
6801
Auke Kok9a799d72007-09-15 14:07:45 -07006802 SET_NETDEV_DEV(netdev, &pdev->dev);
6803
Auke Kok9a799d72007-09-15 14:07:45 -07006804 adapter = netdev_priv(netdev);
Alexander Duyckc60fbb02010-11-16 19:26:54 -08006805 pci_set_drvdata(pdev, adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07006806
6807 adapter->netdev = netdev;
6808 adapter->pdev = pdev;
6809 hw = &adapter->hw;
6810 hw->back = adapter;
6811 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6812
Jeff Kirsher05857982008-09-11 19:57:00 -07006813 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
Joe Perchese8e9f692010-09-07 21:34:53 +00006814 pci_resource_len(pdev, 0));
Auke Kok9a799d72007-09-15 14:07:45 -07006815 if (!hw->hw_addr) {
6816 err = -EIO;
6817 goto err_ioremap;
6818 }
6819
6820 for (i = 1; i <= 5; i++) {
6821 if (pci_resource_len(pdev, i) == 0)
6822 continue;
6823 }
6824
Stephen Hemminger0edc3522008-11-19 22:24:29 -08006825 netdev->netdev_ops = &ixgbe_netdev_ops;
Auke Kok9a799d72007-09-15 14:07:45 -07006826 ixgbe_set_ethtool_ops(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07006827 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9a799d72007-09-15 14:07:45 -07006828 strcpy(netdev->name, pci_name(pdev));
6829
Auke Kok9a799d72007-09-15 14:07:45 -07006830 adapter->bd_number = cards_found;
6831
Auke Kok9a799d72007-09-15 14:07:45 -07006832 /* Setup hw api */
6833 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08006834 hw->mac.type = ii->mac;
Auke Kok9a799d72007-09-15 14:07:45 -07006835
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006836 /* EEPROM */
6837 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
6838 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6839 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
6840 if (!(eec & (1 << 8)))
6841 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
6842
6843 /* PHY */
6844 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
Donald Skidmorec4900be2008-11-20 21:11:42 -08006845 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
Ben Hutchings6b73e102009-04-29 08:08:58 +00006846 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
6847 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
6848 hw->phy.mdio.mmds = 0;
6849 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
6850 hw->phy.mdio.dev = netdev;
6851 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
6852 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
Donald Skidmorec4900be2008-11-20 21:11:42 -08006853
6854 /* set up this timer and work struct before calling get_invariants
6855 * which might start the timer
6856 */
6857 init_timer(&adapter->sfp_timer);
Joe Perchesc061b182010-08-23 18:20:03 +00006858 adapter->sfp_timer.function = ixgbe_sfp_timer;
Donald Skidmorec4900be2008-11-20 21:11:42 -08006859 adapter->sfp_timer.data = (unsigned long) adapter;
6860
6861 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006862
PJ Waskiewicze8e26352009-02-27 15:45:05 +00006863 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
6864 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
6865
6866 /* a new SFP+ module arrival, called from GPI SDP2 context */
6867 INIT_WORK(&adapter->sfp_config_module_task,
Joe Perchese8e9f692010-09-07 21:34:53 +00006868 ixgbe_sfp_config_module_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00006869
Don Skidmore8ca783a2009-05-26 20:40:47 -07006870 ii->get_invariants(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07006871
6872 /* setup the private structure */
6873 err = ixgbe_sw_init(adapter);
6874 if (err)
6875 goto err_sw_init;
6876
Don Skidmoree86bff02010-02-11 04:14:08 +00006877 /* Make it possible for the adapter to be woken up via WOL */
6878 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6879 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6880
Don Skidmorebf069c92009-05-07 10:39:54 +00006881 /*
6882 * If there is a fan on this device and it has failed log the
6883 * failure.
6884 */
6885 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
6886 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
6887 if (esdp & IXGBE_ESDP_SDP1)
Emil Tantilov396e7992010-07-01 20:05:12 +00006888 e_crit(probe, "Fan has stopped, replace the adapter\n");
Don Skidmorebf069c92009-05-07 10:39:54 +00006889 }
6890
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006891 /* reset_hw fills in the perm_addr as well */
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07006892 hw->phy.reset_if_overtemp = true;
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006893 err = hw->mac.ops.reset_hw(hw);
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07006894 hw->phy.reset_if_overtemp = false;
Don Skidmore8ca783a2009-05-26 20:40:47 -07006895 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
6896 hw->mac.type == ixgbe_mac_82598EB) {
6897 /*
6898 * Start a kernel thread to watch for a module to arrive.
6899 * Only do this for 82598, since 82599 will generate
6900 * interrupts on module arrival.
6901 */
6902 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6903 mod_timer(&adapter->sfp_timer,
6904 round_jiffies(jiffies + (2 * HZ)));
6905 err = 0;
6906 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Emil Tantilov849c4542010-06-03 16:53:41 +00006907 e_dev_err("failed to initialize because an unsupported SFP+ "
6908 "module type was detected.\n");
6909 e_dev_err("Reload the driver after installing a supported "
6910 "module.\n");
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00006911 goto err_sw_init;
6912 } else if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00006913 e_dev_err("HW Init failed: %d\n", err);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006914 goto err_sw_init;
6915 }
6916
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006917 ixgbe_probe_vf(adapter, ii);
6918
Emil Tantilov396e7992010-07-01 20:05:12 +00006919 netdev->features = NETIF_F_SG |
Joe Perchese8e9f692010-09-07 21:34:53 +00006920 NETIF_F_IP_CSUM |
6921 NETIF_F_HW_VLAN_TX |
6922 NETIF_F_HW_VLAN_RX |
6923 NETIF_F_HW_VLAN_FILTER;
Auke Kok9a799d72007-09-15 14:07:45 -07006924
Jesse Brandeburge9990a92008-08-26 04:27:24 -07006925 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07006926 netdev->features |= NETIF_F_TSO;
Auke Kok9a799d72007-09-15 14:07:45 -07006927 netdev->features |= NETIF_F_TSO6;
Herbert Xu78b6f4c2009-01-18 21:49:45 -08006928 netdev->features |= NETIF_F_GRO;
Jeff Kirsherad31c402008-06-05 04:05:30 -07006929
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00006930 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6931 netdev->features |= NETIF_F_SCTP_CSUM;
6932
Jeff Kirsherad31c402008-06-05 04:05:30 -07006933 netdev->vlan_features |= NETIF_F_TSO;
6934 netdev->vlan_features |= NETIF_F_TSO6;
Jesse Brandeburg22f32b7a52008-08-26 04:27:18 -07006935 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00006936 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsherad31c402008-06-05 04:05:30 -07006937 netdev->vlan_features |= NETIF_F_SG;
6938
Greg Rose1cdd1ec2010-01-09 02:26:46 +00006939 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6940 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6941 IXGBE_FLAG_DCB_ENABLED);
Alexander Duyck2f90b862008-11-20 20:52:10 -08006942 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6943 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
6944
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08006945#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08006946 netdev->dcbnl_ops = &dcbnl_ops;
6947#endif
6948
Yi Zoueacd73f2009-05-13 13:11:06 +00006949#ifdef IXGBE_FCOE
Yi Zou0d551582009-07-22 14:07:12 +00006950 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
Yi Zoueacd73f2009-05-13 13:11:06 +00006951 if (hw->mac.ops.get_device_caps) {
6952 hw->mac.ops.get_device_caps(hw, &device_caps);
Yi Zou0d551582009-07-22 14:07:12 +00006953 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
6954 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
Yi Zoueacd73f2009-05-13 13:11:06 +00006955 }
6956 }
Yi Zou5e09d7f2010-07-19 13:59:52 +00006957 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
6958 netdev->vlan_features |= NETIF_F_FCOE_CRC;
6959 netdev->vlan_features |= NETIF_F_FSO;
6960 netdev->vlan_features |= NETIF_F_FCOE_MTU;
6961 }
Yi Zoueacd73f2009-05-13 13:11:06 +00006962#endif /* IXGBE_FCOE */
Yi Zou7b872a52010-09-22 17:57:58 +00006963 if (pci_using_dac) {
Auke Kok9a799d72007-09-15 14:07:45 -07006964 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00006965 netdev->vlan_features |= NETIF_F_HIGHDMA;
6966 }
Auke Kok9a799d72007-09-15 14:07:45 -07006967
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00006968 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
Alexander Duyckf8212f92009-04-27 22:42:37 +00006969 netdev->features |= NETIF_F_LRO;
6970
Auke Kok9a799d72007-09-15 14:07:45 -07006971 /* make sure the EEPROM is good */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006972 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
Emil Tantilov849c4542010-06-03 16:53:41 +00006973 e_dev_err("The EEPROM Checksum Is Not Valid\n");
Auke Kok9a799d72007-09-15 14:07:45 -07006974 err = -EIO;
6975 goto err_eeprom;
6976 }
6977
6978 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
6979 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6980
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07006981 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
Emil Tantilov849c4542010-06-03 16:53:41 +00006982 e_dev_err("invalid MAC address\n");
Auke Kok9a799d72007-09-15 14:07:45 -07006983 err = -EIO;
6984 goto err_eeprom;
6985 }
6986
Peter Waskiewicz61fac742010-04-27 00:38:15 +00006987 /* power down the optics */
6988 if (hw->phy.multispeed_fiber)
6989 hw->mac.ops.disable_tx_laser(hw);
6990
Auke Kok9a799d72007-09-15 14:07:45 -07006991 init_timer(&adapter->watchdog_timer);
Joe Perchesc061b182010-08-23 18:20:03 +00006992 adapter->watchdog_timer.function = ixgbe_watchdog;
Auke Kok9a799d72007-09-15 14:07:45 -07006993 adapter->watchdog_timer.data = (unsigned long)adapter;
6994
6995 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07006996 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07006997
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08006998 err = ixgbe_init_interrupt_scheme(adapter);
6999 if (err)
7000 goto err_sw_init;
Auke Kok9a799d72007-09-15 14:07:45 -07007001
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007002 switch (pdev->device) {
7003 case IXGBE_DEV_ID_82599_KX4:
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00007004 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
Joe Perchese8e9f692010-09-07 21:34:53 +00007005 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007006 break;
7007 default:
7008 adapter->wol = 0;
7009 break;
7010 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007011 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
7012
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00007013 /* pick up the PCI bus settings for reporting later */
7014 hw->mac.ops.get_bus_info(hw);
7015
Auke Kok9a799d72007-09-15 14:07:45 -07007016 /* print bus type/speed/width info */
Emil Tantilov849c4542010-06-03 16:53:41 +00007017 e_dev_info("(PCI Express:%s:%s) %pM\n",
Joe Perchese8e9f692010-09-07 21:34:53 +00007018 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
7019 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
7020 "Unknown"),
7021 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
7022 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
7023 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
7024 "Unknown"),
7025 netdev->dev_addr);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007026 ixgbe_read_pba_num_generic(hw, &part_num);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007027 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
Emil Tantilov849c4542010-06-03 16:53:41 +00007028 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
7029 "PBA No: %06x-%03x\n",
7030 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
7031 (part_num >> 8), (part_num & 0xff));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007032 else
Emil Tantilov849c4542010-06-03 16:53:41 +00007033 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
7034 hw->mac.type, hw->phy.type,
7035 (part_num >> 8), (part_num & 0xff));
Auke Kok9a799d72007-09-15 14:07:45 -07007036
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007037 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
Emil Tantilov849c4542010-06-03 16:53:41 +00007038 e_dev_warn("PCI-Express bandwidth available for this card is "
7039 "not sufficient for optimal performance.\n");
7040 e_dev_warn("For optimal performance a x8 PCI-Express slot "
7041 "is required.\n");
Auke Kok0c254d82008-02-11 09:25:56 -08007042 }
7043
Peter P Waskiewicz Jr34b03682009-02-05 23:54:42 -08007044 /* save off EEPROM version number */
7045 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
7046
Auke Kok9a799d72007-09-15 14:07:45 -07007047 /* reset the hardware with the new settings */
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00007048 err = hw->mac.ops.start_hw(hw);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07007049
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00007050 if (err == IXGBE_ERR_EEPROM_VERSION) {
7051 /* We are running on a pre-production device, log a warning */
Emil Tantilov849c4542010-06-03 16:53:41 +00007052 e_dev_warn("This device is a pre-production adapter/LOM. "
7053 "Please be aware there may be issues associated "
7054 "with your hardware. If you are experiencing "
7055 "problems please contact your Intel or hardware "
7056 "representative who provided you with this "
7057 "hardware.\n");
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00007058 }
Auke Kok9a799d72007-09-15 14:07:45 -07007059 strcpy(netdev->name, "eth%d");
7060 err = register_netdev(netdev);
7061 if (err)
7062 goto err_register;
7063
Jesse Brandeburg54386462009-04-17 20:44:27 +00007064 /* carrier off reporting is important to ethtool even BEFORE open */
7065 netif_carrier_off(netdev);
7066
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00007067 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7068 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7069 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
7070
Mallikarjuna R Chilakala119fc602010-05-20 23:07:06 -07007071 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
Joe Perchese8e9f692010-09-07 21:34:53 +00007072 INIT_WORK(&adapter->check_overtemp_task,
7073 ixgbe_check_overtemp_task);
Jeff Garzik5dd2d332008-10-16 05:09:31 -04007074#ifdef CONFIG_IXGBE_DCA
Denis V. Lunev652f0932008-03-27 14:39:17 +03007075 if (dca_add_requester(&pdev->dev) == 0) {
Jeb Cramerbd0362d2008-03-03 15:04:02 -08007076 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
Jeb Cramerbd0362d2008-03-03 15:04:02 -08007077 ixgbe_setup_dca(adapter);
7078 }
7079#endif
Greg Rose1cdd1ec2010-01-09 02:26:46 +00007080 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
Emil Tantilov396e7992010-07-01 20:05:12 +00007081 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
Greg Rose1cdd1ec2010-01-09 02:26:46 +00007082 for (i = 0; i < adapter->num_vfs; i++)
7083 ixgbe_vf_configuration(pdev, (i | 0x10000000));
7084 }
7085
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00007086 /* add san mac addr to netdev */
7087 ixgbe_add_sanmac_netdev(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007088
Emil Tantilov849c4542010-06-03 16:53:41 +00007089 e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
Auke Kok9a799d72007-09-15 14:07:45 -07007090 cards_found++;
7091 return 0;
7092
7093err_register:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08007094 ixgbe_release_hw_control(adapter);
Alexander Duyck7a921c92009-05-06 10:43:28 +00007095 ixgbe_clear_interrupt_scheme(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07007096err_sw_init:
7097err_eeprom:
Greg Rose1cdd1ec2010-01-09 02:26:46 +00007098 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7099 ixgbe_disable_sriov(adapter);
Donald Skidmorec4900be2008-11-20 21:11:42 -08007100 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7101 del_timer_sync(&adapter->sfp_timer);
7102 cancel_work_sync(&adapter->sfp_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007103 cancel_work_sync(&adapter->multispeed_fiber_task);
7104 cancel_work_sync(&adapter->sfp_config_module_task);
Auke Kok9a799d72007-09-15 14:07:45 -07007105 iounmap(hw->hw_addr);
7106err_ioremap:
7107 free_netdev(netdev);
7108err_alloc_etherdev:
Joe Perchese8e9f692010-09-07 21:34:53 +00007109 pci_release_selected_regions(pdev,
7110 pci_select_bars(pdev, IORESOURCE_MEM));
Auke Kok9a799d72007-09-15 14:07:45 -07007111err_pci_reg:
7112err_dma:
7113 pci_disable_device(pdev);
7114 return err;
7115}
7116
7117/**
7118 * ixgbe_remove - Device Removal Routine
7119 * @pdev: PCI device information struct
7120 *
7121 * ixgbe_remove is called by the PCI subsystem to alert the driver
7122 * that it should release a PCI device. This could be caused by a
7123 * Hot-Plug event, or because the driver is going to be removed from
7124 * memory.
7125 **/
7126static void __devexit ixgbe_remove(struct pci_dev *pdev)
7127{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08007128 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7129 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07007130
7131 set_bit(__IXGBE_DOWN, &adapter->state);
Donald Skidmorec4900be2008-11-20 21:11:42 -08007132 /* clear the module not found bit to make sure the worker won't
7133 * reschedule
7134 */
7135 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
Auke Kok9a799d72007-09-15 14:07:45 -07007136 del_timer_sync(&adapter->watchdog_timer);
7137
Donald Skidmorec4900be2008-11-20 21:11:42 -08007138 del_timer_sync(&adapter->sfp_timer);
7139 cancel_work_sync(&adapter->watchdog_task);
7140 cancel_work_sync(&adapter->sfp_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00007141 cancel_work_sync(&adapter->multispeed_fiber_task);
7142 cancel_work_sync(&adapter->sfp_config_module_task);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00007143 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7144 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7145 cancel_work_sync(&adapter->fdir_reinit_task);
Auke Kok9a799d72007-09-15 14:07:45 -07007146 flush_scheduled_work();
7147
Jeff Garzik5dd2d332008-10-16 05:09:31 -04007148#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08007149 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
7150 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
7151 dca_remove_requester(&pdev->dev);
7152 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
7153 }
7154
7155#endif
Yi Zou332d4a72009-05-13 13:11:53 +00007156#ifdef IXGBE_FCOE
7157 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7158 ixgbe_cleanup_fcoe(adapter);
7159
7160#endif /* IXGBE_FCOE */
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00007161
7162 /* remove the added san mac */
7163 ixgbe_del_sanmac_netdev(netdev);
7164
Donald Skidmorec4900be2008-11-20 21:11:42 -08007165 if (netdev->reg_state == NETREG_REGISTERED)
7166 unregister_netdev(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07007167
Greg Rose1cdd1ec2010-01-09 02:26:46 +00007168 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7169 ixgbe_disable_sriov(adapter);
7170
Alexander Duyck7a921c92009-05-06 10:43:28 +00007171 ixgbe_clear_interrupt_scheme(adapter);
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08007172
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08007173 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07007174
7175 iounmap(adapter->hw.hw_addr);
gouji-new9ce77662009-05-06 10:44:45 +00007176 pci_release_selected_regions(pdev, pci_select_bars(pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00007177 IORESOURCE_MEM));
Auke Kok9a799d72007-09-15 14:07:45 -07007178
Emil Tantilov849c4542010-06-03 16:53:41 +00007179 e_dev_info("complete\n");
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08007180
Auke Kok9a799d72007-09-15 14:07:45 -07007181 free_netdev(netdev);
7182
Frans Pop19d5afd2009-10-02 10:04:12 -07007183 pci_disable_pcie_error_reporting(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007184
Auke Kok9a799d72007-09-15 14:07:45 -07007185 pci_disable_device(pdev);
7186}
7187
7188/**
7189 * ixgbe_io_error_detected - called when PCI error is detected
7190 * @pdev: Pointer to PCI device
7191 * @state: The current pci connection state
7192 *
7193 * This function is called after a PCI bus error affecting
7194 * this device has been detected.
7195 */
7196static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
Joe Perchese8e9f692010-09-07 21:34:53 +00007197 pci_channel_state_t state)
Auke Kok9a799d72007-09-15 14:07:45 -07007198{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08007199 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7200 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07007201
7202 netif_device_detach(netdev);
7203
Breno Leitao3044b8d2009-05-06 10:44:26 +00007204 if (state == pci_channel_io_perm_failure)
7205 return PCI_ERS_RESULT_DISCONNECT;
7206
Auke Kok9a799d72007-09-15 14:07:45 -07007207 if (netif_running(netdev))
7208 ixgbe_down(adapter);
7209 pci_disable_device(pdev);
7210
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07007211 /* Request a slot reset. */
Auke Kok9a799d72007-09-15 14:07:45 -07007212 return PCI_ERS_RESULT_NEED_RESET;
7213}
7214
7215/**
7216 * ixgbe_io_slot_reset - called after the pci bus has been reset.
7217 * @pdev: Pointer to PCI device
7218 *
7219 * Restart the card from scratch, as if from a cold-boot.
7220 */
7221static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7222{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08007223 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007224 pci_ers_result_t result;
7225 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07007226
gouji-new9ce77662009-05-06 10:44:45 +00007227 if (pci_enable_device_mem(pdev)) {
Emil Tantilov396e7992010-07-01 20:05:12 +00007228 e_err(probe, "Cannot re-enable PCI device after reset.\n");
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007229 result = PCI_ERS_RESULT_DISCONNECT;
7230 } else {
7231 pci_set_master(pdev);
7232 pci_restore_state(pdev);
Breno Leitaoc0e1f682009-11-10 08:37:47 +00007233 pci_save_state(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007234
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07007235 pci_wake_from_d3(pdev, false);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007236
7237 ixgbe_reset(adapter);
PJ Waskiewicz88512532009-03-13 22:15:10 +00007238 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007239 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9a799d72007-09-15 14:07:45 -07007240 }
Auke Kok9a799d72007-09-15 14:07:45 -07007241
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007242 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7243 if (err) {
Emil Tantilov849c4542010-06-03 16:53:41 +00007244 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
7245 "failed 0x%0x\n", err);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007246 /* non-fatal, continue */
7247 }
Auke Kok9a799d72007-09-15 14:07:45 -07007248
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08007249 return result;
Auke Kok9a799d72007-09-15 14:07:45 -07007250}
7251
7252/**
7253 * ixgbe_io_resume - called when traffic can start flowing again.
7254 * @pdev: Pointer to PCI device
7255 *
7256 * This callback is called when the error recovery driver tells us that
7257 * it's OK to resume normal operation.
7258 */
7259static void ixgbe_io_resume(struct pci_dev *pdev)
7260{
Alexander Duyckc60fbb02010-11-16 19:26:54 -08007261 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7262 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07007263
	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			e_info(probe, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

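/* PCI error recovery (AER/EEH) callbacks wired into the PCI core */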
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

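/* PCI driver entry points: probe/remove, power management and error recovery */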
static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

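	/*
	 * When built with CONFIG_IXGBE_DCA, listen for DCA (Direct
	 * Cache Access) provider add/remove notifications.
	 */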
#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
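/*
 * Forward a DCA provider add/remove event to every ixgbe device bound
 * to this driver via __ixgbe_notify_dca().
 */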
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

/**
 * ixgbe_get_hw_dev - return the net_device associated with a hardware struct
 * @hw: pointer to hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev;
}

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */