/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
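
/*
 * Example (hypothetical invocation): loading the module with
 * "modprobe cxgb3 msi=1" keeps the driver off MSI-X even on
 * systems that support it.
 */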

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

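        /*
         * Map the first half of the response queue lookup table to port 0's
         * qsets and the second half to port 1's: e.g., with nq0 = nq1 = 2
         * the first half cycles over response queues {0, 1} and the second
         * half over {2, 3}.
         */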
        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                nd = alloc_netdev(0, "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                nd->priv = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct adapter *adap = dev->priv; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct adapter *adap = dev->priv;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct adapter *adap = dev->priv;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
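
/*
 * Once this group is registered on a port's net device (registration is
 * not shown in this excerpt), the attributes appear directly under the
 * interface's sysfs directory, e.g. (hypothetical interface name):
 *
 *      cat /sys/class/net/eth0/cam_size
 *      echo 16384 > /sys/class/net/eth0/nfilters
 */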

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
        ssize_t len;
        unsigned int v, addr, bpt, cpt;
        struct adapter *adap = to_net_dev(d)->priv;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
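                /*
                 * The scheduler issues bpt bytes every cpt core-clock ticks.
                 * cclk is in kHz, so cclk * 1000 / cpt is issues per second;
                 * times bpt gives bytes/s, and /125 converts bytes/s to Kbps
                 * (125 bytes/s == 1 Kbps).
                 */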
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(d)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
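
/*
 * offload_open() below attaches this group to the offload net device, so
 * the eight TX schedulers can be tuned from userspace, e.g. (hypothetical
 * interface name; the read side above reports rates in Kbps):
 *
 *      echo 10000 > /sys/class/net/eth0/sched0
 */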

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

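/*
 * Bind each queue set to its port via firmware packet-scheduler commands.
 * The -1 min/max arguments presumably leave the existing rate parameters
 * untouched.
 */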
static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

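/*
 * Name of the firmware image matching the version this driver expects;
 * request_firmware() resolves it through userspace, typically from
 * /lib/firmware.
 */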
#define FW_FNAME "t3fw-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL)
                        err = upgrade_fw(adap);
                if (err)
                        goto out;

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct t3cdev *tdev = T3CDEV(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* undo the offload setup if activation failed */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);

        t3_port_intr_disable(adapter, p->port_id);
        netif_stop_queue(dev);
        p->phy.ops->power_down(&p->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(p->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);
        struct net_device_stats *ns = &p->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&p->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",
};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        u32 fw_vers = 0;
        struct adapter *adapter = dev->priv;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

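/*
 * Copy a contiguous block of registers into the dump buffer.  The buffer
 * mirrors the register address space: each value lands at the offset equal
 * to its register address, and ranges that are never dumped stay zeroed by
 * the memset() in get_regs().
 */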
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (data == 0)
                data = 2;

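        /*
         * Blink the identification LED (driven by GPIO0 here) by toggling
         * the pin every 500 ms for "data" seconds, then leave it high.
         */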
        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct adapter *adapter = dev->priv;
        const struct port_info *pi = netdev_priv(dev);
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        struct qset_params *q;
        struct adapter *adapter = dev->priv;
        const struct port_info *pi = netdev_priv(dev);

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = dev->priv;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
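        /* The SEEPROM is accessed a 32-bit word at a time, so read the
         * 4-byte-aligned words covering the requested window and copy out
         * just the bytes the caller asked for. */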
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = dev->priv;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

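        /*
         * Writes are also a full 32-bit word at a time.  For an unaligned
         * request, read the first and last words of the aligned window into
         * a bounce buffer, merge in the caller's data, and write the whole
         * window back.
         */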
1514         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1515                 buf = kmalloc(aligned_len, GFP_KERNEL);
1516                 if (!buf)
1517                         return -ENOMEM;
1518                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1519                 if (!err && aligned_len > 4)
1520                         err = t3_seeprom_read(adapter,
1521                                               aligned_offset + aligned_len - 4,
1522                                               (u32 *) & buf[aligned_len - 4]);
1523                 if (err)
1524                         goto out;
1525                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1526         } else
1527                 buf = data;
1528
1529         err = t3_seeprom_wp(adapter, 0);
1530         if (err)
1531                 goto out;
1532
1533         for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1534                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1535                 aligned_offset += 4;
1536         }
1537
1538         if (!err)
1539                 err = t3_seeprom_wp(adapter, 1);
1540 out:
1541         if (buf != data)
1542                 kfree(buf);
1543         return err;
1544 }
1545
1546 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1547 {
1548         wol->supported = 0;
1549         wol->wolopts = 0;
1550         memset(&wol->sopass, 0, sizeof(wol->sopass));
1551 }
1552
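/*
 * The ethtool core dispatches the standard commands to the handlers
 * wired up below; e.g. "ethtool -g <dev>" reaches get_sge_param() and
 * "ethtool -C <dev> rx-usecs <n>" reaches set_coalesce().
 */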
1553 static const struct ethtool_ops cxgb_ethtool_ops = {
1554         .get_settings = get_settings,
1555         .set_settings = set_settings,
1556         .get_drvinfo = get_drvinfo,
1557         .get_msglevel = get_msglevel,
1558         .set_msglevel = set_msglevel,
1559         .get_ringparam = get_sge_param,
1560         .set_ringparam = set_sge_param,
1561         .get_coalesce = get_coalesce,
1562         .set_coalesce = set_coalesce,
1563         .get_eeprom_len = get_eeprom_len,
1564         .get_eeprom = get_eeprom,
1565         .set_eeprom = set_eeprom,
1566         .get_pauseparam = get_pauseparam,
1567         .set_pauseparam = set_pauseparam,
1568         .get_rx_csum = get_rx_csum,
1569         .set_rx_csum = set_rx_csum,
1570         .get_tx_csum = ethtool_op_get_tx_csum,
1571         .set_tx_csum = ethtool_op_set_tx_csum,
1572         .get_sg = ethtool_op_get_sg,
1573         .set_sg = ethtool_op_set_sg,
1574         .get_link = ethtool_op_get_link,
1575         .get_strings = get_strings,
1576         .phys_id = cxgb3_phys_id,
1577         .nway_reset = restart_autoneg,
1578         .get_stats_count = get_stats_count,
1579         .get_ethtool_stats = get_stats,
1580         .get_regs_len = get_regs_len,
1581         .get_regs = get_regs,
1582         .get_wol = get_wol,
1583         .get_tso = ethtool_op_get_tso,
1584         .set_tso = ethtool_op_set_tso,
1585         .get_perm_addr = ethtool_op_get_perm_addr
1586 };
1587
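/*
 * in_range - validate an optional ioctl parameter
 *
 * Negative values mean "parameter not supplied" and are always accepted;
 * otherwise val must lie in [lo, hi].
 */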
1588 static int in_range(int val, int lo, int hi)
1589 {
1590         return val < 0 || (val <= hi && val >= lo);
1591 }
1592
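/*
 * Chelsio-private ioctls arrive through SIOCCHIOCTL with ifr_data
 * pointing to a command-specific structure whose first 32-bit word is
 * the command code.  A minimal userspace sketch (error handling
 * omitted, any AF_INET socket fd works; field names as declared in
 * cxgb3_ioctl.h):
 *
 *      struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *      struct ifreq ifr;
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ifr.ifr_data = (void *)&edata;
 *      if (!ioctl(fd, SIOCCHIOCTL, &ifr))
 *              printf("%u queue sets\n", edata.val);
 */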
1593 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1594 {
1595         int ret;
1596         u32 cmd;
1597         struct adapter *adapter = dev->priv;
1598
1599         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1600                 return -EFAULT;
1601
1602         switch (cmd) {
1603         case CHELSIO_SET_QSET_PARAMS:{
1604                 int i;
1605                 struct qset_params *q;
1606                 struct ch_qset_params t;
1607
1608                 if (!capable(CAP_NET_ADMIN))
1609                         return -EPERM;
1610                 if (copy_from_user(&t, useraddr, sizeof(t)))
1611                         return -EFAULT;
1612                 if (t.qset_idx >= SGE_QSETS)
1613                         return -EINVAL;
1614                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1615                     !in_range(t.cong_thres, 0, 255) ||
1616                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1617                               MAX_TXQ_ENTRIES) ||
1618                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1619                               MAX_TXQ_ENTRIES) ||
1620                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1621                               MAX_CTRL_TXQ_ENTRIES) ||
1622                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1623                               MAX_RX_BUFFERS) ||
1624                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1625                               MAX_RX_JUMBO_BUFFERS) ||
1626                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1627                               MAX_RSPQ_ENTRIES))
1628                         return -EINVAL;
1629                 if ((adapter->flags & FULL_INIT_DONE) &&
1630                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1631                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1632                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1633                         t.polling >= 0 || t.cong_thres >= 0))
1634                         return -EBUSY;
1635
1636                 q = &adapter->params.sge.qset[t.qset_idx];
1637
1638                 if (t.rspq_size >= 0)
1639                         q->rspq_size = t.rspq_size;
1640                 if (t.fl_size[0] >= 0)
1641                         q->fl_size = t.fl_size[0];
1642                 if (t.fl_size[1] >= 0)
1643                         q->jumbo_size = t.fl_size[1];
1644                 if (t.txq_size[0] >= 0)
1645                         q->txq_size[0] = t.txq_size[0];
1646                 if (t.txq_size[1] >= 0)
1647                         q->txq_size[1] = t.txq_size[1];
1648                 if (t.txq_size[2] >= 0)
1649                         q->txq_size[2] = t.txq_size[2];
1650                 if (t.cong_thres >= 0)
1651                         q->cong_thres = t.cong_thres;
1652                 if (t.intr_lat >= 0) {
1653                         struct sge_qset *qs =
1654                                 &adapter->sge.qs[t.qset_idx];
1655
1656                         q->coalesce_usecs = t.intr_lat;
1657                         t3_update_qset_coalesce(qs, q);
1658                 }
1659                 if (t.polling >= 0) {
1660                         if (adapter->flags & USING_MSIX)
1661                                 q->polling = t.polling;
1662                         else {
1663                                 /* No polling with INTx for T3A */
1664                                 if (adapter->params.rev == 0 &&
1665                                         !(adapter->flags & USING_MSI))
1666                                         t.polling = 0;
1667
1668                                 for (i = 0; i < SGE_QSETS; i++) {
1669                                         q = &adapter->params.sge.qset[i];
1671                                         q->polling = t.polling;
1672                                 }
1673                         }
1674                 }
1675                 break;
1676         }
1677         case CHELSIO_GET_QSET_PARAMS:{
1678                 struct qset_params *q;
1679                 struct ch_qset_params t;
1680
1681                 if (copy_from_user(&t, useraddr, sizeof(t)))
1682                         return -EFAULT;
1683                 if (t.qset_idx >= SGE_QSETS)
1684                         return -EINVAL;
1685
1686                 q = &adapter->params.sge.qset[t.qset_idx];
1687                 t.rspq_size = q->rspq_size;
1688                 t.txq_size[0] = q->txq_size[0];
1689                 t.txq_size[1] = q->txq_size[1];
1690                 t.txq_size[2] = q->txq_size[2];
1691                 t.fl_size[0] = q->fl_size;
1692                 t.fl_size[1] = q->jumbo_size;
1693                 t.polling = q->polling;
1694                 t.intr_lat = q->coalesce_usecs;
1695                 t.cong_thres = q->cong_thres;
1696
1697                 if (copy_to_user(useraddr, &t, sizeof(t)))
1698                         return -EFAULT;
1699                 break;
1700         }
1701         case CHELSIO_SET_QSET_NUM:{
1702                 struct ch_reg edata;
1703                 struct port_info *pi = netdev_priv(dev);
1704                 unsigned int i, first_qset = 0, other_qsets = 0;
1705
1706                 if (!capable(CAP_NET_ADMIN))
1707                         return -EPERM;
1708                 if (adapter->flags & FULL_INIT_DONE)
1709                         return -EBUSY;
1710                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1711                         return -EFAULT;
1712                 if (edata.val < 1 ||
1713                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1714                         return -EINVAL;
1715
1716                 for_each_port(adapter, i)
1717                         if (adapter->port[i] && adapter->port[i] != dev)
1718                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1719
1720                 if (edata.val + other_qsets > SGE_QSETS)
1721                         return -EINVAL;
1722
1723                 pi->nqsets = edata.val;
1724
1725                 for_each_port(adapter, i)
1726                         if (adapter->port[i]) {
1727                                 pi = adap2pinfo(adapter, i);
1728                                 pi->first_qset = first_qset;
1729                                 first_qset += pi->nqsets;
1730                         }
1731                 break;
1732         }
1733         case CHELSIO_GET_QSET_NUM:{
1734                 struct ch_reg edata;
1735                 struct port_info *pi = netdev_priv(dev);
1736
1737                 edata.cmd = CHELSIO_GET_QSET_NUM;
1738                 edata.val = pi->nqsets;
1739                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1740                         return -EFAULT;
1741                 break;
1742         }
1743         case CHELSIO_LOAD_FW:{
1744                 u8 *fw_data;
1745                 struct ch_mem_range t;
1746
1747                 if (!capable(CAP_NET_ADMIN))
1748                         return -EPERM;
1749                 if (copy_from_user(&t, useraddr, sizeof(t)))
1750                         return -EFAULT;
1751
1752                 fw_data = kmalloc(t.len, GFP_KERNEL);
1753                 if (!fw_data)
1754                         return -ENOMEM;
1755
1756                 if (copy_from_user(fw_data, useraddr + sizeof(t),
1757                                    t.len)) {
1758                         kfree(fw_data);
1759                         return -EFAULT;
1760                 }
1761
1762                 ret = t3_load_fw(adapter, fw_data, t.len);
1763                 kfree(fw_data);
1764                 if (ret)
1765                         return ret;
1766                 break;
1767         }
1768         case CHELSIO_SETMTUTAB:{
1769                 struct ch_mtus m;
1770                 int i;
1771
1772                 if (!is_offload(adapter))
1773                         return -EOPNOTSUPP;
1774                 if (!capable(CAP_NET_ADMIN))
1775                         return -EPERM;
1776                 if (offload_running(adapter))
1777                         return -EBUSY;
1778                 if (copy_from_user(&m, useraddr, sizeof(m)))
1779                         return -EFAULT;
1780                 if (m.nmtus != NMTUS)
1781                         return -EINVAL;
1782                 if (m.mtus[0] < 81)     /* accommodate SACK */
1783                         return -EINVAL;
1784
1785                 /* MTUs must be in non-decreasing order */
1786                 for (i = 1; i < NMTUS; ++i)
1787                         if (m.mtus[i] < m.mtus[i - 1])
1788                                 return -EINVAL;
1789
1790                 memcpy(adapter->params.mtus, m.mtus,
1791                         sizeof(adapter->params.mtus));
1792                 break;
1793         }
1794         case CHELSIO_GET_PM:{
1795                 struct tp_params *p = &adapter->params.tp;
1796                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1797
1798                 if (!is_offload(adapter))
1799                         return -EOPNOTSUPP;
1800                 m.tx_pg_sz = p->tx_pg_size;
1801                 m.tx_num_pg = p->tx_num_pgs;
1802                 m.rx_pg_sz = p->rx_pg_size;
1803                 m.rx_num_pg = p->rx_num_pgs;
1804                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1805                 if (copy_to_user(useraddr, &m, sizeof(m)))
1806                         return -EFAULT;
1807                 break;
1808         }
1809         case CHELSIO_SET_PM:{
1810                 struct ch_pm m;
1811                 struct tp_params *p = &adapter->params.tp;
1812
1813                 if (!is_offload(adapter))
1814                         return -EOPNOTSUPP;
1815                 if (!capable(CAP_NET_ADMIN))
1816                         return -EPERM;
1817                 if (adapter->flags & FULL_INIT_DONE)
1818                         return -EBUSY;
1819                 if (copy_from_user(&m, useraddr, sizeof(m)))
1820                         return -EFAULT;
1821                 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1822                         !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1823                         return -EINVAL; /* not power of 2 */
1824                 if (!(m.rx_pg_sz & 0x14000))
1825                         return -EINVAL; /* not 16KB or 64KB */
1826                 if (!(m.tx_pg_sz & 0x1554000))
1827                         return -EINVAL;
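                /*
                 * The bitmask tests above further restrict the page
                 * sizes: 0x14000 accepts 16KB or 64KB Rx pages, and
                 * 0x1554000 accepts Tx pages from 16KB to 16MB in
                 * power-of-4 steps.
                 */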
1828                 if (m.tx_num_pg == -1)
1829                         m.tx_num_pg = p->tx_num_pgs;
1830                 if (m.rx_num_pg == -1)
1831                         m.rx_num_pg = p->rx_num_pgs;
1832                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1833                         return -EINVAL;
1834                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1835                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1836                         return -EINVAL;
1837                 p->rx_pg_size = m.rx_pg_sz;
1838                 p->tx_pg_size = m.tx_pg_sz;
1839                 p->rx_num_pgs = m.rx_num_pg;
1840                 p->tx_num_pgs = m.tx_num_pg;
1841                 break;
1842         }
1843         case CHELSIO_GET_MEM:{
1844                 struct ch_mem_range t;
1845                 struct mc7 *mem;
1846                 u64 buf[32];
1847
1848                 if (!is_offload(adapter))
1849                         return -EOPNOTSUPP;
1850                 if (!(adapter->flags & FULL_INIT_DONE))
1851                         return -EIO;    /* need the memory controllers */
1852                 if (copy_from_user(&t, useraddr, sizeof(t)))
1853                         return -EFAULT;
1854                 if ((t.addr & 7) || (t.len & 7))
1855                         return -EINVAL;
1856                 if (t.mem_id == MEM_CM)
1857                         mem = &adapter->cm;
1858                 else if (t.mem_id == MEM_PMRX)
1859                         mem = &adapter->pmrx;
1860                 else if (t.mem_id == MEM_PMTX)
1861                         mem = &adapter->pmtx;
1862                 else
1863                         return -EINVAL;
1864
1865                 /*
1866                  * Version scheme:
1867                  * bits 0..9: chip version
1868                  * bits 10..15: chip revision
1869                  */
1870                 t.version = 3 | (adapter->params.rev << 10);
1871                 if (copy_to_user(useraddr, &t, sizeof(t)))
1872                         return -EFAULT;
1873
1874                 /*
1875                  * Read 256 bytes at a time as len can be large and we don't
1876                  * want to use huge intermediate buffers.
1877                  */
1878                 useraddr += sizeof(t);  /* advance to start of buffer */
1879                 while (t.len) {
1880                         unsigned int chunk =
1881                                 min_t(unsigned int, t.len, sizeof(buf));
1882
1883                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1886                         if (ret)
1887                                 return ret;
1888                         if (copy_to_user(useraddr, buf, chunk))
1889                                 return -EFAULT;
1890                         useraddr += chunk;
1891                         t.addr += chunk;
1892                         t.len -= chunk;
1893                 }
1894                 break;
1895         }
1896         case CHELSIO_SET_TRACE_FILTER:{
1897                 struct ch_trace t;
1898                 const struct trace_params *tp;
1899
1900                 if (!capable(CAP_NET_ADMIN))
1901                         return -EPERM;
1902                 if (!offload_running(adapter))
1903                         return -EAGAIN;
1904                 if (copy_from_user(&t, useraddr, sizeof(t)))
1905                         return -EFAULT;
1906
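                /*
                 * The cast below relies on the members of struct ch_trace
                 * from sip onward mirroring the layout of struct
                 * trace_params.
                 */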
1907                 tp = (const struct trace_params *)&t.sip;
1908                 if (t.config_tx)
1909                         t3_config_trace_filter(adapter, tp, 0,
1910                                                 t.invert_match,
1911                                                 t.trace_tx);
1912                 if (t.config_rx)
1913                         t3_config_trace_filter(adapter, tp, 1,
1914                                                 t.invert_match,
1915                                                 t.trace_rx);
1916                 break;
1917         }
1918         default:
1919                 return -EOPNOTSUPP;
1920         }
1921         return 0;
1922 }
1923
1924 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1925 {
1926         int ret, mmd;
1927         struct adapter *adapter = dev->priv;
1928         struct port_info *pi = netdev_priv(dev);
1929         struct mii_ioctl_data *data = if_mii(req);
1930
1931         switch (cmd) {
1932         case SIOCGMIIPHY:
1933                 data->phy_id = pi->phy.addr;
1934                 /* FALLTHRU */
1935         case SIOCGMIIREG:{
1936                 u32 val;
1937                 struct cphy *phy = &pi->phy;
1938
1939                 if (!phy->mdio_read)
1940                         return -EOPNOTSUPP;
1941                 if (is_10G(adapter)) {
1942                         mmd = data->phy_id >> 8;
1943                         if (!mmd)
1944                                 mmd = MDIO_DEV_PCS;
1945                         else if (mmd > MDIO_DEV_XGXS)
1946                                 return -EINVAL;
1947
1948                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
1949                                              mmd, data->reg_num, &val);
1950                 } else {
1951                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
1952                                              0, data->reg_num & 0x1f, &val);
1953                 }
1956                 if (!ret)
1957                         data->val_out = val;
1958                 break;
1959         }
1960         case SIOCSMIIREG:{
1961                 struct cphy *phy = &pi->phy;
1962
1963                 if (!capable(CAP_NET_ADMIN))
1964                         return -EPERM;
1965                 if (!phy->mdio_write)
1966                         return -EOPNOTSUPP;
1967                 if (is_10G(adapter)) {
1968                         mmd = data->phy_id >> 8;
1969                         if (!mmd)
1970                                 mmd = MDIO_DEV_PCS;
1971                         else if (mmd > MDIO_DEV_XGXS)
1972                                 return -EINVAL;
1973
1974                         ret = phy->mdio_write(adapter,
1975                                               data->phy_id & 0x1f, mmd,
1976                                               data->reg_num, data->val_in);
1977                 } else {
1978                         ret = phy->mdio_write(adapter,
1979                                               data->phy_id & 0x1f, 0,
1980                                               data->reg_num & 0x1f,
1981                                               data->val_in);
1982                 }
1985                 break;
1986         }
1987         case SIOCCHIOCTL:
1988                 return cxgb_extension_ioctl(dev, req->ifr_data);
1989         default:
1990                 return -EOPNOTSUPP;
1991         }
1992         return ret;
1993 }
1994
1995 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1996 {
1997         int ret;
1998         struct adapter *adapter = dev->priv;
1999         struct port_info *pi = netdev_priv(dev);
2000
2001         if (new_mtu < 81)       /* accommodate SACK */
2002                 return -EINVAL;
2003         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2004                 return ret;
2005         dev->mtu = new_mtu;
2006         init_port_mtus(adapter);
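        /*
         * On rev 0 hardware the offload MTU table is reloaded whenever
         * a port MTU changes while the offload engine is running.
         */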
2007         if (adapter->params.rev == 0 && offload_running(adapter))
2008                 t3_load_mtus(adapter, adapter->params.mtus,
2009                              adapter->params.a_wnd, adapter->params.b_wnd,
2010                              adapter->port[0]->mtu);
2011         return 0;
2012 }
2013
2014 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2015 {
2016         struct adapter *adapter = dev->priv;
2017         struct port_info *pi = netdev_priv(dev);
2018         struct sockaddr *addr = p;
2019
2020         if (!is_valid_ether_addr(addr->sa_data))
2021                 return -EINVAL;
2022
2023         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2024         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2025         if (offload_running(adapter))
2026                 write_smt_entry(adapter, pi->port_id);
2027         return 0;
2028 }
2029
2030 /**
2031  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2032  * @adap: the adapter
2033  * @p: the port
2034  *
2035  * Ensures that current Rx processing on any of the queues associated with
2036  * the given port completes before returning.  We do this by acquiring and
2037  * releasing the locks of the response queues associated with the port.
2038  */
2039 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2040 {
2041         int i;
2042
2043         for (i = 0; i < p->nqsets; i++) {
2044                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2045
2046                 spin_lock_irq(&q->lock);
2047                 spin_unlock_irq(&q->lock);
2048         }
2049 }
2050
2051 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2052 {
2053         struct adapter *adapter = dev->priv;
2054         struct port_info *pi = netdev_priv(dev);
2055
2056         pi->vlan_grp = grp;
2057         if (adapter->params.rev > 0) {
2058                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2059         } else {
2060                 /* single control for all ports */
2061                 unsigned int i, have_vlans = 0;
2062                 for_each_port(adapter, i)
2063                         have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2064
2065                 t3_set_vlan_accel(adapter, 1, have_vlans);
2066         }
2067         t3_synchronize_rx(adapter, pi);
2068 }
2069
2070 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2071 {
2072         /* nothing */
2073 }
2074
2075 #ifdef CONFIG_NET_POLL_CONTROLLER
2076 static void cxgb_netpoll(struct net_device *dev)
2077 {
2078         struct adapter *adapter = dev->priv;
2079         struct sge_qset *qs = dev2qset(dev);
2080
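        /*
         * t3_intr_handler() returns the interrupt handler that matches
         * the adapter's current mode; invoke it directly for netpoll.
         */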
2081         t3_intr_handler(adapter, qs->rspq.polling)(adapter->pdev->irq,
2082                                                    adapter);
2083 }
2084 #endif
2085
2086 /*
2087  * Periodic accumulation of MAC statistics.
2088  */
2089 static void mac_stats_update(struct adapter *adapter)
2090 {
2091         int i;
2092
2093         for_each_port(adapter, i) {
2094                 struct net_device *dev = adapter->port[i];
2095                 struct port_info *p = netdev_priv(dev);
2096
2097                 if (netif_running(dev)) {
2098                         spin_lock(&adapter->stats_lock);
2099                         t3_mac_update_stats(&p->mac);
2100                         spin_unlock(&adapter->stats_lock);
2101                 }
2102         }
2103 }
2104
2105 static void check_link_status(struct adapter *adapter)
2106 {
2107         int i;
2108
2109         for_each_port(adapter, i) {
2110                 struct net_device *dev = adapter->port[i];
2111                 struct port_info *p = netdev_priv(dev);
2112
2113                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2114                         t3_link_changed(adapter, i);
2115         }
2116 }
2117
2118 static void check_t3b2_mac(struct adapter *adapter)
2119 {
2120         int i;
2121
2122         if (!rtnl_trylock())    /* synchronize with ifdown */
2123                 return;
2124
2125         for_each_port(adapter, i) {
2126                 struct net_device *dev = adapter->port[i];
2127                 struct port_info *p = netdev_priv(dev);
2128                 int status;
2129
2130                 if (!netif_running(dev))
2131                         continue;
2132
2133                 status = 0;
2134                 if (netif_carrier_ok(dev))
2135                         status = t3b2_mac_watchdog_task(&p->mac);
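                /*
                 * Judging by the counters below, a return of 1 means the
                 * watchdog toggled the MAC to recover it, while 2 means
                 * the MAC needs the full reinitialization done here.
                 */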
2136                 if (status == 1)
2137                         p->mac.stats.num_toggled++;
2138                 else if (status == 2) {
2139                         struct cmac *mac = &p->mac;
2140
2141                         t3_mac_set_mtu(mac, dev->mtu);
2142                         t3_mac_set_address(mac, 0, dev->dev_addr);
2143                         cxgb_set_rxmode(dev);
2144                         t3_link_start(&p->phy, mac, &p->link_config);
2145                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2146                         t3_port_intr_enable(adapter, p->port_id);
2147                         p->mac.stats.num_resets++;
2148                 }
2149         }
2150         rtnl_unlock();
2151 }
2152
2153
2154 static void t3_adap_check_task(struct work_struct *work)
2155 {
2156         struct adapter *adapter = container_of(work, struct adapter,
2157                                                adap_check_task.work);
2158         const struct adapter_params *p = &adapter->params;
2159
2160         adapter->check_task_cnt++;
2161
2162         /* Check link status for PHYs without interrupts */
2163         if (p->linkpoll_period)
2164                 check_link_status(adapter);
2165
2166         /* Accumulate MAC stats if needed */
2167         if (!p->linkpoll_period ||
2168             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2169             p->stats_update_period) {
2170                 mac_stats_update(adapter);
2171                 adapter->check_task_cnt = 0;
2172         }
2173
2174         if (p->rev == T3_REV_B2)
2175                 check_t3b2_mac(adapter);
2176
2177         /* Schedule the next check if any port is active. */
2178         spin_lock(&adapter->work_lock);
2179         if (adapter->open_device_map & PORT_MASK)
2180                 schedule_chk_task(adapter);
2181         spin_unlock(&adapter->work_lock);
2182 }
2183
2184 /*
2185  * Processes external (PHY) interrupts in process context.
2186  */
2187 static void ext_intr_task(struct work_struct *work)
2188 {
2189         struct adapter *adapter = container_of(work, struct adapter,
2190                                                ext_intr_handler_task);
2191
2192         t3_phy_intr_handler(adapter);
2193
2194         /* Now reenable external interrupts */
2195         spin_lock_irq(&adapter->work_lock);
2196         if (adapter->slow_intr_mask) {
2197                 adapter->slow_intr_mask |= F_T3DBG;
2198                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2199                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2200                              adapter->slow_intr_mask);
2201         }
2202         spin_unlock_irq(&adapter->work_lock);
2203 }
2204
2205 /*
2206  * Interrupt-context handler for external (PHY) interrupts.
2207  */
2208 void t3_os_ext_intr_handler(struct adapter *adapter)
2209 {
2210         /*
2211          * Schedule a task to handle external interrupts as they may be slow
2212          * and we use a mutex to protect MDIO registers.  We disable PHY
2213          * interrupts in the meantime and let the task reenable them when
2214          * it's done.
2215          */
2216         spin_lock(&adapter->work_lock);
2217         if (adapter->slow_intr_mask) {
2218                 adapter->slow_intr_mask &= ~F_T3DBG;
2219                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2220                              adapter->slow_intr_mask);
2221                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2222         }
2223         spin_unlock(&adapter->work_lock);
2224 }
2225
2226 void t3_fatal_err(struct adapter *adapter)
2227 {
2228         unsigned int fw_status[4];
2229
2230         if (adapter->flags & FULL_INIT_DONE) {
2231                 t3_sge_stop(adapter);
2232                 t3_intr_disable(adapter);
2233         }
2234         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2235         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2236                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2237                          fw_status[0], fw_status[1],
2238                          fw_status[2], fw_status[3]);
2240 }
2241
2242 static int __devinit cxgb_enable_msix(struct adapter *adap)
2243 {
2244         struct msix_entry entries[SGE_QSETS + 1];
2245         int i, err;
2246
2247         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2248                 entries[i].entry = i;
2249
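        /*
         * pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or, when it cannot satisfy the full request, the
         * number of vectors that could have been allocated.  A partial
         * allocation is treated as failure and the caller falls back
         * to MSI or legacy interrupts.
         */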
2250         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2251         if (!err) {
2252                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2253                         adap->msix_info[i].vec = entries[i].vector;
2254         } else if (err > 0)
2255                 dev_info(&adap->pdev->dev,
2256                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2257         return err;
2258 }
2259
2260 static void __devinit print_port_info(struct adapter *adap,
2261                                       const struct adapter_info *ai)
2262 {
2263         static const char *pci_variant[] = {
2264                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2265         };
2266
2267         int i;
2268         char buf[80];
2269
2270         if (is_pcie(adap))
2271                 snprintf(buf, sizeof(buf), "%s x%d",
2272                          pci_variant[adap->params.pci.variant],
2273                          adap->params.pci.width);
2274         else
2275                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2276                          pci_variant[adap->params.pci.variant],
2277                          adap->params.pci.speed, adap->params.pci.width);
2278
2279         for_each_port(adap, i) {
2280                 struct net_device *dev = adap->port[i];
2281                 const struct port_info *pi = netdev_priv(dev);
2282
2283                 if (!test_bit(i, &adap->registered_device_map))
2284                         continue;
2285                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2286                        dev->name, ai->desc, pi->port_type->desc,
2287                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2288                        (adap->flags & USING_MSIX) ? " MSI-X" :
2289                        (adap->flags & USING_MSI) ? " MSI" : "");
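                /*
                 * adap->name points at the name of the first successfully
                 * registered port, so the pointer comparison prints the
                 * memory sizes only once per adapter.
                 */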
2290                 if (adap->name == dev->name && adap->params.vpd.mclk)
2291                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2292                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2293                                t3_mc7_size(&adap->pmtx) >> 20,
2294                                t3_mc7_size(&adap->pmrx) >> 20);
2295         }
2296 }
2297
2298 static int __devinit init_one(struct pci_dev *pdev,
2299                               const struct pci_device_id *ent)
2300 {
2301         static int version_printed;
2302
2303         int i, err, pci_using_dac = 0;
2304         unsigned long mmio_start, mmio_len;
2305         const struct adapter_info *ai;
2306         struct adapter *adapter = NULL;
2307         struct port_info *pi;
2308
2309         if (!version_printed) {
2310                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2311                 ++version_printed;
2312         }
2313
2314         if (!cxgb3_wq) {
2315                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2316                 if (!cxgb3_wq) {
2317                         printk(KERN_ERR DRV_NAME
2318                                ": cannot initialize work queue\n");
2319                         return -ENOMEM;
2320                 }
2321         }
2322
2323         err = pci_request_regions(pdev, DRV_NAME);
2324         if (err) {
2325                 /* Just info, some other driver may have claimed the device. */
2326                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2327                 return err;
2328         }
2329
2330         err = pci_enable_device(pdev);
2331         if (err) {
2332                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2333                 goto out_release_regions;
2334         }
2335
2336         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2337                 pci_using_dac = 1;
2338                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2339                 if (err) {
2340                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2341                                "coherent allocations\n");
2342                         goto out_disable_device;
2343                 }
2344         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2345                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2346                 goto out_disable_device;
2347         }
2348
2349         pci_set_master(pdev);
2350
2351         mmio_start = pci_resource_start(pdev, 0);
2352         mmio_len = pci_resource_len(pdev, 0);
2353         ai = t3_get_adapter_info(ent->driver_data);
2354
2355         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2356         if (!adapter) {
2357                 err = -ENOMEM;
2358                 goto out_disable_device;
2359         }
2360
2361         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2362         if (!adapter->regs) {
2363                 dev_err(&pdev->dev, "cannot map device registers\n");
2364                 err = -ENOMEM;
2365                 goto out_free_adapter;
2366         }
2367
2368         adapter->pdev = pdev;
2369         adapter->name = pci_name(pdev);
2370         adapter->msg_enable = dflt_msg_enable;
2371         adapter->mmio_len = mmio_len;
2372
2373         mutex_init(&adapter->mdio_lock);
2374         spin_lock_init(&adapter->work_lock);
2375         spin_lock_init(&adapter->stats_lock);
2376
2377         INIT_LIST_HEAD(&adapter->adapter_list);
2378         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2379         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2380
2381         for (i = 0; i < ai->nports; ++i) {
2382                 struct net_device *netdev;
2383
2384                 netdev = alloc_etherdev(sizeof(struct port_info));
2385                 if (!netdev) {
2386                         err = -ENOMEM;
2387                         goto out_free_dev;
2388                 }
2389
2390                 SET_MODULE_OWNER(netdev);
2391                 SET_NETDEV_DEV(netdev, &pdev->dev);
2392
2393                 adapter->port[i] = netdev;
2394                 pi = netdev_priv(netdev);
2395                 pi->rx_csum_offload = 1;
2396                 pi->nqsets = 1;
2397                 pi->first_qset = i;
2398                 pi->activity = 0;
2399                 pi->port_id = i;
2400                 netif_carrier_off(netdev);
2401                 netdev->irq = pdev->irq;
2402                 netdev->mem_start = mmio_start;
2403                 netdev->mem_end = mmio_start + mmio_len - 1;
2404                 netdev->priv = adapter;
2405                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2406                 netdev->features |= NETIF_F_LLTX;
2407                 if (pci_using_dac)
2408                         netdev->features |= NETIF_F_HIGHDMA;
2409
2410                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2411                 netdev->vlan_rx_register = vlan_rx_register;
2412                 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2413
2414                 netdev->open = cxgb_open;
2415                 netdev->stop = cxgb_close;
2416                 netdev->hard_start_xmit = t3_eth_xmit;
2417                 netdev->get_stats = cxgb_get_stats;
2418                 netdev->set_multicast_list = cxgb_set_rxmode;
2419                 netdev->do_ioctl = cxgb_ioctl;
2420                 netdev->change_mtu = cxgb_change_mtu;
2421                 netdev->set_mac_address = cxgb_set_mac_addr;
2422 #ifdef CONFIG_NET_POLL_CONTROLLER
2423                 netdev->poll_controller = cxgb_netpoll;
2424 #endif
2425                 netdev->weight = 64;
2426
2427                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2428         }
2429
2430         pci_set_drvdata(pdev, adapter->port[0]);
2431         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2432                 err = -ENODEV;
2433                 goto out_free_dev;
2434         }
2435
2436         /*
2437          * The card is now ready to go.  If any errors occur during device
2438          * registration we do not fail the whole card but rather proceed only
2439          * with the ports we manage to register successfully.  However we must
2440          * register at least one net device.
2441          */
2442         for_each_port(adapter, i) {
2443                 err = register_netdev(adapter->port[i]);
2444                 if (err)
2445                         dev_warn(&pdev->dev,
2446                                  "cannot register net device %s, skipping\n",
2447                                  adapter->port[i]->name);
2448                 else {
2449                         /*
2450                          * Change the name we use for messages to the name of
2451                          * the first successfully registered interface.
2452                          */
2453                         if (!adapter->registered_device_map)
2454                                 adapter->name = adapter->port[i]->name;
2455
2456                         __set_bit(i, &adapter->registered_device_map);
2457                 }
2458         }
2459         if (!adapter->registered_device_map) {
2460                 dev_err(&pdev->dev, "could not register any net devices\n");
2461                 goto out_free_dev;
2462         }
2463
2464         /* Driver's ready. Reflect it on LEDs */
2465         t3_led_ready(adapter);
2466
2467         if (is_offload(adapter)) {
2468                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2469                 cxgb3_adapter_ofld(adapter);
2470         }
2471
2472         /* See what interrupts we'll be using */
2473         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2474                 adapter->flags |= USING_MSIX;
2475         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2476                 adapter->flags |= USING_MSI;
2477
2478         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2479                                  &cxgb3_attr_group);
2480
2481         print_port_info(adapter, ai);
2482         return 0;
2483
2484 out_free_dev:
2485         iounmap(adapter->regs);
2486         for (i = ai->nports - 1; i >= 0; --i)
2487                 if (adapter->port[i])
2488                         free_netdev(adapter->port[i]);
2489
2490 out_free_adapter:
2491         kfree(adapter);
2492
2493 out_disable_device:
2494         pci_disable_device(pdev);
2495 out_release_regions:
2496         pci_release_regions(pdev);
2497         pci_set_drvdata(pdev, NULL);
2498         return err;
2499 }
2500
2501 static void __devexit remove_one(struct pci_dev *pdev)
2502 {
2503         struct net_device *dev = pci_get_drvdata(pdev);
2504
2505         if (dev) {
2506                 int i;
2507                 struct adapter *adapter = dev->priv;
2508
2509                 t3_sge_stop(adapter);
2510                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2511                                    &cxgb3_attr_group);
2512
2513                 for_each_port(adapter, i)
2514                         if (test_bit(i, &adapter->registered_device_map))
2515                                 unregister_netdev(adapter->port[i]);
2516
2517                 if (is_offload(adapter)) {
2518                         cxgb3_adapter_unofld(adapter);
2519                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2520                                      &adapter->open_device_map))
2521                                 offload_close(&adapter->tdev);
2522                 }
2523
2524                 t3_free_sge_resources(adapter);
2525                 cxgb_disable_msi(adapter);
2526
2527                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2528                         if (adapter->dummy_netdev[i]) {
2529                                 free_netdev(adapter->dummy_netdev[i]);
2530                                 adapter->dummy_netdev[i] = NULL;
2531                         }
2532
2533                 for_each_port(adapter, i)
2534                         if (adapter->port[i])
2535                                 free_netdev(adapter->port[i]);
2536
2537                 iounmap(adapter->regs);
2538                 kfree(adapter);
2539                 pci_release_regions(pdev);
2540                 pci_disable_device(pdev);
2541                 pci_set_drvdata(pdev, NULL);
2542         }
2543 }
2544
2545 static struct pci_driver driver = {
2546         .name = DRV_NAME,
2547         .id_table = cxgb3_pci_tbl,
2548         .probe = init_one,
2549         .remove = __devexit_p(remove_one),
2550 };
2551
2552 static int __init cxgb3_init_module(void)
2553 {
2554         int ret;
2555
2556         cxgb3_offload_init();
2557
2558         ret = pci_register_driver(&driver);
2559         return ret;
2560 }
2561
2562 static void __exit cxgb3_cleanup_module(void)
2563 {
2564         pci_unregister_driver(&driver);
2565         if (cxgb3_wq)
2566                 destroy_workqueue(cxgb3_wq);
2567 }
2568
2569 module_init(cxgb3_init_module);
2570 module_exit(cxgb3_cleanup_module);