2 * This file is part of the Chelsio T3 Ethernet driver for Linux.
4 * Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/netdevice.h>
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/mii.h>
21 #include <linux/sockios.h>
22 #include <linux/workqueue.h>
23 #include <linux/proc_fs.h>
24 #include <linux/rtnetlink.h>
25 #include <asm/uaccess.h>
28 #include "cxgb3_ioctl.h"
30 #include "cxgb3_offload.h"
33 #include "cxgb3_ctl_defs.h"
35 #include "firmware_exports.h"
38 MAX_TXQ_ENTRIES = 16384,
39 MAX_CTRL_TXQ_ENTRIES = 1024,
40 MAX_RSPQ_ENTRIES = 16384,
41 MAX_RX_BUFFERS = 16384,
42 MAX_RX_JUMBO_BUFFERS = 16384,
44 MIN_CTRL_TXQ_ENTRIES = 4,
45 MIN_RSPQ_ENTRIES = 32,
49 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
51 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
52 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
53 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
55 #define EEPROM_MAGIC 0x38E2F10C
57 #define to_net_dev(class) container_of(class, struct net_device, class_dev)
59 #define CH_DEVICE(devid, ssid, idx) \
60 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
62 static const struct pci_device_id cxgb3_pci_tbl[] = {
63 CH_DEVICE(0x20, 1, 0), /* PE9000 */
64 CH_DEVICE(0x21, 1, 1), /* T302E */
65 CH_DEVICE(0x22, 1, 2), /* T310E */
66 CH_DEVICE(0x23, 1, 3), /* T320X */
67 CH_DEVICE(0x24, 1, 1), /* T302X */
68 CH_DEVICE(0x25, 1, 3), /* T320E */
69 CH_DEVICE(0x26, 1, 2), /* T310X */
70 CH_DEVICE(0x30, 1, 2), /* T3B10 */
71 CH_DEVICE(0x31, 1, 3), /* T3B20 */
72 CH_DEVICE(0x32, 1, 1), /* T3B02 */
76 MODULE_DESCRIPTION(DRV_DESC);
77 MODULE_AUTHOR("Chelsio Communications");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_VERSION);
80 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
82 static int dflt_msg_enable = DFLT_MSG_ENABLE;
84 module_param(dflt_msg_enable, int, 0644);
85 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
88 * The driver uses the best interrupt scheme available on a platform in the
89 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
90 * of these schemes the driver may consider as follows:
92 * msi = 2: choose from among all three options
93 * msi = 1: only consider MSI and pin interrupts
94 * msi = 0: force pin interrupts
98 module_param(msi, int, 0644);
99 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
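/*
 * Example: loading the module with "msi=1" (e.g. "modprobe cxgb3 msi=1")
 * restricts the driver to MSI and legacy pin interrupts.
 */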
102 * The driver enables offload by default.
103 * To disable it, set ofld_disable = 1.
106 static int ofld_disable = 0;
108 module_param(ofld_disable, int, 0644);
109 MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time");
112 * We have work elements that we need to cancel when an interface is taken
113 * down. Normally the work elements would be executed by keventd but that
114 * can deadlock because of linkwatch. If our close method takes the rtnl
115 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
116 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
117 * for our work to complete. Get our own work queue to solve this.
119 static struct workqueue_struct *cxgb3_wq;
122 * link_report - show link status and link speed/duplex
123 * @dev: the port whose settings are to be reported
125 * Shows the link status, speed, and duplex of a port.
127 static void link_report(struct net_device *dev)
129 if (!netif_carrier_ok(dev))
130 printk(KERN_INFO "%s: link down\n", dev->name);
132 const char *s = "10Mbps";
133 const struct port_info *p = netdev_priv(dev);
135 switch (p->link_config.speed) {
147 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
153 * t3_os_link_changed - handle link status changes
154 * @adapter: the adapter associated with the link change
155 * @port_id: the port index whose link status has changed
156 * @link_stat: the new status of the link
157 * @speed: the new speed setting
158 * @duplex: the new duplex setting
159 * @pause: the new flow-control setting
161 * This is the OS-dependent handler for link status changes. The OS
162 * neutral handler takes care of most of the processing for these events,
163 * then calls this handler for any OS-specific processing.
165 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
166 int speed, int duplex, int pause)
168 struct net_device *dev = adapter->port[port_id];
170 /* Skip changes from disabled ports. */
171 if (!netif_running(dev))
174 if (link_stat != netif_carrier_ok(dev)) {
176 netif_carrier_on(dev);
178 netif_carrier_off(dev);
183 static void cxgb_set_rxmode(struct net_device *dev)
185 struct t3_rx_mode rm;
186 struct port_info *pi = netdev_priv(dev);
188 init_rx_mode(&rm, dev, dev->mc_list);
189 t3_mac_set_rx_mode(&pi->mac, &rm);
193 * link_start - enable a port
194 * @dev: the device to enable
196 * Performs the MAC and PHY actions needed to enable a port.
198 static void link_start(struct net_device *dev)
200 struct t3_rx_mode rm;
201 struct port_info *pi = netdev_priv(dev);
202 struct cmac *mac = &pi->mac;
204 init_rx_mode(&rm, dev, dev->mc_list);
206 t3_mac_set_mtu(mac, dev->mtu);
207 t3_mac_set_address(mac, 0, dev->dev_addr);
208 t3_mac_set_rx_mode(mac, &rm);
209 t3_link_start(&pi->phy, mac, &pi->link_config);
210 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
213 static inline void cxgb_disable_msi(struct adapter *adapter)
215 if (adapter->flags & USING_MSIX) {
216 pci_disable_msix(adapter->pdev);
217 adapter->flags &= ~USING_MSIX;
218 } else if (adapter->flags & USING_MSI) {
219 pci_disable_msi(adapter->pdev);
220 adapter->flags &= ~USING_MSI;
225 * Interrupt handler for asynchronous events used with MSI-X.
227 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
229 t3_slow_intr_handler(cookie);
234 * Name the MSI-X interrupts.
236 static void name_msix_vecs(struct adapter *adap)
238 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
240 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
241 adap->msix_info[0].desc[n] = 0;
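/*
 * Vector 0 (named above) carries slow-path/async events; the remaining
 * vectors are data interrupts, named per port and queue set below.
 */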
243 for_each_port(adap, j) {
244 struct net_device *d = adap->port[j];
245 const struct port_info *pi = netdev_priv(d);
247 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
248 snprintf(adap->msix_info[msi_idx].desc, n,
249 "%s (queue %d)", d->name, i);
250 adap->msix_info[msi_idx].desc[n] = 0;
255 static int request_msix_data_irqs(struct adapter *adap)
257 int i, j, err, qidx = 0;
259 for_each_port(adap, i) {
260 int nqsets = adap2pinfo(adap, i)->nqsets;
262 for (j = 0; j < nqsets; ++j) {
263 err = request_irq(adap->msix_info[qidx + 1].vec,
264 t3_intr_handler(adap,
267 adap->msix_info[qidx + 1].desc,
268 &adap->sge.qs[qidx]);
271 free_irq(adap->msix_info[qidx + 1].vec,
272 &adap->sge.qs[qidx]);
282 * setup_rss - configure RSS
285 * Sets up RSS to distribute packets to multiple receive queues. We
286 * configure the RSS CPU lookup table to distribute to the number of HW
287 * receive queues, and the response queue lookup table to narrow that
288 * down to the response queues actually configured for each port.
289 * We always configure the RSS mapping for two ports since the mapping
290 * table has plenty of entries.
292 static void setup_rss(struct adapter *adap)
295 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
296 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
297 u8 cpus[SGE_QSETS + 1];
298 u16 rspq_map[RSS_TABLE_SIZE];
300 for (i = 0; i < SGE_QSETS; ++i)
302 cpus[SGE_QSETS] = 0xff; /* terminator */
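/*
 * First half of the map steers packets to port 0's queue sets, the second
 * half to port 1's queue sets (offset by nq0).
 */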
304 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
305 rspq_map[i] = i % nq0;
306 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
309 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
310 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
311 V_RRCPLCPUSIZE(6), cpus, rspq_map);
315 * If we have multiple receive queues per port serviced by NAPI we need one
316 * netdevice per queue as NAPI operates on netdevices. We already have one
317 * netdevice, namely the one associated with the interface, so we use dummy
318 * ones for any additional queues. Note that these netdevices exist purely
319 * so that NAPI has something to work with; they do not represent network
320 * ports and are not registered.
322 static int init_dummy_netdevs(struct adapter *adap)
324 int i, j, dummy_idx = 0;
325 struct net_device *nd;
327 for_each_port(adap, i) {
328 struct net_device *dev = adap->port[i];
329 const struct port_info *pi = netdev_priv(dev);
331 for (j = 0; j < pi->nqsets - 1; j++) {
332 if (!adap->dummy_netdev[dummy_idx]) {
333 nd = alloc_netdev(0, "", ether_setup);
339 set_bit(__LINK_STATE_START, &nd->state);
340 adap->dummy_netdev[dummy_idx] = nd;
342 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
349 while (--dummy_idx >= 0) {
350 free_netdev(adap->dummy_netdev[dummy_idx]);
351 adap->dummy_netdev[dummy_idx] = NULL;
357 * Wait until all NAPI handlers are descheduled. This includes the handlers of
358 * both netdevices representing interfaces and the dummy ones for the extra queues.
361 static void quiesce_rx(struct adapter *adap)
364 struct net_device *dev;
366 for_each_port(adap, i) {
368 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
372 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
373 dev = adap->dummy_netdev[i];
375 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
381 * setup_sge_qsets - configure SGE Tx/Rx/response queues
384 * Determines how many sets of SGE queues to use and initializes them.
385 * We support multiple queue sets per port if we have MSI-X, otherwise
386 * just one queue set per port.
388 static int setup_sge_qsets(struct adapter *adap)
390 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
391 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
393 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
396 for_each_port(adap, i) {
397 struct net_device *dev = adap->port[i];
398 const struct port_info *pi = netdev_priv(dev);
400 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
401 err = t3_sge_alloc_qset(adap, qset_idx, 1,
402 (adap->flags & USING_MSIX) ? qset_idx + 1 :
404 &adap->params.sge.qset[qset_idx], ntxq,
406 adap->dummy_netdev[dummy_dev_idx++]);
408 t3_free_sge_resources(adap);
417 static ssize_t attr_show(struct class_device *cd, char *buf,
418 ssize_t(*format) (struct adapter *, char *))
421 struct adapter *adap = to_net_dev(cd)->priv;
423 /* Synchronize with ioctls that may shut down the device */
425 len = (*format) (adap, buf);
430 static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
431 ssize_t(*set) (struct adapter *, unsigned int),
432 unsigned int min_val, unsigned int max_val)
437 struct adapter *adap = to_net_dev(cd)->priv;
439 if (!capable(CAP_NET_ADMIN))
442 val = simple_strtoul(buf, &endp, 0);
443 if (endp == buf || val < min_val || val > max_val)
447 ret = (*set) (adap, val);
454 #define CXGB3_SHOW(name, val_expr) \
455 static ssize_t format_##name(struct adapter *adap, char *buf) \
457 return sprintf(buf, "%u\n", val_expr); \
459 static ssize_t show_##name(struct class_device *cd, char *buf) \
461 return attr_show(cd, buf, format_##name); \
464 static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
466 if (adap->flags & FULL_INIT_DONE)
468 if (val && adap->params.rev == 0)
470 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
472 adap->params.mc5.nfilters = val;
476 static ssize_t store_nfilters(struct class_device *cd, const char *buf,
479 return attr_store(cd, buf, len, set_nfilters, 0, ~0);
482 static ssize_t set_nservers(struct adapter *adap, unsigned int val)
484 if (adap->flags & FULL_INIT_DONE)
486 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
488 adap->params.mc5.nservers = val;
492 static ssize_t store_nservers(struct class_device *cd, const char *buf,
495 return attr_store(cd, buf, len, set_nservers, 0, ~0);
498 #define CXGB3_ATTR_R(name, val_expr) \
499 CXGB3_SHOW(name, val_expr) \
500 static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
502 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
503 CXGB3_SHOW(name, val_expr) \
504 static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
506 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
507 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
508 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
510 static struct attribute *cxgb3_attrs[] = {
511 &class_device_attr_cam_size.attr,
512 &class_device_attr_nfilters.attr,
513 &class_device_attr_nservers.attr,
517 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
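/*
 * Show a hardware Tx scheduler's rate in Kbps.  The TP timer PIO register
 * holds bytes-per-tick (bpt) and core-clocks-per-tick (cpt) for the
 * scheduler; with the core clock in kHz the rate is
 * (cclk * 1000 / cpt) * bpt bytes/sec, converted to Kbps by dividing by 125.
 */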
519 static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
522 unsigned int v, addr, bpt, cpt;
523 struct adapter *adap = to_net_dev(cd)->priv;
525 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
527 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
528 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
531 bpt = (v >> 8) & 0xff;
534 len = sprintf(buf, "disabled\n");
536 v = (adap->params.vpd.cclk * 1000) / cpt;
537 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
543 static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
544 size_t len, int sched)
549 struct adapter *adap = to_net_dev(cd)->priv;
551 if (!capable(CAP_NET_ADMIN))
554 val = simple_strtoul(buf, &endp, 0);
555 if (endp == buf || val > 10000000)
559 ret = t3_config_sched(adap, val, sched);
566 #define TM_ATTR(name, sched) \
567 static ssize_t show_##name(struct class_device *cd, char *buf) \
569 return tm_attr_show(cd, buf, sched); \
571 static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
573 return tm_attr_store(cd, buf, len, sched); \
575 static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
586 static struct attribute *offload_attrs[] = {
587 &class_device_attr_sched0.attr,
588 &class_device_attr_sched1.attr,
589 &class_device_attr_sched2.attr,
590 &class_device_attr_sched3.attr,
591 &class_device_attr_sched4.attr,
592 &class_device_attr_sched5.attr,
593 &class_device_attr_sched6.attr,
594 &class_device_attr_sched7.attr,
598 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
601 * Sends an sk_buff to an offload queue driver
602 * after dealing with any active network taps.
604 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
609 ret = t3_offload_tx(tdev, skb);
614 static int write_smt_entry(struct adapter *adapter, int idx)
616 struct cpl_smt_write_req *req;
617 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
622 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
623 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
624 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
625 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
627 memset(req->src_mac1, 0, sizeof(req->src_mac1));
628 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
630 offload_tx(&adapter->tdev, skb);
634 static int init_smt(struct adapter *adapter)
638 for_each_port(adapter, i)
639 write_smt_entry(adapter, i);
643 static void init_port_mtus(struct adapter *adapter)
645 unsigned int mtus = adapter->port[0]->mtu;
647 if (adapter->port[1])
648 mtus |= adapter->port[1]->mtu << 16;
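/* Port 0's MTU goes in bits 15:0, port 1's in bits 31:16 of the register. */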
649 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
653 * cxgb_up - enable the adapter
654 * @adap: adapter being enabled
656 * Called when the first port is enabled, this function performs the
657 * actions necessary to make an adapter operational, such as completing
658 * the initialization of HW modules, and enabling interrupts.
660 * Must be called with the rtnl lock held.
662 static int cxgb_up(struct adapter *adap)
666 if (!(adap->flags & FULL_INIT_DONE)) {
667 err = t3_check_fw_version(adap);
671 err = init_dummy_netdevs(adap);
675 err = t3_init_hw(adap, 0);
679 err = setup_sge_qsets(adap);
684 adap->flags |= FULL_INIT_DONE;
689 if (adap->flags & USING_MSIX) {
690 name_msix_vecs(adap);
691 err = request_irq(adap->msix_info[0].vec,
692 t3_async_intr_handler, 0,
693 adap->msix_info[0].desc, adap);
697 if (request_msix_data_irqs(adap)) {
698 free_irq(adap->msix_info[0].vec, adap);
701 } else if ((err = request_irq(adap->pdev->irq,
702 t3_intr_handler(adap,
703 adap->sge.qs[0].rspq.
705 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
710 t3_intr_enable(adap);
714 CH_ERR(adap, "request_irq failed, err %d\n", err);
719 * Release resources when all the ports and offloading have been stopped.
721 static void cxgb_down(struct adapter *adapter)
723 t3_sge_stop(adapter);
724 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
725 t3_intr_disable(adapter);
726 spin_unlock_irq(&adapter->work_lock);
728 if (adapter->flags & USING_MSIX) {
731 free_irq(adapter->msix_info[0].vec, adapter);
732 for_each_port(adapter, i)
733 n += adap2pinfo(adapter, i)->nqsets;
735 for (i = 0; i < n; ++i)
736 free_irq(adapter->msix_info[i + 1].vec,
737 &adapter->sge.qs[i]);
739 free_irq(adapter->pdev->irq, adapter);
741 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
745 static void schedule_chk_task(struct adapter *adap)
749 timeo = adap->params.linkpoll_period ?
750 (HZ * adap->params.linkpoll_period) / 10 :
751 adap->params.stats_update_period * HZ;
753 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
756 static int offload_open(struct net_device *dev)
758 struct adapter *adapter = dev->priv;
759 struct t3cdev *tdev = T3CDEV(dev);
760 int adap_up = adapter->open_device_map & PORT_MASK;
763 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
766 if (!adap_up && (err = cxgb_up(adapter)) < 0)
769 t3_tp_set_offload_mode(adapter, 1);
770 tdev->lldev = adapter->port[0];
771 err = cxgb3_offload_activate(adapter);
775 init_port_mtus(adapter);
776 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
777 adapter->params.b_wnd,
778 adapter->params.rev == 0 ?
779 adapter->port[0]->mtu : 0xffff);
782 /* Never mind if the next step fails */
783 sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
785 /* Call back all registered clients */
786 cxgb3_add_clients(tdev);
789 /* restore them in case the offload module has changed them */
791 t3_tp_set_offload_mode(adapter, 0);
792 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
793 cxgb3_set_dummy_ops(tdev);
798 static int offload_close(struct t3cdev *tdev)
800 struct adapter *adapter = tdev2adap(tdev);
802 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
805 /* Call back all registered clients */
806 cxgb3_remove_clients(tdev);
808 sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
811 cxgb3_set_dummy_ops(tdev);
812 t3_tp_set_offload_mode(adapter, 0);
813 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
815 if (!adapter->open_device_map)
818 cxgb3_offload_deactivate(adapter);
822 static int cxgb_open(struct net_device *dev)
825 struct adapter *adapter = dev->priv;
826 struct port_info *pi = netdev_priv(dev);
827 int other_ports = adapter->open_device_map & PORT_MASK;
829 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
832 set_bit(pi->port_id, &adapter->open_device_map);
834 err = offload_open(dev);
837 "Could not initialize offload capabilities\n");
841 t3_port_intr_enable(adapter, pi->port_id);
842 netif_start_queue(dev);
844 schedule_chk_task(adapter);
849 static int cxgb_close(struct net_device *dev)
851 struct adapter *adapter = dev->priv;
852 struct port_info *p = netdev_priv(dev);
854 t3_port_intr_disable(adapter, p->port_id);
855 netif_stop_queue(dev);
856 p->phy.ops->power_down(&p->phy, 1);
857 netif_carrier_off(dev);
858 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
860 spin_lock(&adapter->work_lock); /* sync with update task */
861 clear_bit(p->port_id, &adapter->open_device_map);
862 spin_unlock(&adapter->work_lock);
864 if (!(adapter->open_device_map & PORT_MASK))
865 cancel_rearming_delayed_workqueue(cxgb3_wq,
866 &adapter->adap_check_task);
868 if (!adapter->open_device_map)
874 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
876 struct adapter *adapter = dev->priv;
877 struct port_info *p = netdev_priv(dev);
878 struct net_device_stats *ns = &p->netstats;
879 const struct mac_stats *pstats;
881 spin_lock(&adapter->stats_lock);
882 pstats = t3_mac_update_stats(&p->mac);
883 spin_unlock(&adapter->stats_lock);
885 ns->tx_bytes = pstats->tx_octets;
886 ns->tx_packets = pstats->tx_frames;
887 ns->rx_bytes = pstats->rx_octets;
888 ns->rx_packets = pstats->rx_frames;
889 ns->multicast = pstats->rx_mcast_frames;
891 ns->tx_errors = pstats->tx_underrun;
892 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
893 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
894 pstats->rx_fifo_ovfl;
896 /* detailed rx_errors */
897 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
898 ns->rx_over_errors = 0;
899 ns->rx_crc_errors = pstats->rx_fcs_errs;
900 ns->rx_frame_errors = pstats->rx_symbol_errs;
901 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
902 ns->rx_missed_errors = pstats->rx_cong_drops;
904 /* detailed tx_errors */
905 ns->tx_aborted_errors = 0;
906 ns->tx_carrier_errors = 0;
907 ns->tx_fifo_errors = pstats->tx_underrun;
908 ns->tx_heartbeat_errors = 0;
909 ns->tx_window_errors = 0;
913 static u32 get_msglevel(struct net_device *dev)
915 struct adapter *adapter = dev->priv;
917 return adapter->msg_enable;
920 static void set_msglevel(struct net_device *dev, u32 val)
922 struct adapter *adapter = dev->priv;
924 adapter->msg_enable = val;
927 static char stats_strings[][ETH_GSTRING_LEN] = {
930 "TxMulticastFramesOK",
931 "TxBroadcastFramesOK",
940 "TxFrames512To1023 ",
941 "TxFrames1024To1518 ",
942 "TxFrames1519ToMax ",
946 "RxMulticastFramesOK",
947 "RxBroadcastFramesOK",
960 "RxFrames512To1023 ",
961 "RxFrames1024To1518 ",
962 "RxFrames1519ToMax ",
973 static int get_stats_count(struct net_device *dev)
975 return ARRAY_SIZE(stats_strings);
978 #define T3_REGMAP_SIZE (3 * 1024)
980 static int get_regs_len(struct net_device *dev)
982 return T3_REGMAP_SIZE;
985 static int get_eeprom_len(struct net_device *dev)
990 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
993 struct adapter *adapter = dev->priv;
995 t3_get_fw_version(adapter, &fw_vers);
997 strcpy(info->driver, DRV_NAME);
998 strcpy(info->version, DRV_VERSION);
999 strcpy(info->bus_info, pci_name(adapter->pdev));
1001 strcpy(info->fw_version, "N/A");
1003 snprintf(info->fw_version, sizeof(info->fw_version),
1005 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1006 G_FW_VERSION_MAJOR(fw_vers),
1007 G_FW_VERSION_MINOR(fw_vers),
1008 G_FW_VERSION_MICRO(fw_vers));
1012 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1014 if (stringset == ETH_SS_STATS)
1015 memcpy(data, stats_strings, sizeof(stats_strings));
1018 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1019 struct port_info *p, int idx)
1022 unsigned long tot = 0;
1024 for (i = 0; i < p->nqsets; ++i)
1025 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1029 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1032 struct adapter *adapter = dev->priv;
1033 struct port_info *pi = netdev_priv(dev);
1034 const struct mac_stats *s;
1036 spin_lock(&adapter->stats_lock);
1037 s = t3_mac_update_stats(&pi->mac);
1038 spin_unlock(&adapter->stats_lock);
1040 *data++ = s->tx_octets;
1041 *data++ = s->tx_frames;
1042 *data++ = s->tx_mcast_frames;
1043 *data++ = s->tx_bcast_frames;
1044 *data++ = s->tx_pause;
1045 *data++ = s->tx_underrun;
1046 *data++ = s->tx_fifo_urun;
1048 *data++ = s->tx_frames_64;
1049 *data++ = s->tx_frames_65_127;
1050 *data++ = s->tx_frames_128_255;
1051 *data++ = s->tx_frames_256_511;
1052 *data++ = s->tx_frames_512_1023;
1053 *data++ = s->tx_frames_1024_1518;
1054 *data++ = s->tx_frames_1519_max;
1056 *data++ = s->rx_octets;
1057 *data++ = s->rx_frames;
1058 *data++ = s->rx_mcast_frames;
1059 *data++ = s->rx_bcast_frames;
1060 *data++ = s->rx_pause;
1061 *data++ = s->rx_fcs_errs;
1062 *data++ = s->rx_symbol_errs;
1063 *data++ = s->rx_short;
1064 *data++ = s->rx_jabber;
1065 *data++ = s->rx_too_long;
1066 *data++ = s->rx_fifo_ovfl;
1068 *data++ = s->rx_frames_64;
1069 *data++ = s->rx_frames_65_127;
1070 *data++ = s->rx_frames_128_255;
1071 *data++ = s->rx_frames_256_511;
1072 *data++ = s->rx_frames_512_1023;
1073 *data++ = s->rx_frames_1024_1518;
1074 *data++ = s->rx_frames_1519_max;
1076 *data++ = pi->phy.fifo_errors;
1078 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1079 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1080 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1081 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1082 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1083 *data++ = s->rx_cong_drops;
1086 static inline void reg_block_dump(struct adapter *ap, void *buf,
1087 unsigned int start, unsigned int end)
1089 u32 *p = buf + start;
1091 for (; start <= end; start += sizeof(u32))
1092 *p++ = t3_read_reg(ap, start);
1095 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1098 struct adapter *ap = dev->priv;
1102 * bits 0..9: chip version
1103 * bits 10..15: chip revision
1104 * bit 31: set for PCIe cards
1106 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1109 * We skip the MAC statistics registers because they are clear-on-read.
1110 * Also reading multi-register stats would need to synchronize with the
1111 * periodic mac stats accumulation. Hard to justify the complexity.
1113 memset(buf, 0, T3_REGMAP_SIZE);
1114 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1115 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1116 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1117 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1118 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1119 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1120 XGM_REG(A_XGM_SERDES_STAT3, 1));
1121 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1122 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1125 static int restart_autoneg(struct net_device *dev)
1127 struct port_info *p = netdev_priv(dev);
1129 if (!netif_running(dev))
1131 if (p->link_config.autoneg != AUTONEG_ENABLE)
1133 p->phy.ops->autoneg_restart(&p->phy);
1137 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1140 struct adapter *adapter = dev->priv;
1145 for (i = 0; i < data * 2; i++) {
1146 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1147 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1148 if (msleep_interruptible(500))
1151 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1156 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1158 struct port_info *p = netdev_priv(dev);
1160 cmd->supported = p->link_config.supported;
1161 cmd->advertising = p->link_config.advertising;
1163 if (netif_carrier_ok(dev)) {
1164 cmd->speed = p->link_config.speed;
1165 cmd->duplex = p->link_config.duplex;
1171 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1172 cmd->phy_address = p->phy.addr;
1173 cmd->transceiver = XCVR_EXTERNAL;
1174 cmd->autoneg = p->link_config.autoneg;
1180 static int speed_duplex_to_caps(int speed, int duplex)
1186 if (duplex == DUPLEX_FULL)
1187 cap = SUPPORTED_10baseT_Full;
1189 cap = SUPPORTED_10baseT_Half;
1192 if (duplex == DUPLEX_FULL)
1193 cap = SUPPORTED_100baseT_Full;
1195 cap = SUPPORTED_100baseT_Half;
1198 if (duplex == DUPLEX_FULL)
1199 cap = SUPPORTED_1000baseT_Full;
1201 cap = SUPPORTED_1000baseT_Half;
1204 if (duplex == DUPLEX_FULL)
1205 cap = SUPPORTED_10000baseT_Full;
1210 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1211 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1212 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1213 ADVERTISED_10000baseT_Full)
1215 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1217 struct port_info *p = netdev_priv(dev);
1218 struct link_config *lc = &p->link_config;
1220 if (!(lc->supported & SUPPORTED_Autoneg))
1221 return -EOPNOTSUPP; /* can't change speed/duplex */
1223 if (cmd->autoneg == AUTONEG_DISABLE) {
1224 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1226 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1228 lc->requested_speed = cmd->speed;
1229 lc->requested_duplex = cmd->duplex;
1230 lc->advertising = 0;
1232 cmd->advertising &= ADVERTISED_MASK;
1233 cmd->advertising &= lc->supported;
1234 if (!cmd->advertising)
1236 lc->requested_speed = SPEED_INVALID;
1237 lc->requested_duplex = DUPLEX_INVALID;
1238 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1240 lc->autoneg = cmd->autoneg;
1241 if (netif_running(dev))
1242 t3_link_start(&p->phy, &p->mac, lc);
1246 static void get_pauseparam(struct net_device *dev,
1247 struct ethtool_pauseparam *epause)
1249 struct port_info *p = netdev_priv(dev);
1251 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1252 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1253 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1256 static int set_pauseparam(struct net_device *dev,
1257 struct ethtool_pauseparam *epause)
1259 struct port_info *p = netdev_priv(dev);
1260 struct link_config *lc = &p->link_config;
1262 if (epause->autoneg == AUTONEG_DISABLE)
1263 lc->requested_fc = 0;
1264 else if (lc->supported & SUPPORTED_Autoneg)
1265 lc->requested_fc = PAUSE_AUTONEG;
1269 if (epause->rx_pause)
1270 lc->requested_fc |= PAUSE_RX;
1271 if (epause->tx_pause)
1272 lc->requested_fc |= PAUSE_TX;
1273 if (lc->autoneg == AUTONEG_ENABLE) {
1274 if (netif_running(dev))
1275 t3_link_start(&p->phy, &p->mac, lc);
1277 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1278 if (netif_running(dev))
1279 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1284 static u32 get_rx_csum(struct net_device *dev)
1286 struct port_info *p = netdev_priv(dev);
1288 return p->rx_csum_offload;
1291 static int set_rx_csum(struct net_device *dev, u32 data)
1293 struct port_info *p = netdev_priv(dev);
1295 p->rx_csum_offload = data;
1299 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1301 struct adapter *adapter = dev->priv;
1303 e->rx_max_pending = MAX_RX_BUFFERS;
1304 e->rx_mini_max_pending = 0;
1305 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1306 e->tx_max_pending = MAX_TXQ_ENTRIES;
1308 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1309 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1310 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1311 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1314 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1317 struct adapter *adapter = dev->priv;
1319 if (e->rx_pending > MAX_RX_BUFFERS ||
1320 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1321 e->tx_pending > MAX_TXQ_ENTRIES ||
1322 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1323 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1324 e->rx_pending < MIN_FL_ENTRIES ||
1325 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1326 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1329 if (adapter->flags & FULL_INIT_DONE)
1332 for (i = 0; i < SGE_QSETS; ++i) {
1333 struct qset_params *q = &adapter->params.sge.qset[i];
1335 q->rspq_size = e->rx_mini_pending;
1336 q->fl_size = e->rx_pending;
1337 q->jumbo_size = e->rx_jumbo_pending;
1338 q->txq_size[0] = e->tx_pending;
1339 q->txq_size[1] = e->tx_pending;
1340 q->txq_size[2] = e->tx_pending;
1345 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1347 struct adapter *adapter = dev->priv;
1348 struct qset_params *qsp = &adapter->params.sge.qset[0];
1349 struct sge_qset *qs = &adapter->sge.qs[0];
1351 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1354 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1355 t3_update_qset_coalesce(qs, qsp);
1359 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1361 struct adapter *adapter = dev->priv;
1362 struct qset_params *q = adapter->params.sge.qset;
1364 c->rx_coalesce_usecs = q->coalesce_usecs;
1368 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1372 struct adapter *adapter = dev->priv;
1374 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1378 e->magic = EEPROM_MAGIC;
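/*
 * The EEPROM is read one 32-bit word at a time, so start at the word
 * containing e->offset and copy only the requested window out afterwards.
 */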
1379 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1380 err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1383 memcpy(data, buf + e->offset, e->len);
1388 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1393 u32 aligned_offset, aligned_len, *p;
1394 struct adapter *adapter = dev->priv;
1396 if (eeprom->magic != EEPROM_MAGIC)
1399 aligned_offset = eeprom->offset & ~3;
1400 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
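/*
 * Writes happen in aligned 32-bit words.  For a request that is not word
 * aligned, read the affected words back first so the surrounding bytes are
 * preserved (read-modify-write).
 */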
1402 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1403 buf = kmalloc(aligned_len, GFP_KERNEL);
1406 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1407 if (!err && aligned_len > 4)
1408 err = t3_seeprom_read(adapter,
1409 aligned_offset + aligned_len - 4,
1410 (u32 *)&buf[aligned_len - 4]);
1413 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1417 err = t3_seeprom_wp(adapter, 0);
1421 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1422 err = t3_seeprom_write(adapter, aligned_offset, *p);
1423 aligned_offset += 4;
1427 err = t3_seeprom_wp(adapter, 1);
1434 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1438 memset(&wol->sopass, 0, sizeof(wol->sopass));
1441 static const struct ethtool_ops cxgb_ethtool_ops = {
1442 .get_settings = get_settings,
1443 .set_settings = set_settings,
1444 .get_drvinfo = get_drvinfo,
1445 .get_msglevel = get_msglevel,
1446 .set_msglevel = set_msglevel,
1447 .get_ringparam = get_sge_param,
1448 .set_ringparam = set_sge_param,
1449 .get_coalesce = get_coalesce,
1450 .set_coalesce = set_coalesce,
1451 .get_eeprom_len = get_eeprom_len,
1452 .get_eeprom = get_eeprom,
1453 .set_eeprom = set_eeprom,
1454 .get_pauseparam = get_pauseparam,
1455 .set_pauseparam = set_pauseparam,
1456 .get_rx_csum = get_rx_csum,
1457 .set_rx_csum = set_rx_csum,
1458 .get_tx_csum = ethtool_op_get_tx_csum,
1459 .set_tx_csum = ethtool_op_set_tx_csum,
1460 .get_sg = ethtool_op_get_sg,
1461 .set_sg = ethtool_op_set_sg,
1462 .get_link = ethtool_op_get_link,
1463 .get_strings = get_strings,
1464 .phys_id = cxgb3_phys_id,
1465 .nway_reset = restart_autoneg,
1466 .get_stats_count = get_stats_count,
1467 .get_ethtool_stats = get_stats,
1468 .get_regs_len = get_regs_len,
1469 .get_regs = get_regs,
1471 .get_tso = ethtool_op_get_tso,
1472 .set_tso = ethtool_op_set_tso,
1473 .get_perm_addr = ethtool_op_get_perm_addr
1476 static int in_range(int val, int lo, int hi)
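/* Negative values mean "leave this setting unchanged" and are accepted. */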
1478 return val < 0 || (val <= hi && val >= lo);
1481 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1485 struct adapter *adapter = dev->priv;
1487 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1491 case CHELSIO_SETREG:{
1492 struct ch_reg edata;
1494 if (!capable(CAP_NET_ADMIN))
1496 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1498 if ((edata.addr & 3) != 0
1499 || edata.addr >= adapter->mmio_len)
1501 writel(edata.val, adapter->regs + edata.addr);
1504 case CHELSIO_GETREG:{
1505 struct ch_reg edata;
1507 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1509 if ((edata.addr & 3) != 0
1510 || edata.addr >= adapter->mmio_len)
1512 edata.val = readl(adapter->regs + edata.addr);
1513 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1517 case CHELSIO_SET_QSET_PARAMS:{
1519 struct qset_params *q;
1520 struct ch_qset_params t;
1522 if (!capable(CAP_NET_ADMIN))
1524 if (copy_from_user(&t, useraddr, sizeof(t)))
1526 if (t.qset_idx >= SGE_QSETS)
1528 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1529 !in_range(t.cong_thres, 0, 255) ||
1530 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1532 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1534 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1535 MAX_CTRL_TXQ_ENTRIES) ||
1536 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1538 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1539 MAX_RX_JUMBO_BUFFERS)
1540 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1543 if ((adapter->flags & FULL_INIT_DONE) &&
1544 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1545 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1546 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1547 t.polling >= 0 || t.cong_thres >= 0))
1550 q = &adapter->params.sge.qset[t.qset_idx];
1552 if (t.rspq_size >= 0)
1553 q->rspq_size = t.rspq_size;
1554 if (t.fl_size[0] >= 0)
1555 q->fl_size = t.fl_size[0];
1556 if (t.fl_size[1] >= 0)
1557 q->jumbo_size = t.fl_size[1];
1558 if (t.txq_size[0] >= 0)
1559 q->txq_size[0] = t.txq_size[0];
1560 if (t.txq_size[1] >= 0)
1561 q->txq_size[1] = t.txq_size[1];
1562 if (t.txq_size[2] >= 0)
1563 q->txq_size[2] = t.txq_size[2];
1564 if (t.cong_thres >= 0)
1565 q->cong_thres = t.cong_thres;
1566 if (t.intr_lat >= 0) {
1567 struct sge_qset *qs =
1568 &adapter->sge.qs[t.qset_idx];
1570 q->coalesce_usecs = t.intr_lat;
1571 t3_update_qset_coalesce(qs, q);
1573 if (t.polling >= 0) {
1574 if (adapter->flags & USING_MSIX)
1575 q->polling = t.polling;
1577 /* No polling with INTx for T3A */
1578 if (adapter->params.rev == 0 &&
1579 !(adapter->flags & USING_MSI))
1582 for (i = 0; i < SGE_QSETS; i++) {
1583 q = &adapter->params.sge.
1585 q->polling = t.polling;
1591 case CHELSIO_GET_QSET_PARAMS:{
1592 struct qset_params *q;
1593 struct ch_qset_params t;
1595 if (copy_from_user(&t, useraddr, sizeof(t)))
1597 if (t.qset_idx >= SGE_QSETS)
1600 q = &adapter->params.sge.qset[t.qset_idx];
1601 t.rspq_size = q->rspq_size;
1602 t.txq_size[0] = q->txq_size[0];
1603 t.txq_size[1] = q->txq_size[1];
1604 t.txq_size[2] = q->txq_size[2];
1605 t.fl_size[0] = q->fl_size;
1606 t.fl_size[1] = q->jumbo_size;
1607 t.polling = q->polling;
1608 t.intr_lat = q->coalesce_usecs;
1609 t.cong_thres = q->cong_thres;
1611 if (copy_to_user(useraddr, &t, sizeof(t)))
1615 case CHELSIO_SET_QSET_NUM:{
1616 struct ch_reg edata;
1617 struct port_info *pi = netdev_priv(dev);
1618 unsigned int i, first_qset = 0, other_qsets = 0;
1620 if (!capable(CAP_NET_ADMIN))
1622 if (adapter->flags & FULL_INIT_DONE)
1624 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1626 if (edata.val < 1 ||
1627 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1630 for_each_port(adapter, i)
1631 if (adapter->port[i] && adapter->port[i] != dev)
1632 other_qsets += adap2pinfo(adapter, i)->nqsets;
1634 if (edata.val + other_qsets > SGE_QSETS)
1637 pi->nqsets = edata.val;
1639 for_each_port(adapter, i)
1640 if (adapter->port[i]) {
1641 pi = adap2pinfo(adapter, i);
1642 pi->first_qset = first_qset;
1643 first_qset += pi->nqsets;
1647 case CHELSIO_GET_QSET_NUM:{
1648 struct ch_reg edata;
1649 struct port_info *pi = netdev_priv(dev);
1651 edata.cmd = CHELSIO_GET_QSET_NUM;
1652 edata.val = pi->nqsets;
1653 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1657 case CHELSIO_LOAD_FW:{
1659 struct ch_mem_range t;
1661 if (!capable(CAP_NET_ADMIN))
1663 if (copy_from_user(&t, useraddr, sizeof(t)))
1666 fw_data = kmalloc(t.len, GFP_KERNEL);
1671 (fw_data, useraddr + sizeof(t), t.len)) {
1676 ret = t3_load_fw(adapter, fw_data, t.len);
1682 case CHELSIO_SETMTUTAB:{
1686 if (!is_offload(adapter))
1688 if (!capable(CAP_NET_ADMIN))
1690 if (offload_running(adapter))
1692 if (copy_from_user(&m, useraddr, sizeof(m)))
1694 if (m.nmtus != NMTUS)
1696 if (m.mtus[0] < 81) /* accommodate SACK */
1699 /* MTUs must be in ascending order */
1700 for (i = 1; i < NMTUS; ++i)
1701 if (m.mtus[i] < m.mtus[i - 1])
1704 memcpy(adapter->params.mtus, m.mtus,
1705 sizeof(adapter->params.mtus));
1708 case CHELSIO_GET_PM:{
1709 struct tp_params *p = &adapter->params.tp;
1710 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1712 if (!is_offload(adapter))
1714 m.tx_pg_sz = p->tx_pg_size;
1715 m.tx_num_pg = p->tx_num_pgs;
1716 m.rx_pg_sz = p->rx_pg_size;
1717 m.rx_num_pg = p->rx_num_pgs;
1718 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1719 if (copy_to_user(useraddr, &m, sizeof(m)))
1723 case CHELSIO_SET_PM:{
1725 struct tp_params *p = &adapter->params.tp;
1727 if (!is_offload(adapter))
1729 if (!capable(CAP_NET_ADMIN))
1731 if (adapter->flags & FULL_INIT_DONE)
1733 if (copy_from_user(&m, useraddr, sizeof(m)))
1735 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1736 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1737 return -EINVAL; /* not power of 2 */
1738 if (!(m.rx_pg_sz & 0x14000))
1739 return -EINVAL; /* not 16KB or 64KB */
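/* Tx page size must likewise be one of the supported powers of 2; the mask
 * 0x1554000 has a bit set for each valid size from 16KB up to 16MB. */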
1740 if (!(m.tx_pg_sz & 0x1554000))
1742 if (m.tx_num_pg == -1)
1743 m.tx_num_pg = p->tx_num_pgs;
1744 if (m.rx_num_pg == -1)
1745 m.rx_num_pg = p->rx_num_pgs;
1746 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1748 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1749 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1751 p->rx_pg_size = m.rx_pg_sz;
1752 p->tx_pg_size = m.tx_pg_sz;
1753 p->rx_num_pgs = m.rx_num_pg;
1754 p->tx_num_pgs = m.tx_num_pg;
1757 case CHELSIO_GET_MEM:{
1758 struct ch_mem_range t;
1762 if (!is_offload(adapter))
1764 if (!(adapter->flags & FULL_INIT_DONE))
1765 return -EIO; /* need the memory controllers */
1766 if (copy_from_user(&t, useraddr, sizeof(t)))
1768 if ((t.addr & 7) || (t.len & 7))
1770 if (t.mem_id == MEM_CM)
1772 else if (t.mem_id == MEM_PMRX)
1773 mem = &adapter->pmrx;
1774 else if (t.mem_id == MEM_PMTX)
1775 mem = &adapter->pmtx;
1781 * bits 0..9: chip version
1782 * bits 10..15: chip revision
1784 t.version = 3 | (adapter->params.rev << 10);
1785 if (copy_to_user(useraddr, &t, sizeof(t)))
1789 * Read 256 bytes at a time as len can be large and we don't
1790 * want to use huge intermediate buffers.
1792 useraddr += sizeof(t); /* advance to start of buffer */
1794 unsigned int chunk =
1795 min_t(unsigned int, t.len, sizeof(buf));
1798 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1802 if (copy_to_user(useraddr, buf, chunk))
1810 case CHELSIO_SET_TRACE_FILTER:{
1812 const struct trace_params *tp;
1814 if (!capable(CAP_NET_ADMIN))
1816 if (!offload_running(adapter))
1818 if (copy_from_user(&t, useraddr, sizeof(t)))
1821 tp = (const struct trace_params *)&t.sip;
1823 t3_config_trace_filter(adapter, tp, 0,
1827 t3_config_trace_filter(adapter, tp, 1,
1832 case CHELSIO_SET_PKTSCHED:{
1833 struct sk_buff *skb;
1834 struct ch_pktsched_params p;
1835 struct mngt_pktsched_wr *req;
1837 if (!(adapter->flags & FULL_INIT_DONE))
1838 return -EIO; /* uP must be up and running */
1839 if (copy_from_user(&p, useraddr, sizeof(p)))
1841 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1845 (struct mngt_pktsched_wr *)skb_put(skb,
1847 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1848 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1849 req->sched = p.sched;
1853 req->binding = p.binding;
1855 "pktsched: sched %u idx %u min %u max %u binding %u\n",
1856 req->sched, req->idx, req->min, req->max,
1859 offload_tx(&adapter->tdev, skb);
1868 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1871 struct adapter *adapter = dev->priv;
1872 struct port_info *pi = netdev_priv(dev);
1873 struct mii_ioctl_data *data = if_mii(req);
1877 data->phy_id = pi->phy.addr;
1881 struct cphy *phy = &pi->phy;
1883 if (!phy->mdio_read)
1885 if (is_10G(adapter)) {
1886 mmd = data->phy_id >> 8;
1889 else if (mmd > MDIO_DEV_XGXS)
1893 phy->mdio_read(adapter, data->phy_id & 0x1f,
1894 mmd, data->reg_num, &val);
1897 phy->mdio_read(adapter, data->phy_id & 0x1f,
1898 0, data->reg_num & 0x1f,
1901 data->val_out = val;
1905 struct cphy *phy = &pi->phy;
1907 if (!capable(CAP_NET_ADMIN))
1909 if (!phy->mdio_write)
1911 if (is_10G(adapter)) {
1912 mmd = data->phy_id >> 8;
1915 else if (mmd > MDIO_DEV_XGXS)
1919 phy->mdio_write(adapter,
1920 data->phy_id & 0x1f, mmd,
1925 phy->mdio_write(adapter,
1926 data->phy_id & 0x1f, 0,
1927 data->reg_num & 0x1f,
1932 return cxgb_extension_ioctl(dev, req->ifr_data);
1939 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1942 struct adapter *adapter = dev->priv;
1943 struct port_info *pi = netdev_priv(dev);
1945 if (new_mtu < 81) /* accommodate SACK */
1947 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1950 init_port_mtus(adapter);
1951 if (adapter->params.rev == 0 && offload_running(adapter))
1952 t3_load_mtus(adapter, adapter->params.mtus,
1953 adapter->params.a_wnd, adapter->params.b_wnd,
1954 adapter->port[0]->mtu);
1958 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1960 struct adapter *adapter = dev->priv;
1961 struct port_info *pi = netdev_priv(dev);
1962 struct sockaddr *addr = p;
1964 if (!is_valid_ether_addr(addr->sa_data))
1967 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1968 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
1969 if (offload_running(adapter))
1970 write_smt_entry(adapter, pi->port_id);
1975 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1976 * @adap: the adapter
1979 * Ensures that current Rx processing on any of the queues associated with
1980 * the given port completes before returning. We do this by acquiring and
1981 * releasing the locks of the response queues associated with the port.
1983 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1987 for (i = 0; i < p->nqsets; i++) {
1988 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
1990 spin_lock_irq(&q->lock);
1991 spin_unlock_irq(&q->lock);
1995 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1997 struct adapter *adapter = dev->priv;
1998 struct port_info *pi = netdev_priv(dev);
2001 if (adapter->params.rev > 0)
2002 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2004 /* single control for all ports */
2005 unsigned int i, have_vlans = 0;
2006 for_each_port(adapter, i)
2007 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2009 t3_set_vlan_accel(adapter, 1, have_vlans);
2011 t3_synchronize_rx(adapter, pi);
2014 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2019 #ifdef CONFIG_NET_POLL_CONTROLLER
2020 static void cxgb_netpoll(struct net_device *dev)
2022 struct adapter *adapter = dev->priv;
2023 struct sge_qset *qs = dev2qset(dev);
2025 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2031 * Periodic accumulation of MAC statistics.
2033 static void mac_stats_update(struct adapter *adapter)
2037 for_each_port(adapter, i) {
2038 struct net_device *dev = adapter->port[i];
2039 struct port_info *p = netdev_priv(dev);
2041 if (netif_running(dev)) {
2042 spin_lock(&adapter->stats_lock);
2043 t3_mac_update_stats(&p->mac);
2044 spin_unlock(&adapter->stats_lock);
2049 static void check_link_status(struct adapter *adapter)
2053 for_each_port(adapter, i) {
2054 struct net_device *dev = adapter->port[i];
2055 struct port_info *p = netdev_priv(dev);
2057 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2058 t3_link_changed(adapter, i);
2062 static void t3_adap_check_task(struct work_struct *work)
2064 struct adapter *adapter = container_of(work, struct adapter,
2065 adap_check_task.work);
2066 const struct adapter_params *p = &adapter->params;
2068 adapter->check_task_cnt++;
2070 /* Check link status for PHYs without interrupts */
2071 if (p->linkpoll_period)
2072 check_link_status(adapter);
2074 /* Accumulate MAC stats if needed */
2075 if (!p->linkpoll_period ||
2076 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2077 p->stats_update_period) {
2078 mac_stats_update(adapter);
2079 adapter->check_task_cnt = 0;
2082 /* Schedule the next check update if any port is active. */
2083 spin_lock(&adapter->work_lock);
2084 if (adapter->open_device_map & PORT_MASK)
2085 schedule_chk_task(adapter);
2086 spin_unlock(&adapter->work_lock);
2090 * Processes external (PHY) interrupts in process context.
2092 static void ext_intr_task(struct work_struct *work)
2094 struct adapter *adapter = container_of(work, struct adapter,
2095 ext_intr_handler_task);
2097 t3_phy_intr_handler(adapter);
2099 /* Now reenable external interrupts */
2100 spin_lock_irq(&adapter->work_lock);
2101 if (adapter->slow_intr_mask) {
2102 adapter->slow_intr_mask |= F_T3DBG;
2103 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2104 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2105 adapter->slow_intr_mask);
2107 spin_unlock_irq(&adapter->work_lock);
2111 * Interrupt-context handler for external (PHY) interrupts.
2113 void t3_os_ext_intr_handler(struct adapter *adapter)
2116 * Schedule a task to handle external interrupts as they may be slow
2117 * and we use a mutex to protect MDIO registers. We disable PHY
2118 * interrupts in the meantime and let the task reenable them when it's done.
2121 spin_lock(&adapter->work_lock);
2122 if (adapter->slow_intr_mask) {
2123 adapter->slow_intr_mask &= ~F_T3DBG;
2124 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2125 adapter->slow_intr_mask);
2126 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2128 spin_unlock(&adapter->work_lock);
2131 void t3_fatal_err(struct adapter *adapter)
2133 unsigned int fw_status[4];
2135 if (adapter->flags & FULL_INIT_DONE) {
2136 t3_sge_stop(adapter);
2137 t3_intr_disable(adapter);
2139 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2140 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2141 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2142 fw_status[0], fw_status[1],
2143 fw_status[2], fw_status[3]);
2147 static int __devinit cxgb_enable_msix(struct adapter *adap)
2149 struct msix_entry entries[SGE_QSETS + 1];
2152 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2153 entries[i].entry = i;
2155 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2157 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2158 adap->msix_info[i].vec = entries[i].vector;
2160 dev_info(&adap->pdev->dev,
2161 "only %d MSI-X vectors left, not using MSI-X\n", err);
2165 static void __devinit print_port_info(struct adapter *adap,
2166 const struct adapter_info *ai)
2168 static const char *pci_variant[] = {
2169 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2176 snprintf(buf, sizeof(buf), "%s x%d",
2177 pci_variant[adap->params.pci.variant],
2178 adap->params.pci.width);
2180 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2181 pci_variant[adap->params.pci.variant],
2182 adap->params.pci.speed, adap->params.pci.width);
2184 for_each_port(adap, i) {
2185 struct net_device *dev = adap->port[i];
2186 const struct port_info *pi = netdev_priv(dev);
2188 if (!test_bit(i, &adap->registered_device_map))
2190 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2191 dev->name, ai->desc, pi->port_type->desc,
2192 adap->params.rev, buf,
2193 (adap->flags & USING_MSIX) ? " MSI-X" :
2194 (adap->flags & USING_MSI) ? " MSI" : "");
2195 if (adap->name == dev->name && adap->params.vpd.mclk)
2196 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2197 adap->name, t3_mc7_size(&adap->cm) >> 20,
2198 t3_mc7_size(&adap->pmtx) >> 20,
2199 t3_mc7_size(&adap->pmrx) >> 20);
2203 static int __devinit init_one(struct pci_dev *pdev,
2204 const struct pci_device_id *ent)
2206 static int version_printed;
2208 int i, err, pci_using_dac = 0;
2209 unsigned long mmio_start, mmio_len;
2210 const struct adapter_info *ai;
2211 struct adapter *adapter = NULL;
2212 struct port_info *pi;
2214 if (!version_printed) {
2215 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2220 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2222 printk(KERN_ERR DRV_NAME
2223 ": cannot initialize work queue\n");
2228 err = pci_request_regions(pdev, DRV_NAME);
2230 /* Just info, some other driver may have claimed the device. */
2231 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2235 err = pci_enable_device(pdev);
2237 dev_err(&pdev->dev, "cannot enable PCI device\n");
2238 goto out_release_regions;
2241 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2243 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2245 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2246 "coherent allocations\n");
2247 goto out_disable_device;
2249 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2250 dev_err(&pdev->dev, "no usable DMA configuration\n");
2251 goto out_disable_device;
2254 pci_set_master(pdev);
2256 mmio_start = pci_resource_start(pdev, 0);
2257 mmio_len = pci_resource_len(pdev, 0);
2258 ai = t3_get_adapter_info(ent->driver_data);
2260 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2263 goto out_disable_device;
2266 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2267 if (!adapter->regs) {
2268 dev_err(&pdev->dev, "cannot map device registers\n");
2270 goto out_free_adapter;
2273 adapter->pdev = pdev;
2274 adapter->name = pci_name(pdev);
2275 adapter->msg_enable = dflt_msg_enable;
2276 adapter->mmio_len = mmio_len;
2278 mutex_init(&adapter->mdio_lock);
2279 spin_lock_init(&adapter->work_lock);
2280 spin_lock_init(&adapter->stats_lock);
2282 INIT_LIST_HEAD(&adapter->adapter_list);
2283 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2284 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2286 for (i = 0; i < ai->nports; ++i) {
2287 struct net_device *netdev;
2289 netdev = alloc_etherdev(sizeof(struct port_info));
2295 SET_MODULE_OWNER(netdev);
2296 SET_NETDEV_DEV(netdev, &pdev->dev);
2298 adapter->port[i] = netdev;
2299 pi = netdev_priv(netdev);
2300 pi->rx_csum_offload = 1;
2305 netif_carrier_off(netdev);
2306 netdev->irq = pdev->irq;
2307 netdev->mem_start = mmio_start;
2308 netdev->mem_end = mmio_start + mmio_len - 1;
2309 netdev->priv = adapter;
2310 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2311 netdev->features |= NETIF_F_LLTX;
2313 netdev->features |= NETIF_F_HIGHDMA;
2315 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2316 netdev->vlan_rx_register = vlan_rx_register;
2317 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2319 netdev->open = cxgb_open;
2320 netdev->stop = cxgb_close;
2321 netdev->hard_start_xmit = t3_eth_xmit;
2322 netdev->get_stats = cxgb_get_stats;
2323 netdev->set_multicast_list = cxgb_set_rxmode;
2324 netdev->do_ioctl = cxgb_ioctl;
2325 netdev->change_mtu = cxgb_change_mtu;
2326 netdev->set_mac_address = cxgb_set_mac_addr;
2327 #ifdef CONFIG_NET_POLL_CONTROLLER
2328 netdev->poll_controller = cxgb_netpoll;
2330 netdev->weight = 64;
2332 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2335 pci_set_drvdata(pdev, adapter->port[0]);
2336 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2342 * The card is now ready to go. If any errors occur during device
2343 * registration we do not fail the whole card but rather proceed only
2344 * with the ports we manage to register successfully. However we must
2345 * register at least one net device.
2347 for_each_port(adapter, i) {
2348 err = register_netdev(adapter->port[i]);
2350 dev_warn(&pdev->dev,
2351 "cannot register net device %s, skipping\n",
2352 adapter->port[i]->name);
2355 * Change the name we use for messages to the name of
2356 * the first successfully registered interface.
2358 if (!adapter->registered_device_map)
2359 adapter->name = adapter->port[i]->name;
2361 __set_bit(i, &adapter->registered_device_map);
2364 if (!adapter->registered_device_map) {
2365 dev_err(&pdev->dev, "could not register any net devices\n");
2369 /* Driver's ready. Reflect it on LEDs */
2370 t3_led_ready(adapter);
2372 if (is_offload(adapter)) {
2373 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2374 cxgb3_adapter_ofld(adapter);
2377 /* See what interrupts we'll be using */
2378 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2379 adapter->flags |= USING_MSIX;
2380 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2381 adapter->flags |= USING_MSI;
2383 err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
2386 print_port_info(adapter, ai);
2390 iounmap(adapter->regs);
2391 for (i = ai->nports - 1; i >= 0; --i)
2392 if (adapter->port[i])
2393 free_netdev(adapter->port[i]);
2399 pci_disable_device(pdev);
2400 out_release_regions:
2401 pci_release_regions(pdev);
2402 pci_set_drvdata(pdev, NULL);
2406 static void __devexit remove_one(struct pci_dev *pdev)
2408 struct net_device *dev = pci_get_drvdata(pdev);
2412 struct adapter *adapter = dev->priv;
2414 t3_sge_stop(adapter);
2415 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2418 for_each_port(adapter, i)
2419 if (test_bit(i, &adapter->registered_device_map))
2420 unregister_netdev(adapter->port[i]);
2422 if (is_offload(adapter)) {
2423 cxgb3_adapter_unofld(adapter);
2424 if (test_bit(OFFLOAD_DEVMAP_BIT,
2425 &adapter->open_device_map))
2426 offload_close(&adapter->tdev);
2429 t3_free_sge_resources(adapter);
2430 cxgb_disable_msi(adapter);
2432 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2433 if (adapter->dummy_netdev[i]) {
2434 free_netdev(adapter->dummy_netdev[i]);
2435 adapter->dummy_netdev[i] = NULL;
2438 for_each_port(adapter, i)
2439 if (adapter->port[i])
2440 free_netdev(adapter->port[i]);
2442 iounmap(adapter->regs);
2444 pci_release_regions(pdev);
2445 pci_disable_device(pdev);
2446 pci_set_drvdata(pdev, NULL);
2450 static struct pci_driver driver = {
2452 .id_table = cxgb3_pci_tbl,
2454 .remove = __devexit_p(remove_one),
2457 static int __init cxgb3_init_module(void)
2461 cxgb3_offload_init();
2463 ret = pci_register_driver(&driver);
2467 static void __exit cxgb3_cleanup_module(void)
2469 pci_unregister_driver(&driver);
2471 destroy_workqueue(cxgb3_wq);
2474 module_init(cxgb3_init_module);
2475 module_exit(cxgb3_cleanup_module);