cxgb3: simplify port type struct and usage
[linux-3.10.git] / drivers / net / cxgb3 / cxgb3_main.c
1 /*
2  * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
48
49 #include "common.h"
50 #include "cxgb3_ioctl.h"
51 #include "regs.h"
52 #include "cxgb3_offload.h"
53 #include "version.h"
54
55 #include "cxgb3_ctl_defs.h"
56 #include "t3_cpl.h"
57 #include "firmware_exports.h"
58
enum {
	/* Bounds applied to SGE descriptor-ring sizes (in entries). */
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

/* Bit mask with one bit set per possible adapter port. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Default netif message-level bitmap (module parameter dflt_msg_enable). */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Signature for valid EEPROM contents -- presumably checked by the
 * ethtool EEPROM ops; the consumer is not visible in this chunk. */
#define EEPROM_MAGIC 0x38E2F10C

/* PCI table entry helper; @idx lands in pci_device_id::driver_data. */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
140
141 /**
142  *      link_report - show link status and link speed/duplex
143  *      @p: the port whose settings are to be reported
144  *
145  *      Shows the link status, speed, and duplex of a port.
146  */
147 static void link_report(struct net_device *dev)
148 {
149         if (!netif_carrier_ok(dev))
150                 printk(KERN_INFO "%s: link down\n", dev->name);
151         else {
152                 const char *s = "10Mbps";
153                 const struct port_info *p = netdev_priv(dev);
154
155                 switch (p->link_config.speed) {
156                 case SPEED_10000:
157                         s = "10Gbps";
158                         break;
159                 case SPEED_1000:
160                         s = "1000Mbps";
161                         break;
162                 case SPEED_100:
163                         s = "100Mbps";
164                         break;
165                 }
166
167                 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169         }
170 }
171
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	/* Only act on actual transitions of the carrier state. */
	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			/* Link came up: start accepting RX traffic again. */
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			/*
			 * Link dropped: power down the PHY, stop MAC RX, and
			 * restart autonegotiation so the link can come back.
			 */
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
210
211 static void cxgb_set_rxmode(struct net_device *dev)
212 {
213         struct t3_rx_mode rm;
214         struct port_info *pi = netdev_priv(dev);
215
216         init_rx_mode(&rm, dev, dev->mc_list);
217         t3_mac_set_rx_mode(&pi->mac, &rm);
218 }
219
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port: resets the
 *	MAC, reprograms MTU, unicast address and RX mode, restarts the link,
 *	and finally enables both traffic directions.  The steps are order
 *	sensitive (reset before reprogramming, enable last).
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
240
241 static inline void cxgb_disable_msi(struct adapter *adapter)
242 {
243         if (adapter->flags & USING_MSIX) {
244                 pci_disable_msix(adapter->pdev);
245                 adapter->flags &= ~USING_MSIX;
246         } else if (adapter->flags & USING_MSI) {
247                 pci_disable_msi(adapter->pdev);
248                 adapter->flags &= ~USING_MSI;
249         }
250 }
251
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 * @cookie is the adapter pointer passed at request_irq() time; all work
 * is delegated to the OS-neutral slow-path handler.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
260
261 /*
262  * Name the MSI-X interrupts.
263  */
264 static void name_msix_vecs(struct adapter *adap)
265 {
266         int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
267
268         snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269         adap->msix_info[0].desc[n] = 0;
270
271         for_each_port(adap, j) {
272                 struct net_device *d = adap->port[j];
273                 const struct port_info *pi = netdev_priv(d);
274
275                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276                         snprintf(adap->msix_info[msi_idx].desc, n,
277                                  "%s-%d", d->name, pi->first_qset + i);
278                         adap->msix_info[msi_idx].desc[n] = 0;
279                 }
280         }
281 }
282
/*
 * Request one MSI-X data interrupt per queue set across all ports.
 * Vector 0 is reserved for the slow path, so qset @qidx uses vector
 * @qidx + 1.  On failure, every vector requested so far is freed and
 * the request_irq() error is returned.
 */
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			/* Handler choice depends on the qset's polling mode. */
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				/* Unwind the vectors already requested. */
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
308
/*
 * Release the adapter's interrupt(s).  Under MSI-X this means the async
 * vector (0) plus one data vector per configured queue set; under MSI or
 * legacy interrupts there is a single IRQ on the PCI device.
 */
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		/* Total data vectors = sum of nqsets over all ports. */
		for_each_port(adapter, i)
		    n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
324
325 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
326                               unsigned long n)
327 {
328         int attempts = 5;
329
330         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
331                 if (!--attempts)
332                         return -ETIMEDOUT;
333                 msleep(10);
334         }
335         return 0;
336 }
337
/*
 * Write a benign value into every entry of the on-chip SMT, L2T and
 * routing tables (plus one TCB field) via management CPLs, then wait for
 * all replies.  The name suggests this establishes valid parity in those
 * memories after reset -- TODO confirm against the T3 hardware docs.
 * Offload mode is enabled only for the duration of the writes.
 * Returns 0 on success or -ETIMEDOUT if replies don't arrive in time.
 */
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	/* Snapshot the reply counter so we can wait for exactly our replies. */
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		/* __GFP_NOFAIL: allocation cannot fail, no NULL check needed. */
		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	/* 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	/* 2048 routing-table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	/* One final SET_TCB_FIELD request. */
	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	/* Wait for one reply per request sent above. */
	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}
395
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	/* A single-port adapter still programs the second half; use 1 qset. */
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	/*
	 * First half of the table maps to port 0's qsets, second half to
	 * port 1's qsets (which start at absolute index nq0).
	 */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
428
/*
 * Register a NAPI instance for every queue set that has been allocated
 * (qs->adap is non-NULL only for initialized qsets) and record that NAPI
 * init has been done so it is never repeated.
 */
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
448
449 /*
450  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
451  * both netdevices representing interfaces and the dummy ones for the extra
452  * queues.
453  */
454 static void quiesce_rx(struct adapter *adap)
455 {
456         int i;
457
458         for (i = 0; i < SGE_QSETS; i++)
459                 if (adap->sge.qs[i].adap)
460                         napi_disable(&adap->sge.qs[i].napi);
461 }
462
463 static void enable_all_napi(struct adapter *adap)
464 {
465         int i;
466         for (i = 0; i < SGE_QSETS; i++)
467                 if (adap->sge.qs[i].adap)
468                         napi_enable(&adap->sge.qs[i].napi);
469 }
470
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.  Returns 0 on success; on failure all
 *	SGE resources allocated so far are torn down and the error returned.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	/*
	 * irq_idx -1 is passed for rev > 0 parts not using MSI -- presumably
	 * a "no per-qset IRQ binding" sentinel for t3_sge_alloc_qset();
	 * confirm against the sge implementation.
	 */
	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			/* LRO requires RX checksum offload to be on. */
			if (!pi->rx_csum_offload)
				adap->params.sge.qset[qset_idx].lro = 0;
			/* MSI-X data vectors start at 1 (0 is slow path). */
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
510
511 static ssize_t attr_show(struct device *d, char *buf,
512                          ssize_t(*format) (struct net_device *, char *))
513 {
514         ssize_t len;
515
516         /* Synchronize with ioctls that may shut down the device */
517         rtnl_lock();
518         len = (*format) (to_net_dev(d), buf);
519         rtnl_unlock();
520         return len;
521 }
522
/*
 * Common sysfs "store" helper: parses an unsigned value from @buf,
 * range-checks it against [@min_val, @max_val], and applies it via @set
 * under the rtnl lock.  Requires CAP_NET_ADMIN.  Returns @len on success
 * or a negative errno.  (Uses the legacy simple_strtoul parser; trailing
 * garbage after the number is accepted.)
 */
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* endp == buf means no digits were consumed, i.e. bad input. */
	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
546
/*
 * Generate a format_<name>() helper that prints @val_expr for the port's
 * adapter, plus the matching sysfs show_<name>() wrapper built on
 * attr_show().  Both 'pi' and 'adap' are in scope inside @val_expr.
 */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
559
/*
 * Set the number of MC5 filter TIDs.  Rejected once the adapter is fully
 * initialized, on rev-0 parts (which reject any nonzero value), or when
 * the request doesn't fit in the CAM alongside servers and the minimum
 * TID reservation.  Returns 0 on success or a negative errno.
 */
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	/* Non-offload adapters need no reserved TIDs. */
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}
576
/* sysfs store entry point for the "nfilters" attribute (full uint range). */
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
582
/*
 * Set the number of MC5 server TIDs.  Rejected once the adapter is fully
 * initialized or when the request doesn't fit in the CAM alongside
 * filters and the minimum TID reservation.
 */
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}
596
/* sysfs store entry point for the "nservers" attribute (full uint range). */
static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
602
/* Read-only sysfs attribute: show handler only. */
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Read-write sysfs attribute: generated show plus the given store method. */
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

/* Attributes exposed for every cxgb3 net device. */
static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
623
/*
 * Show the rate limit of TX scheduler @sched in Kbps, or "disabled".
 * Each 32-bit TP_TM PIO word holds two schedulers' (bpt, cpt) pairs;
 * odd schedulers live in the high half.  Rate = bpt bytes every cpt
 * core-clock ticks, converted to Kbps (the /125 turns bytes/ms into
 * Kbit/s: *8 / 1000).
 */
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		/* cclk is in kHz, so v is bytes-per-interval scaled to ms. */
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
649
/*
 * Set the rate limit (in Kbps, max 10 Gbps) of TX scheduler @sched.
 * Requires CAP_NET_ADMIN; the actual programming is done by
 * t3_config_sched() under the rtnl lock.  Returns @len on success or a
 * negative errno.
 */
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* endp == buf means no digits parsed; cap at 10,000,000 Kbps. */
	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
673
/*
 * Generate show_/store_ sysfs handlers for one hardware TX scheduler,
 * delegating to tm_attr_show()/tm_attr_store() with a fixed index.
 */
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

/* The T3 exposes eight TX schedulers. */
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

/* Scheduler attributes, registered only for offload-capable devices. */
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
709
/*
 * Sends an sk_buff to an offload queue driver after dealing with any
 * active network taps.  Bottom halves are disabled around the transmit.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int rc;

	local_bh_disable();
	rc = t3_offload_tx(tdev, skb);
	local_bh_enable();

	return rc;
}
723
/*
 * Program SMT entry @idx with port @idx's MAC address via a management
 * CPL sent on the offload path.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): unlike the requests built in init_tp_parity(), @req is
 * not memset() here -- any fields of cpl_smt_write_req beyond those
 * assigned below carry uninitialized skb data.  Confirm whether the
 * structure has reserved fields that should be zeroed.
 */
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
743
744 static int init_smt(struct adapter *adapter)
745 {
746         int i;
747
748         for_each_port(adapter, i)
749             write_smt_entry(adapter, i);
750         return 0;
751 }
752
753 static void init_port_mtus(struct adapter *adapter)
754 {
755         unsigned int mtus = adapter->port[0]->mtu;
756
757         if (adapter->port[1])
758                 mtus |= adapter->port[1]->mtu << 16;
759         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
760 }
761
/*
 * Build and transmit a PKTSCHED_SET management work request configuring
 * packet scheduler @sched for queue @qidx: rate bounds [@lo, @hi] and a
 * binding to @port.  Allocation uses __GFP_NOFAIL so no NULL check is
 * needed.  Returns the result of t3_mgmt_tx().
 */
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}
782
/*
 * Bind every queue set to its owning port via PKTSCHED_SET commands
 * (scheduler 1, unbounded rate: lo = hi = -1).  All qsets are attempted
 * even after a failure; the last error encountered is returned.
 */
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
801
/* Firmware / protocol-SRAM image file-name templates (major.minor.micro). */
#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

/*
 * Load the firmware image matching the driver's expected FW version from
 * userspace (request_firmware) and flash it into the adapter.  Returns 0
 * on success or a negative errno; the outcome is logged either way.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
832
833 static inline char t3rev2char(struct adapter *adapter)
834 {
835         char rev = 0;
836
837         switch(adapter->params.rev) {
838         case T3_REV_B:
839         case T3_REV_B2:
840                 rev = 'b';
841                 break;
842         case T3_REV_C:
843                 rev = 'c';
844                 break;
845         }
846         return rev;
847 }
848
/*
 * Load the revision-specific protocol-SRAM image from userspace,
 * validate it with t3_check_tpsram(), and program it into the adapter.
 * Revisions with no image (t3rev2char() == 0) succeed trivially.
 * Returns 0 on success or a negative errno; the firmware blob is
 * released on every path.
 */
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	/* Reject images that fail the integrity/version check. */
	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
892
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;
	int must_load;

	/* One-time bring-up: refresh stale firmware / TP SRAM images,
	 * initialize the hardware and allocate SGE queue sets + NAPI.
	 */
	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			/* On-flash image unusable: try to load a fresh one.
			 * Failure is fatal only when must_load is set.
			 */
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	/* Request IRQs: an async vector plus per-qset data vectors under
	 * MSI-X, otherwise a single (possibly shared) interrupt line.
	 */
	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			/* release the async vector acquired above */
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	/* T3 rev C (and later) offload parts need a one-time TP parity
	 * initialization; TP_PARITY_INIT records that it has been done.
	 */
	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
1002
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	/* No new interrupts from here on: release IRQs, drain any queued
	 * work items, then quiesce the receive path.
	 */
	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
1017
1018 static void schedule_chk_task(struct adapter *adap)
1019 {
1020         unsigned int timeo;
1021
1022         timeo = adap->params.linkpoll_period ?
1023             (HZ * adap->params.linkpoll_period) / 10 :
1024             adap->params.stats_update_period * HZ;
1025         if (timeo)
1026                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1027 }
1028
/*
 * Bring up the offload (TOE) side of the adapter: mark the offload device
 * open, make sure the adapter itself is up, switch TP into offload mode
 * and notify all registered ULP clients.  Returns 0 when offload is (or
 * already was) open, a negative errno on failure.
 */
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	/* Already open: nothing more to do. */
	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* No Ethernet port is up yet, so the adapter must be started. */
	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	/* rev 0 parts cap the MTU used for the congestion tables at
	 * port 0's MTU; later revisions use the full 0xffff.
	 */
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
1071
/*
 * Tear down the offload side: detach all ULP clients, leave TP offload
 * mode and, if no Ethernet port is open either, take the adapter down.
 */
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	/* Nothing to do if offload was never opened. */
	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	/* Last user gone: release adapter-wide resources. */
	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
1095
/*
 * net_device open handler: bring the adapter up on the first open, mark
 * this port open, optionally enable offload, start the link and the Tx
 * queue, and kick off the periodic check task if this is the first port.
 */
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	/* First opener (port or offload) brings up the whole adapter. */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			/* offload failure is non-fatal for plain NIC use */
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
1122
/*
 * net_device stop handler: quiesce the port (interrupts, Tx queue, PHY,
 * MAC), remove it from the open-device map, and shut down adapter-wide
 * services once the last user goes away.
 */
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	/* Cancel the periodic check task when no port remains open. */
	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	/* Neither ports nor offload open: take the adapter down. */
	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
1147
/*
 * net_device get_stats handler: refresh the accumulated MAC statistics
 * (under stats_lock, to serialize with the periodic update task) and map
 * them onto the generic netdev statistics structure.
 */
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	/* aggregate of all receive error classes below */
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
1186
1187 static u32 get_msglevel(struct net_device *dev)
1188 {
1189         struct port_info *pi = netdev_priv(dev);
1190         struct adapter *adapter = pi->adapter;
1191
1192         return adapter->msg_enable;
1193 }
1194
1195 static void set_msglevel(struct net_device *dev, u32 val)
1196 {
1197         struct port_info *pi = netdev_priv(dev);
1198         struct adapter *adapter = pi->adapter;
1199
1200         adapter->msg_enable = val;
1201 }
1202
/*
 * Names of the ethtool statistics.  The order here MUST match the order
 * in which get_stats() writes the corresponding values.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

};
1255
1256 static int get_sset_count(struct net_device *dev, int sset)
1257 {
1258         switch (sset) {
1259         case ETH_SS_STATS:
1260                 return ARRAY_SIZE(stats_strings);
1261         default:
1262                 return -EOPNOTSUPP;
1263         }
1264 }
1265
1266 #define T3_REGMAP_SIZE (3 * 1024)
1267
/* Size in bytes of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}
1272
/* Size in bytes of the serial EEPROM exposed via ethtool. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
1277
1278 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1279 {
1280         struct port_info *pi = netdev_priv(dev);
1281         struct adapter *adapter = pi->adapter;
1282         u32 fw_vers = 0;
1283         u32 tp_vers = 0;
1284
1285         t3_get_fw_version(adapter, &fw_vers);
1286         t3_get_tp_version(adapter, &tp_vers);
1287
1288         strcpy(info->driver, DRV_NAME);
1289         strcpy(info->version, DRV_VERSION);
1290         strcpy(info->bus_info, pci_name(adapter->pdev));
1291         if (!fw_vers)
1292                 strcpy(info->fw_version, "N/A");
1293         else {
1294                 snprintf(info->fw_version, sizeof(info->fw_version),
1295                          "%s %u.%u.%u TP %u.%u.%u",
1296                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1297                          G_FW_VERSION_MAJOR(fw_vers),
1298                          G_FW_VERSION_MINOR(fw_vers),
1299                          G_FW_VERSION_MICRO(fw_vers),
1300                          G_TP_VERSION_MAJOR(tp_vers),
1301                          G_TP_VERSION_MINOR(tp_vers),
1302                          G_TP_VERSION_MICRO(tp_vers));
1303         }
1304 }
1305
1306 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1307 {
1308         if (stringset == ETH_SS_STATS)
1309                 memcpy(data, stats_strings, sizeof(stats_strings));
1310 }
1311
1312 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1313                                             struct port_info *p, int idx)
1314 {
1315         int i;
1316         unsigned long tot = 0;
1317
1318         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1319                 tot += adapter->sge.qs[i].port_stats[idx];
1320         return tot;
1321 }
1322
/*
 * ethtool get_ethtool_stats handler.  Values are written in exactly the
 * same order as the names in stats_strings[]; keep the two in sync.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	/* serialize with the periodic statistics update task */
	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	/* software (SGE) counters, summed over the port's queue sets */
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
1385
1386 static inline void reg_block_dump(struct adapter *ap, void *buf,
1387                                   unsigned int start, unsigned int end)
1388 {
1389         u32 *p = buf + start;
1390
1391         for (; start <= end; start += sizeof(u32))
1392                 *p++ = t3_read_reg(ap, start);
1393 }
1394
1395 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1396                      void *buf)
1397 {
1398         struct port_info *pi = netdev_priv(dev);
1399         struct adapter *ap = pi->adapter;
1400
1401         /*
1402          * Version scheme:
1403          * bits 0..9: chip version
1404          * bits 10..15: chip revision
1405          * bit 31: set for PCIe cards
1406          */
1407         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1408
1409         /*
1410          * We skip the MAC statistics registers because they are clear-on-read.
1411          * Also reading multi-register stats would need to synchronize with the
1412          * periodic mac stats accumulation.  Hard to justify the complexity.
1413          */
1414         memset(buf, 0, T3_REGMAP_SIZE);
1415         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1416         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1417         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1418         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1419         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1420         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1421                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1422         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1423                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1424 }
1425
1426 static int restart_autoneg(struct net_device *dev)
1427 {
1428         struct port_info *p = netdev_priv(dev);
1429
1430         if (!netif_running(dev))
1431                 return -EAGAIN;
1432         if (p->link_config.autoneg != AUTONEG_ENABLE)
1433                 return -EINVAL;
1434         p->phy.ops->autoneg_restart(&p->phy);
1435         return 0;
1436 }
1437
1438 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1439 {
1440         struct port_info *pi = netdev_priv(dev);
1441         struct adapter *adapter = pi->adapter;
1442         int i;
1443
1444         if (data == 0)
1445                 data = 2;
1446
1447         for (i = 0; i < data * 2; i++) {
1448                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1449                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1450                 if (msleep_interruptible(500))
1451                         break;
1452         }
1453         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1454                          F_GPIO0_OUT_VAL);
1455         return 0;
1456 }
1457
/*
 * ethtool get_settings handler.  Speed and duplex are only meaningful
 * while the link is up; -1 (unknown) is reported otherwise.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	/* derive the port type from the supported-modes mask */
	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
1481
/*
 * Translate an ethtool speed/duplex pair into the corresponding
 * SUPPORTED_* capability bit; 0 if the combination is not supported
 * (10Gb/s only exists in full duplex).
 */
static int speed_duplex_to_caps(int speed, int duplex)
{
	int full = (duplex == DUPLEX_FULL);

	switch (speed) {
	case SPEED_10:
		return full ? SUPPORTED_10baseT_Full : SUPPORTED_10baseT_Half;
	case SPEED_100:
		return full ? SUPPORTED_100baseT_Full :
			      SUPPORTED_100baseT_Half;
	case SPEED_1000:
		return full ? SUPPORTED_1000baseT_Full :
			      SUPPORTED_1000baseT_Half;
	case SPEED_10000:
		return full ? SUPPORTED_10000baseT_Full : 0;
	default:
		return 0;
	}
}
1511
1512 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1513                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1514                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1515                       ADVERTISED_10000baseT_Full)
1516
/*
 * ethtool set_settings handler.  With autoneg disabled the forced speed
 * and duplex must be supported by the port (forcing 1Gb/s is rejected);
 * with autoneg enabled the advertised modes are masked down to what the
 * port supports before the link is restarted.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		/* keep only modes this port can actually advertise */
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
1547
1548 static void get_pauseparam(struct net_device *dev,
1549                            struct ethtool_pauseparam *epause)
1550 {
1551         struct port_info *p = netdev_priv(dev);
1552
1553         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1554         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1555         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1556 }
1557
/*
 * ethtool set_pauseparam handler.  Pause autonegotiation requires the
 * port to support autoneg; with link autoneg disabled, the requested
 * Rx/Tx pause settings are applied to the MAC immediately.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* renegotiate so the peer sees the new pause settings */
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
1585
1586 static u32 get_rx_csum(struct net_device *dev)
1587 {
1588         struct port_info *p = netdev_priv(dev);
1589
1590         return p->rx_csum_offload;
1591 }
1592
/*
 * ethtool set_rx_csum handler.  Turning Rx checksum offload off also
 * disables LRO on all of the port's queue sets, since LRO depends on
 * hardware-validated checksums.
 */
static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	if (!data) {
		struct adapter *adap = p->adapter;
		int i;

		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
			adap->params.sge.qset[i].lro = 0;
			adap->sge.qs[i].lro_enabled = 0;
		}
	}
	return 0;
}
1609
/*
 * ethtool get_ringparam handler.  All of a port's queue sets share the
 * same configuration (see set_sge_param), so the first one's values are
 * reported.
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	/* rx_mini is repurposed to report the response-queue size */
	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
1626
/*
 * ethtool set_ringparam handler.  Validates the requested ring sizes and
 * applies them to every queue set owned by the port.  Ring geometry can
 * only be changed before the adapter is fully initialized.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	/* reject sizes outside the hardware limits */
	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	/* rings are allocated during bring-up and cannot be resized after */
	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		/* all three Tx queues of a qset share the same size */
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
1658
/*
 * ethtool set_coalesce handler: set the Rx interrupt holdoff time.
 *
 * NOTE(review): only queue set 0 is updated here, regardless of which
 * port the request arrives on — confirm whether all of the port's queue
 * sets should be updated instead.
 */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	/* holdoff timer value (usecs * 10) must fit in M_NEWTIMER */
	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}
1673
1674 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1675 {
1676         struct port_info *pi = netdev_priv(dev);
1677         struct adapter *adapter = pi->adapter;
1678         struct qset_params *q = adapter->params.sge.qset;
1679
1680         c->rx_coalesce_usecs = q->coalesce_usecs;
1681         return 0;
1682 }
1683
/*
 * ethtool get_eeprom handler.  The EEPROM is read in aligned 32-bit
 * words into a scratch buffer, then the requested byte range is copied
 * out.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	/* round down to a word boundary; reads are word-sized */
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
1704
/*
 * ethtool set_eeprom handler.  EEPROM writes must be aligned 32-bit
 * words, so an unaligned request is handled read-modify-write through a
 * scratch buffer.  Write protection is lifted for the duration of the
 * update and restored afterwards.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* Read the partially-covered first and last words so the
		 * bytes outside the requested range are preserved.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		/* re-enable write protection */
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
1752
1753 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1754 {
1755         wol->supported = 0;
1756         wol->wolopts = 0;
1757         memset(&wol->sopass, 0, sizeof(wol->sopass));
1758 }
1759
/* ethtool entry points shared by all cxgb3 ports. */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
1790
/*
 * Check that @val lies in [@lo, @hi].  Negative values mean "parameter
 * not supplied" in the extension ioctl and are always accepted.
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;
	return lo <= val && val <= hi;
}
1795
/*
 * Handler for SIOCCHIOCTL, the Chelsio-private extension ioctl.  The
 * user buffer begins with a u32 command code followed by a
 * command-specific structure.  Covers qset configuration, firmware
 * loading, the HW MTU table, payload memory sizing, MC7 memory dumps
 * and trace filters.
 */
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		/* Negative fields mean "leave unchanged" (see in_range). */
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
			!in_range(t.cong_thres, 0, 255) ||
			!in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
				MAX_TXQ_ENTRIES) ||
			!in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
				MAX_TXQ_ENTRIES) ||
			!in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
				MAX_CTRL_TXQ_ENTRIES) ||
			!in_range(t.fl_size[0], MIN_FL_ENTRIES,
				MAX_RX_BUFFERS)
			|| !in_range(t.fl_size[1], MIN_FL_ENTRIES,
					MAX_RX_JUMBO_BUFFERS)
			|| !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
					MAX_RSPQ_ENTRIES))
			return -EINVAL;

		/* LRO needs Rx checksum offload on the qset's owning port. */
		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !pi->rx_csum_offload)
					return -EINVAL;
			}

		/* Queue sizes and modes cannot change once initialized. */
		if ((adapter->flags & FULL_INIT_DONE) &&
			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
			t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				/*
				 * NOTE(review): accumulates on top of the
				 * initial pi->nqsets, while the GET case
				 * below assigns instead -- confirm intended.
				 */
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
					!(adapter->flags & USING_MSI))
					t.polling = 0;

				/* INTx/MSI: one mode for every queue set */
				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
			q->lro = t.lro;
			qs->lro_enabled = t.lro;
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		/* Report the IRQ vector that services this queue set. */
		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		/* More than one qset per port requires MSI-X. */
		if (edata.val < 1 ||
			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		/* Recompute each port's first qset index. */
		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		/* Firmware image follows the header in the user buffer. */
		if (copy_from_user
			(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
			sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
			!is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		/* 0x14000 = 16KB | 64KB, the only legal Rx page sizes. */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		/* 0x1554000 = 16KB..16MB in power-of-4 steps for Tx. */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		/* -1 keeps the current page counts. */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* MC7 is read 8 bytes at a time; require 8-byte alignment. */
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret =
				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
						buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/*
		 * NOTE(review): the filter fields of ch_trace starting at
		 * .sip are assumed to mirror struct trace_params' layout --
		 * confirm against cxgb3_ioctl.h / common.h.
		 */
		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
						t.invert_match,
						t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
						t.invert_match,
						t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
2176
/*
 * net_device ioctl handler: MII register access plus the Chelsio
 * extension ioctl (SIOCCHIOCTL).  10G PHYs use MMD (clause-45 style)
 * addressing encoded in the upper bits of phy_id.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			/* MMD device in phy_id bits 8+; default to PCS. */
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
				phy->mdio_read(adapter, data->phy_id & 0x1f,
						mmd, data->reg_num, &val);
		} else
			ret =
				phy->mdio_read(adapter, data->phy_id & 0x1f,
						0, data->reg_num & 0x1f,
						&val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			/* MMD device in phy_id bits 8+; default to PCS. */
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
				phy->mdio_write(adapter,
						data->phy_id & 0x1f, mmd,
						data->reg_num,
						data->val_in);
		} else
			ret =
				phy->mdio_write(adapter,
						data->phy_id & 0x1f, 0,
						data->reg_num & 0x1f,
						data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
2247
2248 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2249 {
2250         struct port_info *pi = netdev_priv(dev);
2251         struct adapter *adapter = pi->adapter;
2252         int ret;
2253
2254         if (new_mtu < 81)       /* accommodate SACK */
2255                 return -EINVAL;
2256         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2257                 return ret;
2258         dev->mtu = new_mtu;
2259         init_port_mtus(adapter);
2260         if (adapter->params.rev == 0 && offload_running(adapter))
2261                 t3_load_mtus(adapter, adapter->params.mtus,
2262                              adapter->params.a_wnd, adapter->params.b_wnd,
2263                              adapter->port[0]->mtu);
2264         return 0;
2265 }
2266
2267 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2268 {
2269         struct port_info *pi = netdev_priv(dev);
2270         struct adapter *adapter = pi->adapter;
2271         struct sockaddr *addr = p;
2272
2273         if (!is_valid_ether_addr(addr->sa_data))
2274                 return -EINVAL;
2275
2276         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2277         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2278         if (offload_running(adapter))
2279                 write_smt_entry(adapter, pi->port_id);
2280         return 0;
2281 }
2282
2283 /**
2284  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2285  * @adap: the adapter
2286  * @p: the port
2287  *
2288  * Ensures that current Rx processing on any of the queues associated with
2289  * the given port completes before returning.  We do this by acquiring and
2290  * releasing the locks of the response queues associated with the port.
2291  */
2292 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2293 {
2294         int i;
2295
2296         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2297                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2298
2299                 spin_lock_irq(&q->lock);
2300                 spin_unlock_irq(&q->lock);
2301         }
2302 }
2303
/*
 * Install/remove a VLAN group on a port and configure hardware VLAN
 * acceleration.  Rev > 0 chips have per-port control; rev 0 (T3A) has a
 * single control shared by all ports, which must stay enabled while any
 * port still has a VLAN group attached.
 */
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	/* Make sure no Rx path can still observe the old group. */
	t3_synchronize_rx(adapter, pi);
}
2322
2323 #ifdef CONFIG_NET_POLL_CONTROLLER
2324 static void cxgb_netpoll(struct net_device *dev)
2325 {
2326         struct port_info *pi = netdev_priv(dev);
2327         struct adapter *adapter = pi->adapter;
2328         int qidx;
2329
2330         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2331                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2332                 void *source;
2333
2334                 if (adapter->flags & USING_MSIX)
2335                         source = qs;
2336                 else
2337                         source = adapter;
2338
2339                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2340         }
2341 }
2342 #endif
2343
2344 /*
2345  * Periodic accumulation of MAC statistics.
2346  */
2347 static void mac_stats_update(struct adapter *adapter)
2348 {
2349         int i;
2350
2351         for_each_port(adapter, i) {
2352                 struct net_device *dev = adapter->port[i];
2353                 struct port_info *p = netdev_priv(dev);
2354
2355                 if (netif_running(dev)) {
2356                         spin_lock(&adapter->stats_lock);
2357                         t3_mac_update_stats(&p->mac);
2358                         spin_unlock(&adapter->stats_lock);
2359                 }
2360         }
2361 }
2362
2363 static void check_link_status(struct adapter *adapter)
2364 {
2365         int i;
2366
2367         for_each_port(adapter, i) {
2368                 struct net_device *dev = adapter->port[i];
2369                 struct port_info *p = netdev_priv(dev);
2370
2371                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2372                         t3_link_changed(adapter, i);
2373         }
2374 }
2375
2376 static void check_t3b2_mac(struct adapter *adapter)
2377 {
2378         int i;
2379
2380         if (!rtnl_trylock())    /* synchronize with ifdown */
2381                 return;
2382
2383         for_each_port(adapter, i) {
2384                 struct net_device *dev = adapter->port[i];
2385                 struct port_info *p = netdev_priv(dev);
2386                 int status;
2387
2388                 if (!netif_running(dev))
2389                         continue;
2390
2391                 status = 0;
2392                 if (netif_running(dev) && netif_carrier_ok(dev))
2393                         status = t3b2_mac_watchdog_task(&p->mac);
2394                 if (status == 1)
2395                         p->mac.stats.num_toggled++;
2396                 else if (status == 2) {
2397                         struct cmac *mac = &p->mac;
2398
2399                         t3_mac_set_mtu(mac, dev->mtu);
2400                         t3_mac_set_address(mac, 0, dev->dev_addr);
2401                         cxgb_set_rxmode(dev);
2402                         t3_link_start(&p->phy, mac, &p->link_config);
2403                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2404                         t3_port_intr_enable(adapter, p->port_id);
2405                         p->mac.stats.num_resets++;
2406                 }
2407         }
2408         rtnl_unlock();
2409 }
2410
2411
/*
 * Periodic adapter maintenance work: polls link state for PHYs without
 * link-change interrupts, accumulates MAC statistics often enough that
 * the hardware counters cannot wrap, and runs the T3B2 MAC watchdog.
 * Reschedules itself while any port remains open.
 */
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
2441
2442 /*
2443  * Processes external (PHY) interrupts in process context.
2444  */
2445 static void ext_intr_task(struct work_struct *work)
2446 {
2447         struct adapter *adapter = container_of(work, struct adapter,
2448                                                ext_intr_handler_task);
2449
2450         t3_phy_intr_handler(adapter);
2451
2452         /* Now reenable external interrupts */
2453         spin_lock_irq(&adapter->work_lock);
2454         if (adapter->slow_intr_mask) {
2455                 adapter->slow_intr_mask |= F_T3DBG;
2456                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2457                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2458                              adapter->slow_intr_mask);
2459         }
2460         spin_unlock_irq(&adapter->work_lock);
2461 }
2462
2463 /*
2464  * Interrupt-context handler for external (PHY) interrupts.
2465  */
2466 void t3_os_ext_intr_handler(struct adapter *adapter)
2467 {
2468         /*
2469          * Schedule a task to handle external interrupts as they may be slow
2470          * and we use a mutex to protect MDIO registers.  We disable PHY
2471          * interrupts in the meantime and let the task reenable them when
2472          * it's done.
2473          */
2474         spin_lock(&adapter->work_lock);
2475         if (adapter->slow_intr_mask) {
2476                 adapter->slow_intr_mask &= ~F_T3DBG;
2477                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2478                              adapter->slow_intr_mask);
2479                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2480         }
2481         spin_unlock(&adapter->work_lock);
2482 }
2483
/*
 * Quiesce the adapter after a fatal error: close all running ports,
 * shut down the offload device if it is active, stop the SGE timers
 * and optionally reset the chip before disabling the PCI device.
 * Returns the reset result, or 0 when no reset was requested.
 */
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
2512
2513 static int t3_reenable_adapter(struct adapter *adapter)
2514 {
2515         if (pci_enable_device(adapter->pdev)) {
2516                 dev_err(&adapter->pdev->dev,
2517                         "Cannot re-enable PCI device after reset.\n");
2518                 goto err;
2519         }
2520         pci_set_master(adapter->pdev);
2521         pci_restore_state(adapter->pdev);
2522
2523         /* Free sge resources */
2524         t3_free_sge_resources(adapter);
2525
2526         if (t3_replay_prep_adapter(adapter))
2527                 goto err;
2528
2529         return 0;
2530 err:
2531         return -1;
2532 }
2533
2534 static void t3_resume_ports(struct adapter *adapter)
2535 {
2536         int i;
2537
2538         /* Restart the ports */
2539         for_each_port(adapter, i) {
2540                 struct net_device *netdev = adapter->port[i];
2541
2542                 if (netif_running(netdev)) {
2543                         if (cxgb_open(netdev)) {
2544                                 dev_err(&adapter->pdev->dev,
2545                                         "can't bring device back up"
2546                                         " after reset\n");
2547                                 continue;
2548                         }
2549                 }
2550         }
2551 }
2552
2553 /*
2554  * processes a fatal error.
2555  * Bring the ports down, reset the chip, bring the ports back up.
2556  */
2557 static void fatal_error_task(struct work_struct *work)
2558 {
2559         struct adapter *adapter = container_of(work, struct adapter,
2560                                                fatal_error_handler_task);
2561         int err = 0;
2562
2563         rtnl_lock();
2564         err = t3_adapter_error(adapter, 1);
2565         if (!err)
2566                 err = t3_reenable_adapter(adapter);
2567         if (!err)
2568                 t3_resume_ports(adapter);
2569
2570         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2571         rtnl_unlock();
2572 }
2573
/*
 * t3_fatal_err - handle a fatal hardware error.
 * @adapter: the adapter that hit the error
 *
 * If the adapter was fully initialized, stop the SGE, disable MAC
 * Tx/Rx on both ports, mask all interrupts and schedule
 * fatal_error_task() to reset and recover the adapter.  In all cases
 * log the event and, when readable, the firmware status words.
 */
void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop(adapter);
                /* Silence both MACs: port 0 via the base registers,
                 * port 1 via XGM_REG(..., 1). */
                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

                spin_lock(&adapter->work_lock);
                t3_intr_disable(adapter);
                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
                spin_unlock(&adapter->work_lock);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);

}
2597
2598 /**
2599  * t3_io_error_detected - called when PCI error is detected
2600  * @pdev: Pointer to PCI device
2601  * @state: The current pci connection state
2602  *
2603  * This function is called after a PCI bus error affecting
2604  * this device has been detected.
2605  */
2606 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2607                                              pci_channel_state_t state)
2608 {
2609         struct adapter *adapter = pci_get_drvdata(pdev);
2610         int ret;
2611
2612         ret = t3_adapter_error(adapter, 0);
2613
2614         /* Request a slot reset. */
2615         return PCI_ERS_RESULT_NEED_RESET;
2616 }
2617
2618 /**
2619  * t3_io_slot_reset - called after the pci bus has been reset.
2620  * @pdev: Pointer to PCI device
2621  *
2622  * Restart the card from scratch, as if from a cold-boot.
2623  */
2624 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2625 {
2626         struct adapter *adapter = pci_get_drvdata(pdev);
2627
2628         if (!t3_reenable_adapter(adapter))
2629                 return PCI_ERS_RESULT_RECOVERED;
2630
2631         return PCI_ERS_RESULT_DISCONNECT;
2632 }
2633
2634 /**
2635  * t3_io_resume - called when traffic can start flowing again.
2636  * @pdev: Pointer to PCI device
2637  *
2638  * This callback is called when the error recovery driver tells us that
2639  * its OK to resume normal operation.
2640  */
2641 static void t3_io_resume(struct pci_dev *pdev)
2642 {
2643         struct adapter *adapter = pci_get_drvdata(pdev);
2644
2645         t3_resume_ports(adapter);
2646 }
2647
/* PCI Error Recovery (AER) callbacks, wired up via driver.err_handler. */
static struct pci_error_handlers t3_err_handler = {
        .error_detected = t3_io_error_detected,
        .slot_reset = t3_io_slot_reset,
        .resume = t3_io_resume,
};
2653
2654 /*
2655  * Set the number of qsets based on the number of CPUs and the number of ports,
2656  * not to exceed the number of available qsets, assuming there are enough qsets
2657  * per port in HW.
2658  */
2659 static void set_nqsets(struct adapter *adap)
2660 {
2661         int i, j = 0;
2662         int num_cpus = num_online_cpus();
2663         int hwports = adap->params.nports;
2664         int nqsets = SGE_QSETS;
2665
2666         if (adap->params.rev > 0) {
2667                 if (hwports == 2 &&
2668                     (hwports * nqsets > SGE_QSETS ||
2669                      num_cpus >= nqsets / hwports))
2670                         nqsets /= hwports;
2671                 if (nqsets > num_cpus)
2672                         nqsets = num_cpus;
2673                 if (nqsets < 1 || hwports == 4)
2674                         nqsets = 1;
2675         } else
2676                 nqsets = 1;
2677
2678         for_each_port(adap, i) {
2679                 struct port_info *pi = adap2pinfo(adap, i);
2680
2681                 pi->first_qset = j;
2682                 pi->nqsets = nqsets;
2683                 j = pi->first_qset + nqsets;
2684
2685                 dev_info(&adap->pdev->dev,
2686                          "Port %d using %d queue sets.\n", i, nqsets);
2687         }
2688 }
2689
2690 static int __devinit cxgb_enable_msix(struct adapter *adap)
2691 {
2692         struct msix_entry entries[SGE_QSETS + 1];
2693         int i, err;
2694
2695         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2696                 entries[i].entry = i;
2697
2698         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2699         if (!err) {
2700                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2701                         adap->msix_info[i].vec = entries[i].vector;
2702         } else if (err > 0)
2703                 dev_info(&adap->pdev->dev,
2704                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2705         return err;
2706 }
2707
/*
 * print_port_info - log a one-line summary for each registered port.
 * @adap: the adapter
 * @ai: static description of this adapter type
 *
 * Prints, per registered port: interface name, adapter and PHY
 * descriptions, "R" when the adapter is offload-capable, chip revision,
 * PCI variant/width, and interrupt mode.  For the port whose name was
 * adopted as the adapter name, also prints the MC7 memory sizes and the
 * VPD serial number.
 */
static void __devinit print_port_info(struct adapter *adap,
                                      const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                /* Skip ports whose net device failed to register. */
                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
                       dev->name, ai->desc, pi->phy.desc,
                       is_offload(adap) ? "R" : "", adap->params.rev, buf,
                       (adap->flags & USING_MSIX) ? " MSI-X" :
                       (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        printk(KERN_INFO
                               "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                               adap->name, t3_mc7_size(&adap->cm) >> 20,
                               t3_mc7_size(&adap->pmtx) >> 20,
                               t3_mc7_size(&adap->pmrx) >> 20,
                               adap->params.vpd.sn);
        }
}
2747
/*
 * init_one - probe one T3 adapter.
 * @pdev: the PCI device
 * @ent: matching entry in the driver's PCI table; its driver_data
 *       selects the adapter_info for this board
 *
 * Claims PCI resources, configures DMA, maps the register BAR,
 * allocates the adapter and one net device per port, prepares the
 * hardware, registers the net devices (continuing as long as at least
 * one registers), sets up the offload device, chooses the interrupt
 * mode and queue sets, and logs the configuration.  On failure all
 * acquired resources are unwound via the out_* labels.
 */
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
                ++version_printed;
        }

        /* The shared workqueue is created lazily on the first probe. */
        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        printk(KERN_ERR DRV_NAME
                               ": cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                return err;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out_release_regions;
        }

        /* Prefer 64-bit DMA; fall back to 32-bit if unavailable. */
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                               "coherent allocations\n");
                        goto out_disable_device;
                }
        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_disable_device;
        }

        pci_set_master(pdev);
        /* Saved state is restored by t3_reenable_adapter() after a reset. */
        pci_save_state(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

        /* Allocate and initialize one net device per hardware port. */
        for (i = 0; i < ai->nports; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(sizeof(struct port_info));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_csum_offload = 1;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->vlan_rx_register = vlan_rx_register;

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t3_eth_xmit;
                netdev->get_stats = cxgb_get_stats;
                netdev->set_multicast_list = cxgb_set_rxmode;
                netdev->do_ioctl = cxgb_ioctl;
                netdev->change_mtu = cxgb_change_mtu;
                netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = cxgb_netpoll;
#endif

                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err)
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                /* err still holds the last register_netdev() failure. */
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

        /* NOTE(review): a sysfs_create_group() failure is silently
         * ignored here; the probe still succeeds. */
        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);

        print_port_info(adapter, ai);
        return 0;

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
2948
/*
 * remove_one - PCI remove routine; undoes init_one.
 * @pdev: the PCI device being removed
 *
 * Stops the SGE, removes the sysfs group, tears down the offload
 * device, unregisters and frees the net devices, releases SGE and
 * interrupt resources, unmaps registers and releases the PCI device.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                /* Only ports that registered successfully in init_one()
                 * need unregistering. */
                for_each_port(adapter, i)
                    if (test_bit(i, &adapter->registered_device_map))
                        unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}
2986
/* PCI driver glue: probe/remove entry points and AER error handlers. */
static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
        .err_handler = &t3_err_handler,
};
2994
2995 static int __init cxgb3_init_module(void)
2996 {
2997         int ret;
2998
2999         cxgb3_offload_init();
3000
3001         ret = pci_register_driver(&driver);
3002         return ret;
3003 }
3004
/*
 * Module unload: unregister the PCI driver and destroy the shared
 * workqueue.
 */
static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        /* The workqueue is created lazily in init_one(), so it may never
         * have been allocated if no adapter was probed. */
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}
3011
/* Module entry and exit points. */
module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);