qlge: Size RX buffers based on MTU.
drivers/net/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
42
43 #include "qlge.h"
44
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
47
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
52
53 static const u32 default_msg =
54     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER |    */
56     NETIF_MSG_IFDOWN |
57     NETIF_MSG_IFUP |
58     NETIF_MSG_RX_ERR |
59     NETIF_MSG_TX_ERR |
60 /*  NETIF_MSG_TX_QUEUED | */
61 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65 static int debug = 0x00007fff;  /* defaults above */
66 module_param(debug, int, 0);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
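/* Note: the debug value is a bitmask of NETIF_MSG_* flags rather than a
 * simple level; for example, loading with "modprobe qlge debug=0x7" would
 * enable only NETIF_MSG_DRV, NETIF_MSG_PROBE and NETIF_MSG_LINK output
 * (an illustrative value, not a recommended setting).
 */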
68
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int irq_type = MSIX_IRQ;
73 module_param(irq_type, int, MSIX_IRQ);
74 MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79         /* required last entry */
80         {0,}
81 };
82
83 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85 /* This hardware semaphore provides exclusive access to
86  * resources shared between the NIC driver, MPI firmware,
87  * FCOE firmware and the FC driver.
88  */
89 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90 {
91         u32 sem_bits = 0;
92
93         switch (sem_mask) {
94         case SEM_XGMAC0_MASK:
95                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96                 break;
97         case SEM_XGMAC1_MASK:
98                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99                 break;
100         case SEM_ICB_MASK:
101                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102                 break;
103         case SEM_MAC_ADDR_MASK:
104                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105                 break;
106         case SEM_FLASH_MASK:
107                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108                 break;
109         case SEM_PROBE_MASK:
110                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111                 break;
112         case SEM_RT_IDX_MASK:
113                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114                 break;
115         case SEM_PROC_REG_MASK:
116                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117                 break;
118         default:
119                 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
120                 return -EINVAL;
121         }
122
123         ql_write32(qdev, SEM, sem_bits | sem_mask);
124         return !(ql_read32(qdev, SEM) & sem_bits);
125 }
126
127 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128 {
129         unsigned int wait_count = 30;
130         do {
131                 if (!ql_sem_trylock(qdev, sem_mask))
132                         return 0;
133                 udelay(100);
134         } while (--wait_count);
135         return -ETIMEDOUT;
136 }
137
138 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139 {
140         ql_write32(qdev, SEM, sem_mask);
141         ql_read32(qdev, SEM);   /* flush */
142 }
143
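/* A minimal usage sketch of the semaphore helpers above (the same pattern
 * the MAC address and flash code below follows):
 *
 *	if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
 *		return -ETIMEDOUT;	 (could not get the hardware lock)
 *	...touch the shared resource...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */
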
144 /* This function waits for a specific bit to come ready
145  * in a given register.  It is used mostly by the initialize
146  * process, but is also used by kernel thread APIs such as
147  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
148  */
149 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150 {
151         u32 temp;
152         int count = UDELAY_COUNT;
153
154         while (count) {
155                 temp = ql_read32(qdev, reg);
156
157                 /* check for errors */
158                 if (temp & err_bit) {
159                         QPRINTK(qdev, PROBE, ALERT,
160                                 "register 0x%.08x access error, value = 0x%.08x!.\n",
161                                 reg, temp);
162                         return -EIO;
163                 } else if (temp & bit)
164                         return 0;
165                 udelay(UDELAY_DELAY);
166                 count--;
167         }
168         QPRINTK(qdev, PROBE, ALERT,
169                 "Timed out waiting for reg %x to come ready.\n", reg);
170         return -ETIMEDOUT;
171 }
172
173 /* The CFG register is used to download TX and RX control blocks
174  * to the chip. This function waits for an operation to complete.
175  */
176 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177 {
178         int count = UDELAY_COUNT;
179         u32 temp;
180
181         while (count) {
182                 temp = ql_read32(qdev, CFG);
183                 if (temp & CFG_LE)
184                         return -EIO;
185                 if (!(temp & bit))
186                         return 0;
187                 udelay(UDELAY_DELAY);
188                 count--;
189         }
190         return -ETIMEDOUT;
191 }
192
193
194 /* Used to issue init control blocks to hw. Maps control block,
195  * sets address, triggers download, waits for completion.
196  */
197 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198                  u16 q_id)
199 {
200         u64 map;
201         int status = 0;
202         int direction;
203         u32 mask;
204         u32 value;
205
206         direction =
207             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208             PCI_DMA_FROMDEVICE;
209
210         map = pci_map_single(qdev->pdev, ptr, size, direction);
211         if (pci_dma_mapping_error(qdev->pdev, map)) {
212                 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213                 return -ENOMEM;
214         }
215
216         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
217         if (status)
218                 return status;
219
220         status = ql_wait_cfg(qdev, bit);
221         if (status) {
222                 QPRINTK(qdev, IFUP, ERR,
223                         "Timed out waiting for CFG to come ready.\n");
224                 goto exit;
225         }
226
227         ql_write32(qdev, ICB_L, (u32) map);
228         ql_write32(qdev, ICB_H, (u32) (map >> 32));
229
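        /* The upper 16 bits of CFG appear to act as a write mask for the
         * lower 16 (the same convention ql_write32() callers use elsewhere,
         * e.g. for INTR_EN and STS), so only the requested load bit and the
         * queue id field are changed by the write below.
         */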
230         mask = CFG_Q_MASK | (bit << 16);
231         value = bit | (q_id << CFG_Q_SHIFT);
232         ql_write32(qdev, CFG, (mask | value));
233
234         /*
235          * Wait for the bit to clear after signaling hw.
236          */
237         status = ql_wait_cfg(qdev, bit);
238 exit:
239         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
240         pci_unmap_single(qdev->pdev, map, size, direction);
241         return status;
242 }
243
244 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
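/* Note: each entry is read back as a sequence of 32-bit words through
 * MAC_ADDR_IDX/MAC_ADDR_DATA: the lower 32 bits of the address, then the
 * upper 16 bits, and for CAM entries a third word holding the routing/output
 * information, so *value must point at a buffer large enough for up to
 * three u32s.
 */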
245 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246                         u32 *value)
247 {
248         u32 offset = 0;
249         int status;
250
251         switch (type) {
252         case MAC_ADDR_TYPE_MULTI_MAC:
253         case MAC_ADDR_TYPE_CAM_MAC:
254                 {
255                         status =
256                             ql_wait_reg_rdy(qdev,
257                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
258                         if (status)
259                                 goto exit;
260                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
262                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263                         status =
264                             ql_wait_reg_rdy(qdev,
265                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
266                         if (status)
267                                 goto exit;
268                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269                         status =
270                             ql_wait_reg_rdy(qdev,
271                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272                         if (status)
273                                 goto exit;
274                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
276                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277                         status =
278                             ql_wait_reg_rdy(qdev,
279                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280                         if (status)
281                                 goto exit;
282                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
284                                 status =
285                                     ql_wait_reg_rdy(qdev,
286                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287                                 if (status)
288                                         goto exit;
289                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
291                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292                                 status =
293                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294                                                     MAC_ADDR_MR, 0);
295                                 if (status)
296                                         goto exit;
297                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298                         }
299                         break;
300                 }
301         case MAC_ADDR_TYPE_VLAN:
302         case MAC_ADDR_TYPE_MULTI_FLTR:
303         default:
304                 QPRINTK(qdev, IFUP, CRIT,
305                         "Address type %d not yet supported.\n", type);
306                 status = -EPERM;
307         }
308 exit:
309         return status;
310 }
311
312 /* Set up a MAC, multicast or VLAN address for the
313  * inbound frame matching.
314  */
315 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316                                u16 index)
317 {
318         u32 offset = 0;
319         int status = 0;
320
321         switch (type) {
322         case MAC_ADDR_TYPE_MULTI_MAC:
323                 {
324                         u32 upper = (addr[0] << 8) | addr[1];
325                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326                                         (addr[4] << 8) | (addr[5]);
327
328                         status =
329                                 ql_wait_reg_rdy(qdev,
330                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331                         if (status)
332                                 goto exit;
333                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334                                 (index << MAC_ADDR_IDX_SHIFT) |
335                                 type | MAC_ADDR_E);
336                         ql_write32(qdev, MAC_ADDR_DATA, lower);
337                         status =
338                                 ql_wait_reg_rdy(qdev,
339                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340                         if (status)
341                                 goto exit;
342                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343                                 (index << MAC_ADDR_IDX_SHIFT) |
344                                 type | MAC_ADDR_E);
345
346                         ql_write32(qdev, MAC_ADDR_DATA, upper);
347                         status =
348                                 ql_wait_reg_rdy(qdev,
349                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350                         if (status)
351                                 goto exit;
352                         break;
353                 }
354         case MAC_ADDR_TYPE_CAM_MAC:
355                 {
356                         u32 cam_output;
357                         u32 upper = (addr[0] << 8) | addr[1];
358                         u32 lower =
359                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360                             (addr[5]);
361
362                         QPRINTK(qdev, IFUP, DEBUG,
363                                 "Adding %s address %pM"
364                                 " at index %d in the CAM.\n",
365                                 ((type ==
366                                   MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
367                                  "UNICAST"), addr, index);
368
369                         status =
370                             ql_wait_reg_rdy(qdev,
371                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372                         if (status)
373                                 goto exit;
374                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
375                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
376                                    type);       /* type */
377                         ql_write32(qdev, MAC_ADDR_DATA, lower);
378                         status =
379                             ql_wait_reg_rdy(qdev,
380                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381                         if (status)
382                                 goto exit;
383                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
385                                    type);       /* type */
386                         ql_write32(qdev, MAC_ADDR_DATA, upper);
387                         status =
388                             ql_wait_reg_rdy(qdev,
389                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390                         if (status)
391                                 goto exit;
392                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
393                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
394                                    type);       /* type */
395                         /* This field should also include the queue id
396                            and possibly the function id.  Right now we hardcode
397                            the route field to NIC core.
398                          */
399                         cam_output = (CAM_OUT_ROUTE_NIC |
400                                       (qdev->
401                                        func << CAM_OUT_FUNC_SHIFT) |
402                                         (0 << CAM_OUT_CQ_ID_SHIFT));
403                         if (qdev->vlgrp)
404                                 cam_output |= CAM_OUT_RV;
405                         /* route to NIC core */
406                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
407                         break;
408                 }
409         case MAC_ADDR_TYPE_VLAN:
410                 {
411                         u32 enable_bit = *((u32 *) &addr[0]);
412                         /* For VLAN, the addr actually holds a bit that
413                          * either enables or disables the vlan id we are
414                          * addressing. It's either MAC_ADDR_E on or off.
415                          * That's bit-27 we're talking about.
416                          */
417                         QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
418                                 (enable_bit ? "Adding" : "Removing"),
419                                 index, (enable_bit ? "to" : "from"));
420
421                         status =
422                             ql_wait_reg_rdy(qdev,
423                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
424                         if (status)
425                                 goto exit;
426                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
427                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
428                                    type |       /* type */
429                                    enable_bit); /* enable/disable */
430                         break;
431                 }
432         case MAC_ADDR_TYPE_MULTI_FLTR:
433         default:
434                 QPRINTK(qdev, IFUP, CRIT,
435                         "Address type %d not yet supported.\n", type);
436                 status = -EPERM;
437         }
438 exit:
439         return status;
440 }
441
442 /* Set or clear MAC address in hardware. We sometimes
443  * have to clear it to prevent wrong frame routing
444  * especially in a bonding environment.
445  */
446 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
447 {
448         int status;
449         char zero_mac_addr[ETH_ALEN];
450         char *addr;
451
452         if (set) {
453                 addr = &qdev->ndev->dev_addr[0];
454                 QPRINTK(qdev, IFUP, DEBUG,
455                         "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456                         addr[0], addr[1], addr[2], addr[3],
457                         addr[4], addr[5]);
458         } else {
459                 memset(zero_mac_addr, 0, ETH_ALEN);
460                 addr = &zero_mac_addr[0];
461                 QPRINTK(qdev, IFUP, DEBUG,
462                                 "Clearing MAC address on %s\n",
463                                 qdev->ndev->name);
464         }
465         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466         if (status)
467                 return status;
468         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
469                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471         if (status)
472                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
473                         "address.\n");
474         return status;
475 }
476
477 void ql_link_on(struct ql_adapter *qdev)
478 {
479         QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
480                                  qdev->ndev->name);
481         netif_carrier_on(qdev->ndev);
482         ql_set_mac_addr(qdev, 1);
483 }
484
485 void ql_link_off(struct ql_adapter *qdev)
486 {
487         QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
488                                  qdev->ndev->name);
489         netif_carrier_off(qdev->ndev);
490         ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498         int status = 0;
499
500         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501         if (status)
502                 goto exit;
503
504         ql_write32(qdev, RT_IDX,
505                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507         if (status)
508                 goto exit;
509         *value = ql_read32(qdev, RT_DATA);
510 exit:
511         return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520                               int enable)
521 {
522         int status = -EINVAL; /* Return error if no mask match. */
523         u32 value = 0;
524
525         QPRINTK(qdev, IFUP, DEBUG,
526                 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527                 (enable ? "Adding" : "Removing"),
528                 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
529                 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
530                 ((index ==
531                   RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
532                 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
533                 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
534                 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
535                 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
536                 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
537                 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
538                 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
539                 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
540                 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
541                 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
542                 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
543                 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
544                 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
545                 (enable ? "to" : "from"));
546
547         switch (mask) {
548         case RT_IDX_CAM_HIT:
549                 {
550                         value = RT_IDX_DST_CAM_Q |      /* dest */
551                             RT_IDX_TYPE_NICQ |  /* type */
552                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
553                         break;
554                 }
555         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
556                 {
557                         value = RT_IDX_DST_DFLT_Q |     /* dest */
558                             RT_IDX_TYPE_NICQ |  /* type */
559                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
560                         break;
561                 }
562         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
563                 {
564                         value = RT_IDX_DST_DFLT_Q |     /* dest */
565                             RT_IDX_TYPE_NICQ |  /* type */
566                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
567                         break;
568                 }
569         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
570                 {
571                         value = RT_IDX_DST_DFLT_Q |     /* dest */
572                             RT_IDX_TYPE_NICQ |  /* type */
573                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
574                         break;
575                 }
576         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
577                 {
578                         value = RT_IDX_DST_DFLT_Q |     /* dest */
579                             RT_IDX_TYPE_NICQ |  /* type */
580                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
581                         break;
582                 }
583         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
584                 {
585                         value = RT_IDX_DST_DFLT_Q |     /* dest */
586                             RT_IDX_TYPE_NICQ |  /* type */
587                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588                         break;
589                 }
590         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
591                 {
592                         value = RT_IDX_DST_RSS |        /* dest */
593                             RT_IDX_TYPE_NICQ |  /* type */
594                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
595                         break;
596                 }
597         case 0:         /* Clear the E-bit on an entry. */
598                 {
599                         value = RT_IDX_DST_DFLT_Q |     /* dest */
600                             RT_IDX_TYPE_NICQ |  /* type */
601                             (index << RT_IDX_IDX_SHIFT);/* index */
602                         break;
603                 }
604         default:
605                 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
606                         mask);
607                 status = -EPERM;
608                 goto exit;
609         }
610
611         if (value) {
612                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
613                 if (status)
614                         goto exit;
615                 value |= (enable ? RT_IDX_E : 0);
616                 ql_write32(qdev, RT_IDX, value);
617                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618         }
619 exit:
620         return status;
621 }
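/* For example, judging from the switch above, promiscuous mode would be
 * turned on with something like:
 *
 *	ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1);
 *
 * which routes all otherwise-valid frames to the default queue, and turned
 * back off by calling it again with enable == 0.
 */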
622
623 static void ql_enable_interrupts(struct ql_adapter *qdev)
624 {
625         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
626 }
627
628 static void ql_disable_interrupts(struct ql_adapter *qdev)
629 {
630         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
631 }
632
633 /* If we're running with multiple MSI-X vectors then we enable on the fly.
634  * Otherwise, we may have multiple outstanding workers and don't want to
635  * enable until the last one finishes. In this case, the irq_cnt gets
636  * incremented every time we queue a worker and decremented every time
637  * a worker finishes.  Once it hits zero we enable the interrupt.
638  */
639 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
640 {
641         u32 var = 0;
642         unsigned long hw_flags = 0;
643         struct intr_context *ctx = qdev->intr_context + intr;
644
645         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
646                 /* Always enable if we're MSIX multi interrupts and
647                  * it's not the default (zeroth) interrupt.
648                  */
649                 ql_write32(qdev, INTR_EN,
650                            ctx->intr_en_mask);
651                 var = ql_read32(qdev, STS);
652                 return var;
653         }
654
655         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
656         if (atomic_dec_and_test(&ctx->irq_cnt)) {
657                 ql_write32(qdev, INTR_EN,
658                            ctx->intr_en_mask);
659                 var = ql_read32(qdev, STS);
660         }
661         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
662         return var;
663 }
664
665 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
666 {
667         u32 var = 0;
668         struct intr_context *ctx;
669
670         /* HW disables for us if we're MSIX multi interrupts and
671          * it's not the default (zeroth) interrupt.
672          */
673         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
674                 return 0;
675
676         ctx = qdev->intr_context + intr;
677         spin_lock(&qdev->hw_lock);
678         if (!atomic_read(&ctx->irq_cnt)) {
679                 ql_write32(qdev, INTR_EN,
680                 ctx->intr_dis_mask);
681                 var = ql_read32(qdev, STS);
682         }
683         atomic_inc(&ctx->irq_cnt);
684         spin_unlock(&qdev->hw_lock);
685         return var;
686 }
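/* Note that the disable/enable calls are expected to nest: each disable
 * bumps irq_cnt and each enable does an atomic_dec_and_test(), so in the
 * non-MSIX (shared/legacy) case the hardware interrupt is only re-armed
 * once the last outstanding worker has called the enable side.
 */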
687
688 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
689 {
690         int i;
691         for (i = 0; i < qdev->intr_count; i++) {
692                 /* The enable call does an atomic_dec_and_test
693                  * and enables only if the result is zero.
694                  * So we precharge it here.
695                  */
696                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
697                         i == 0))
698                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
699                 ql_enable_completion_interrupt(qdev, i);
700         }
701
702 }
703
704 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
705 {
706         int status, i;
707         u16 csum = 0;
708         __le16 *flash = (__le16 *)&qdev->flash;
709
710         status = strncmp((char *)&qdev->flash, str, 4);
711         if (status) {
712                 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
713                 return  status;
714         }
715
716         for (i = 0; i < size; i++)
717                 csum += le16_to_cpu(*flash++);
718
719         if (csum)
720                 QPRINTK(qdev, IFUP, ERR,
721                         "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722
723         return csum;
724 }
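/* The flash block is protected by a simple additive 16-bit checksum:
 * summing every __le16 word of a valid block (checksum word included)
 * yields zero, which is why a non-zero csum is returned as the error.
 */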
725
726 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
727 {
728         int status = 0;
729         /* wait for reg to come ready */
730         status = ql_wait_reg_rdy(qdev,
731                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
732         if (status)
733                 goto exit;
734         /* set up for reg read */
735         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
736         /* wait for reg to come ready */
737         status = ql_wait_reg_rdy(qdev,
738                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
739         if (status)
740                 goto exit;
741          /* This data is stored on flash as an array of
742          * __le32.  Since ql_read32() returns cpu endian
743          * we need to swap it back.
744          */
745         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
746 exit:
747         return status;
748 }
749
750 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
751 {
752         u32 i, size;
753         int status;
754         __le32 *p = (__le32 *)&qdev->flash;
755         u32 offset;
756         u8 mac_addr[6];
757
758         /* Get flash offset for function and adjust
759          * for dword access.
760          */
761         if (!qdev->port)
762                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
763         else
764                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
765
766         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
767                 return -ETIMEDOUT;
768
769         size = sizeof(struct flash_params_8000) / sizeof(u32);
770         for (i = 0; i < size; i++, p++) {
771                 status = ql_read_flash_word(qdev, i+offset, p);
772                 if (status) {
773                         QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
774                         goto exit;
775                 }
776         }
777
778         status = ql_validate_flash(qdev,
779                         sizeof(struct flash_params_8000) / sizeof(u16),
780                         "8000");
781         if (status) {
782                 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
783                 status = -EINVAL;
784                 goto exit;
785         }
786
787         /* Extract either manufacturer or BOFM modified
788          * MAC address.
789          */
790         if (qdev->flash.flash_params_8000.data_type1 == 2)
791                 memcpy(mac_addr,
792                         qdev->flash.flash_params_8000.mac_addr1,
793                         qdev->ndev->addr_len);
794         else
795                 memcpy(mac_addr,
796                         qdev->flash.flash_params_8000.mac_addr,
797                         qdev->ndev->addr_len);
798
799         if (!is_valid_ether_addr(mac_addr)) {
800                 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
801                 status = -EINVAL;
802                 goto exit;
803         }
804
805         memcpy(qdev->ndev->dev_addr,
806                 mac_addr,
807                 qdev->ndev->addr_len);
808
809 exit:
810         ql_sem_unlock(qdev, SEM_FLASH_MASK);
811         return status;
812 }
813
814 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
815 {
816         int i;
817         int status;
818         __le32 *p = (__le32 *)&qdev->flash;
819         u32 offset = 0;
820         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
821
822         /* Second function's parameters follow the first
823          * function's.
824          */
825         if (qdev->port)
826                 offset = size;
827
828         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
829                 return -ETIMEDOUT;
830
831         for (i = 0; i < size; i++, p++) {
832                 status = ql_read_flash_word(qdev, i+offset, p);
833                 if (status) {
834                         QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
835                         goto exit;
836                 }
837
838         }
839
840         status = ql_validate_flash(qdev,
841                         sizeof(struct flash_params_8012) / sizeof(u16),
842                         "8012");
843         if (status) {
844                 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
845                 status = -EINVAL;
846                 goto exit;
847         }
848
849         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850                 status = -EINVAL;
851                 goto exit;
852         }
853
854         memcpy(qdev->ndev->dev_addr,
855                 qdev->flash.flash_params_8012.mac_addr,
856                 qdev->ndev->addr_len);
857
858 exit:
859         ql_sem_unlock(qdev, SEM_FLASH_MASK);
860         return status;
861 }
862
863 /* xgmac registers are located behind the xgmac_addr and xgmac_data
864  * register pair.  Each read/write requires us to wait for the ready
865  * bit before reading/writing the data.
866  */
867 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
868 {
869         int status;
870         /* wait for reg to come ready */
871         status = ql_wait_reg_rdy(qdev,
872                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
873         if (status)
874                 return status;
875         /* write the data to the data reg */
876         ql_write32(qdev, XGMAC_DATA, data);
877         /* trigger the write */
878         ql_write32(qdev, XGMAC_ADDR, reg);
879         return status;
880 }
881
882 /* xgmac registers are located behind the xgmac_addr and xgmac_data
883  * register pair.  Each read/write requires us to wait for the ready
884  * bit before reading/writing the data.
885  */
886 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
887 {
888         int status = 0;
889         /* wait for reg to come ready */
890         status = ql_wait_reg_rdy(qdev,
891                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
892         if (status)
893                 goto exit;
894         /* set up for reg read */
895         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
896         /* wait for reg to come ready */
897         status = ql_wait_reg_rdy(qdev,
898                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899         if (status)
900                 goto exit;
901         /* get the data */
902         *data = ql_read32(qdev, XGMAC_DATA);
903 exit:
904         return status;
905 }
906
907 /* This is used for reading the 64-bit statistics regs. */
908 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
909 {
910         int status = 0;
911         u32 hi = 0;
912         u32 lo = 0;
913
914         status = ql_read_xgmac_reg(qdev, reg, &lo);
915         if (status)
916                 goto exit;
917
918         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919         if (status)
920                 goto exit;
921
922         *data = (u64) lo | ((u64) hi << 32);
923
924 exit:
925         return status;
926 }
927
928 static int ql_8000_port_initialize(struct ql_adapter *qdev)
929 {
930         int status;
931         /*
932          * Get MPI firmware version for driver banner
933  * and ethtool info.
934          */
935         status = ql_mb_about_fw(qdev);
936         if (status)
937                 goto exit;
938         status = ql_mb_get_fw_state(qdev);
939         if (status)
940                 goto exit;
941         /* Wake up a worker to get/set the TX/RX frame sizes. */
942         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
943 exit:
944         return status;
945 }
946
947 /* Take the MAC Core out of reset.
948  * Enable statistics counting.
949  * Take the transmitter/receiver out of reset.
950  * This functionality may be done in the MPI firmware at a
951  * later date.
952  */
953 static int ql_8012_port_initialize(struct ql_adapter *qdev)
954 {
955         int status = 0;
956         u32 data;
957
958         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
959                 /* Another function has the semaphore, so
960                  * wait for the port init bit to come ready.
961                  */
962                 QPRINTK(qdev, LINK, INFO,
963                         "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965                 if (status) {
966                         QPRINTK(qdev, LINK, CRIT,
967                                 "Port initialize timed out.\n");
968                 }
969                 return status;
970         }
971
972         QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
973         /* Set the core reset. */
974         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975         if (status)
976                 goto end;
977         data |= GLOBAL_CFG_RESET;
978         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979         if (status)
980                 goto end;
981
982         /* Clear the core reset and turn on jumbo for receiver. */
983         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
984         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
985         data |= GLOBAL_CFG_TX_STAT_EN;
986         data |= GLOBAL_CFG_RX_STAT_EN;
987         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988         if (status)
989                 goto end;
990
991         /* Enable transmitter, and clear its reset. */
992         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
993         if (status)
994                 goto end;
995         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
996         data |= TX_CFG_EN;      /* Enable the transmitter. */
997         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998         if (status)
999                 goto end;
1000
1001         /* Enable receiver and clear its reset. */
1002         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1003         if (status)
1004                 goto end;
1005         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1006         data |= RX_CFG_EN;      /* Enable the receiver. */
1007         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008         if (status)
1009                 goto end;
1010
1011         /* Turn on jumbo. */
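        /* 0x2580 is 9600 decimal, presumably the largest (jumbo) frame size
         * in bytes that the MAC is being told to send and receive.
         */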
1012         status =
1013             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014         if (status)
1015                 goto end;
1016         status =
1017             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018         if (status)
1019                 goto end;
1020
1021         /* Signal to the world that the port is enabled.        */
1022         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1023 end:
1024         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025         return status;
1026 }
1027
1028 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029 {
1030         return PAGE_SIZE << qdev->lbq_buf_order;
1031 }
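/* Illustrative arithmetic (assuming 4K pages): an lbq_buf_order of 0 makes
 * the block a single 4K page, while an order of 1 (used for larger, e.g.
 * jumbo, RX buffers) makes it an 8K compound page that is then carved into
 * lbq_buf_size sized chunks below.
 */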
1032
1033 /* Get the next large buffer. */
1034 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1035 {
1036         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1037         rx_ring->lbq_curr_idx++;
1038         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1039                 rx_ring->lbq_curr_idx = 0;
1040         rx_ring->lbq_free_cnt++;
1041         return lbq_desc;
1042 }
1043
1044 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045                 struct rx_ring *rx_ring)
1046 {
1047         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049         pci_dma_sync_single_for_cpu(qdev->pdev,
1050                                         pci_unmap_addr(lbq_desc, mapaddr),
1051                                     rx_ring->lbq_buf_size,
1052                                         PCI_DMA_FROMDEVICE);
1053
1054         /* If it's the last chunk of our master page then
1055          * we unmap it.
1056          */
1057         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058                                         == ql_lbq_block_size(qdev))
1059                 pci_unmap_page(qdev->pdev,
1060                                 lbq_desc->p.pg_chunk.map,
1061                                 ql_lbq_block_size(qdev),
1062                                 PCI_DMA_FROMDEVICE);
1063         return lbq_desc;
1064 }
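/* The master page is DMA-mapped once (in ql_get_next_chunk) and shared by
 * all of the chunks carved from it, so each chunk only needs a sync-for-cpu
 * here; the full unmap is deferred until the chunk that ends the block is
 * consumed.
 */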
1065
1066 /* Get the next small buffer. */
1067 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1068 {
1069         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1070         rx_ring->sbq_curr_idx++;
1071         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1072                 rx_ring->sbq_curr_idx = 0;
1073         rx_ring->sbq_free_cnt++;
1074         return sbq_desc;
1075 }
1076
1077 /* Update an rx ring index. */
1078 static void ql_update_cq(struct rx_ring *rx_ring)
1079 {
1080         rx_ring->cnsmr_idx++;
1081         rx_ring->curr_entry++;
1082         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1083                 rx_ring->cnsmr_idx = 0;
1084                 rx_ring->curr_entry = rx_ring->cq_base;
1085         }
1086 }
1087
1088 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1089 {
1090         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1091 }
1092
1093 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1094                                                 struct bq_desc *lbq_desc)
1095 {
1096         if (!rx_ring->pg_chunk.page) {
1097                 u64 map;
1098                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099                                                 GFP_ATOMIC,
1100                                                 qdev->lbq_buf_order);
1101                 if (unlikely(!rx_ring->pg_chunk.page)) {
1102                         QPRINTK(qdev, DRV, ERR,
1103                                 "page allocation failed.\n");
1104                         return -ENOMEM;
1105                 }
1106                 rx_ring->pg_chunk.offset = 0;
1107                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108                                         0, ql_lbq_block_size(qdev),
1109                                         PCI_DMA_FROMDEVICE);
1110                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111                         __free_pages(rx_ring->pg_chunk.page,
1112                                         qdev->lbq_buf_order);
1113                         QPRINTK(qdev, DRV, ERR,
1114                                 "PCI mapping failed.\n");
1115                         return -ENOMEM;
1116                 }
1117                 rx_ring->pg_chunk.map = map;
1118                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119         }
1120
1121         /* Copy the current master pg_chunk info
1122          * to the current descriptor.
1123          */
1124         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126         /* Adjust the master page chunk for next
1127          * buffer get.
1128          */
1129         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131                 rx_ring->pg_chunk.page = NULL;
1132                 lbq_desc->p.pg_chunk.last_flag = 1;
1133         } else {
1134                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135                 get_page(rx_ring->pg_chunk.page);
1136                 lbq_desc->p.pg_chunk.last_flag = 0;
1137         }
1138         return 0;
1139 }
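/* Each chunk handed to the hardware holds its own reference on the master
 * page (get_page() above), so the page is not freed until every outstanding
 * RX buffer carved from it has been consumed; once the last chunk is handed
 * out, pg_chunk.page is cleared so the next refill allocates a fresh page.
 */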
1140 /* Process (refill) a large buffer queue. */
1141 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1142 {
1143         u32 clean_idx = rx_ring->lbq_clean_idx;
1144         u32 start_idx = clean_idx;
1145         struct bq_desc *lbq_desc;
1146         u64 map;
1147         int i;
1148
1149         while (rx_ring->lbq_free_cnt > 32) {
1150                 for (i = 0; i < 16; i++) {
1151                         QPRINTK(qdev, RX_STATUS, DEBUG,
1152                                 "lbq: try cleaning clean_idx = %d.\n",
1153                                 clean_idx);
1154                         lbq_desc = &rx_ring->lbq[clean_idx];
1155                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1156                                 QPRINTK(qdev, IFUP, ERR,
1157                                         "Could not get a page chunk.\n");
1158                                 return;
1159                         }
1160
1161                         map = lbq_desc->p.pg_chunk.map +
1162                                 lbq_desc->p.pg_chunk.offset;
1163                         pci_unmap_addr_set(lbq_desc, mapaddr, map);
1164                         pci_unmap_len_set(lbq_desc, maplen,
1165                                           rx_ring->lbq_buf_size);
1166                         *lbq_desc->addr = cpu_to_le64(map);
1167
1168                         pci_dma_sync_single_for_device(qdev->pdev, map,
1169                                                 rx_ring->lbq_buf_size,
1170                                                 PCI_DMA_FROMDEVICE);
1171                         clean_idx++;
1172                         if (clean_idx == rx_ring->lbq_len)
1173                                 clean_idx = 0;
1174                 }
1175
1176                 rx_ring->lbq_clean_idx = clean_idx;
1177                 rx_ring->lbq_prod_idx += 16;
1178                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179                         rx_ring->lbq_prod_idx = 0;
1180                 rx_ring->lbq_free_cnt -= 16;
1181         }
1182
1183         if (start_idx != clean_idx) {
1184                 QPRINTK(qdev, RX_STATUS, DEBUG,
1185                         "lbq: updating prod idx = %d.\n",
1186                         rx_ring->lbq_prod_idx);
1187                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188                                 rx_ring->lbq_prod_idx_db_reg);
1189         }
1190 }
1191
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194 {
1195         u32 clean_idx = rx_ring->sbq_clean_idx;
1196         u32 start_idx = clean_idx;
1197         struct bq_desc *sbq_desc;
1198         u64 map;
1199         int i;
1200
1201         while (rx_ring->sbq_free_cnt > 16) {
1202                 for (i = 0; i < 16; i++) {
1203                         sbq_desc = &rx_ring->sbq[clean_idx];
1204                         QPRINTK(qdev, RX_STATUS, DEBUG,
1205                                 "sbq: try cleaning clean_idx = %d.\n",
1206                                 clean_idx);
1207                         if (sbq_desc->p.skb == NULL) {
1208                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1209                                         "sbq: getting new skb for index %d.\n",
1210                                         sbq_desc->index);
1211                                 sbq_desc->p.skb =
1212                                     netdev_alloc_skb(qdev->ndev,
1213                                                      SMALL_BUFFER_SIZE);
1214                                 if (sbq_desc->p.skb == NULL) {
1215                                         QPRINTK(qdev, PROBE, ERR,
1216                                                 "Couldn't get an skb.\n");
1217                                         rx_ring->sbq_clean_idx = clean_idx;
1218                                         return;
1219                                 }
1220                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1221                                 map = pci_map_single(qdev->pdev,
1222                                                      sbq_desc->p.skb->data,
1223                                                      rx_ring->sbq_buf_size,
1224                                                      PCI_DMA_FROMDEVICE);
1225                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1226                                         QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1227                                         rx_ring->sbq_clean_idx = clean_idx;
1228                                         dev_kfree_skb_any(sbq_desc->p.skb);
1229                                         sbq_desc->p.skb = NULL;
1230                                         return;
1231                                 }
1232                                 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1233                                 pci_unmap_len_set(sbq_desc, maplen,
1234                                                   rx_ring->sbq_buf_size);
1235                                 *sbq_desc->addr = cpu_to_le64(map);
1236                         }
1237
1238                         clean_idx++;
1239                         if (clean_idx == rx_ring->sbq_len)
1240                                 clean_idx = 0;
1241                 }
1242                 rx_ring->sbq_clean_idx = clean_idx;
1243                 rx_ring->sbq_prod_idx += 16;
1244                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245                         rx_ring->sbq_prod_idx = 0;
1246                 rx_ring->sbq_free_cnt -= 16;
1247         }
1248
1249         if (start_idx != clean_idx) {
1250                 QPRINTK(qdev, RX_STATUS, DEBUG,
1251                         "sbq: updating prod idx = %d.\n",
1252                         rx_ring->sbq_prod_idx);
1253                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254                                 rx_ring->sbq_prod_idx_db_reg);
1255         }
1256 }
1257
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259                                     struct rx_ring *rx_ring)
1260 {
1261         ql_update_sbq(qdev, rx_ring);
1262         ql_update_lbq(qdev, rx_ring);
1263 }
1264
1265 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266  * fails at some stage, or from the interrupt when a tx completes.
1267  */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269                           struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271         int i;
1272         for (i = 0; i < mapped; i++) {
1273                 if (i == 0 || (i == 7 && mapped > 7)) {
1274                         /*
1275                          * Unmap the skb->data area, or the
1276                          * external sglist (AKA the Outbound
1277                          * Address List (OAL)).
1278                          * If it's the zeroth element, then it's
1279                          * the skb->data area.  If it's the 7th
1280                          * element and there are more than 6 frags,
1281                          * then it's an OAL.
1282                          */
1283                         if (i == 7) {
1284                                 QPRINTK(qdev, TX_DONE, DEBUG,
1285                                         "unmapping OAL area.\n");
1286                         }
1287                         pci_unmap_single(qdev->pdev,
1288                                          pci_unmap_addr(&tx_ring_desc->map[i],
1289                                                         mapaddr),
1290                                          pci_unmap_len(&tx_ring_desc->map[i],
1291                                                        maplen),
1292                                          PCI_DMA_TODEVICE);
1293                 } else {
1294                         QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1295                                 i);
1296                         pci_unmap_page(qdev->pdev,
1297                                        pci_unmap_addr(&tx_ring_desc->map[i],
1298                                                       mapaddr),
1299                                        pci_unmap_len(&tx_ring_desc->map[i],
1300                                                      maplen), PCI_DMA_TODEVICE);
1301                 }
1302         }
1303
1304 }
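/* Slots 0 and 7 were mapped with pci_map_single() (the skb->data linear
 * area and the OAL sglist buffer respectively, see ql_map_send() below),
 * which is why they are unmapped with pci_unmap_single() while every other
 * slot holds a page fragment and uses pci_unmap_page().
 */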
1305
1306 /* Map the buffers for this transmit.  This will return
1307  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308  */
1309 static int ql_map_send(struct ql_adapter *qdev,
1310                        struct ob_mac_iocb_req *mac_iocb_ptr,
1311                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312 {
1313         int len = skb_headlen(skb);
1314         dma_addr_t map;
1315         int frag_idx, err, map_idx = 0;
1316         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317         int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319         if (frag_cnt) {
1320                 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1321         }
1322         /*
1323          * Map the skb buffer first.
1324          */
1325         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1326
1327         err = pci_dma_mapping_error(qdev->pdev, map);
1328         if (err) {
1329                 QPRINTK(qdev, TX_QUEUED, ERR,
1330                         "PCI mapping failed with error: %d\n", err);
1331
1332                 return NETDEV_TX_BUSY;
1333         }
1334
1335         tbd->len = cpu_to_le32(len);
1336         tbd->addr = cpu_to_le64(map);
1337         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1338         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1339         map_idx++;
1340
1341         /*
1342          * This loop fills the remainder of the 8 address descriptors
1343          * in the IOCB.  If there are more than 7 fragments, then the
1344          * eighth address desc will point to an external list (OAL).
1345          * When this happens, the remainder of the frags will be stored
1346          * in this list.
1347          */
1348         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1349                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1350                 tbd++;
1351                 if (frag_idx == 6 && frag_cnt > 7) {
1352                         /* Let's tack on an sglist.
1353                          * Our control block will now
1354                          * look like this:
1355                          * iocb->seg[0] = skb->data
1356                          * iocb->seg[1] = frag[0]
1357                          * iocb->seg[2] = frag[1]
1358                          * iocb->seg[3] = frag[2]
1359                          * iocb->seg[4] = frag[3]
1360                          * iocb->seg[5] = frag[4]
1361                          * iocb->seg[6] = frag[5]
1362                          * iocb->seg[7] = ptr to OAL (external sglist)
1363                          * oal->seg[0] = frag[6]
1364                          * oal->seg[1] = frag[7]
1365                          * oal->seg[2] = frag[8]
1366                          * oal->seg[3] = frag[9]
1367                          * oal->seg[4] = frag[10]
1368                          *      etc...
1369                          */
1370                         /* Tack on the OAL in the eighth segment of IOCB. */
1371                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1372                                              sizeof(struct oal),
1373                                              PCI_DMA_TODEVICE);
1374                         err = pci_dma_mapping_error(qdev->pdev, map);
1375                         if (err) {
1376                                 QPRINTK(qdev, TX_QUEUED, ERR,
1377                                         "PCI mapping of outbound address list failed with error: %d\n",
1378                                         err);
1379                                 goto map_error;
1380                         }
1381
1382                         tbd->addr = cpu_to_le64(map);
1383                         /*
1384                          * The length is the number of fragments
1385                          * that remain to be mapped times the length
1386                          * of our sglist (OAL).
1387                          */
1388                         tbd->len =
1389                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1390                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1391                         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1392                                            map);
1393                         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1394                                           sizeof(struct oal));
1395                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1396                         map_idx++;
1397                 }
1398
1399                 map =
1400                     pci_map_page(qdev->pdev, frag->page,
1401                                  frag->page_offset, frag->size,
1402                                  PCI_DMA_TODEVICE);
1403
1404                 err = pci_dma_mapping_error(qdev->pdev, map);
1405                 if (err) {
1406                         QPRINTK(qdev, TX_QUEUED, ERR,
1407                                 "PCI mapping frags failed with error: %d.\n",
1408                                 err);
1409                         goto map_error;
1410                 }
1411
1412                 tbd->addr = cpu_to_le64(map);
1413                 tbd->len = cpu_to_le32(frag->size);
1414                 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415                 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416                                   frag->size);
1417
1418         }
1419         /* Save the number of segments we've mapped. */
1420         tx_ring_desc->map_cnt = map_idx;
1421         /* Terminate the last segment. */
1422         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423         return NETDEV_TX_OK;
1424
1425 map_error:
1426         /*
1427          * If the first frag mapping failed, then map_idx will be one,
1428          * so only the skb->data area gets unmapped.  Otherwise we
1429          * pass in the number of segments that mapped successfully
1430          * so they can be unmapped.
1431          */
1432         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433         return NETDEV_TX_BUSY;
1434 }
1435
1436 static void ql_realign_skb(struct sk_buff *skb, int len)
1437 {
1438         void *temp_addr = skb->data;
1439
1440         /* Undo the skb_reserve(skb,32) we did before
1441          * giving to hardware, and realign data on
1442          * a 2-byte boundary.
1443          */
1444         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1445         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1446         skb_copy_to_linear_data(skb, temp_addr,
1447                 (unsigned int)len);
1448 }
1449
1450 /*
1451  * This function builds an skb for the given inbound
1452  * completion.  It will be rewritten for readability in the near
1453  * future, but for now it works well.
1454  */
1455 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1456                                        struct rx_ring *rx_ring,
1457                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1458 {
1459         struct bq_desc *lbq_desc;
1460         struct bq_desc *sbq_desc;
1461         struct sk_buff *skb = NULL;
1462         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1463         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1464
1465         /*
1466          * Handle the header buffer if present.
1467          */
1468         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1469             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1470                 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1471                 /*
1472                  * Headers fit nicely into a small buffer.
1473                  */
1474                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1475                 pci_unmap_single(qdev->pdev,
1476                                 pci_unmap_addr(sbq_desc, mapaddr),
1477                                 pci_unmap_len(sbq_desc, maplen),
1478                                 PCI_DMA_FROMDEVICE);
1479                 skb = sbq_desc->p.skb;
1480                 ql_realign_skb(skb, hdr_len);
1481                 skb_put(skb, hdr_len);
1482                 sbq_desc->p.skb = NULL;
1483         }
1484
1485         /*
1486          * Handle the data buffer(s).
1487          */
1488         if (unlikely(!length)) {        /* Is there data too? */
1489                 QPRINTK(qdev, RX_STATUS, DEBUG,
1490                         "No Data buffer in this packet.\n");
1491                 return skb;
1492         }
1493
1494         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1495                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1496                         QPRINTK(qdev, RX_STATUS, DEBUG,
1497                                 "Headers in small, data of %d bytes in small, combine them.\n", length);
1498                         /*
1499                          * Data is less than small buffer size so it's
1500                          * stuffed in a small buffer.
1501                          * For this case we append the data
1502                          * from the "data" small buffer to the "header" small
1503                          * buffer.
1504                          */
1505                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1506                         pci_dma_sync_single_for_cpu(qdev->pdev,
1507                                                     pci_unmap_addr
1508                                                     (sbq_desc, mapaddr),
1509                                                     pci_unmap_len
1510                                                     (sbq_desc, maplen),
1511                                                     PCI_DMA_FROMDEVICE);
1512                         memcpy(skb_put(skb, length),
1513                                sbq_desc->p.skb->data, length);
1514                         pci_dma_sync_single_for_device(qdev->pdev,
1515                                                        pci_unmap_addr
1516                                                        (sbq_desc,
1517                                                         mapaddr),
1518                                                        pci_unmap_len
1519                                                        (sbq_desc,
1520                                                         maplen),
1521                                                        PCI_DMA_FROMDEVICE);
1522                 } else {
1523                         QPRINTK(qdev, RX_STATUS, DEBUG,
1524                                 "%d bytes in a single small buffer.\n", length);
1525                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1526                         skb = sbq_desc->p.skb;
1527                         ql_realign_skb(skb, length);
1528                         skb_put(skb, length);
1529                         pci_unmap_single(qdev->pdev,
1530                                          pci_unmap_addr(sbq_desc,
1531                                                         mapaddr),
1532                                          pci_unmap_len(sbq_desc,
1533                                                        maplen),
1534                                          PCI_DMA_FROMDEVICE);
1535                         sbq_desc->p.skb = NULL;
1536                 }
1537         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1538                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1539                         QPRINTK(qdev, RX_STATUS, DEBUG,
1540                                 "Header in small, %d bytes in large. Chain large to small!\n", length);
1541                         /*
1542                          * The data is in a single large buffer.  We
1543                          * chain it to the header buffer's skb and let
1544                          * it rip.
1545                          */
1546                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1547                         QPRINTK(qdev, RX_STATUS, DEBUG,
1548                                 "Chaining page at offset = %d, "
1549                                 "for %d bytes to skb.\n",
1550                                 lbq_desc->p.pg_chunk.offset, length);
1551                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552                                                 lbq_desc->p.pg_chunk.offset,
1553                                                 length);
1554                         skb->len += length;
1555                         skb->data_len += length;
1556                         skb->truesize += length;
1557                 } else {
1558                         /*
1559                          * The headers and data are in a single large buffer. We
1560                          * copy it to a new skb and let it go. This can happen with
1561                          * jumbo mtu on a non-TCP/UDP frame.
1562                          */
1563                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1564                         skb = netdev_alloc_skb(qdev->ndev, length);
1565                         if (skb == NULL) {
1566                                 QPRINTK(qdev, PROBE, DEBUG,
1567                                         "No skb available, drop the packet.\n");
1568                                 return NULL;
1569                         }
1570                         pci_unmap_page(qdev->pdev,
1571                                        pci_unmap_addr(lbq_desc,
1572                                                       mapaddr),
1573                                        pci_unmap_len(lbq_desc, maplen),
1574                                        PCI_DMA_FROMDEVICE);
1575                         skb_reserve(skb, NET_IP_ALIGN);
1576                         QPRINTK(qdev, RX_STATUS, DEBUG,
1577                                 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1578                         skb_fill_page_desc(skb, 0,
1579                                                 lbq_desc->p.pg_chunk.page,
1580                                                 lbq_desc->p.pg_chunk.offset,
1581                                                 length);
1582                         skb->len += length;
1583                         skb->data_len += length;
1584                         skb->truesize += length;
1585                         length -= length;
1586                         __pskb_pull_tail(skb,
1587                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1588                                 VLAN_ETH_HLEN : ETH_HLEN);
1589                 }
1590         } else {
1591                 /*
1592                  * The data is in a chain of large buffers
1593                  * pointed to by a small buffer.  We loop
1594                  * through and chain them to our small header
1595                  * buffer's skb.
1596                  * frags:  There are 18 max frags and our small
1597                  *         buffer will hold 32 of them. The thing is,
1598                  *         we'll use 3 max for our 9000 byte jumbo
1599                  *         frames.  If the MTU goes up we could
1600                  *         eventually be in trouble.
1601                  */
1602                 int size, i = 0;
1603                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1604                 pci_unmap_single(qdev->pdev,
1605                                  pci_unmap_addr(sbq_desc, mapaddr),
1606                                  pci_unmap_len(sbq_desc, maplen),
1607                                  PCI_DMA_FROMDEVICE);
1608                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1609                         /*
1610                          * This is a non-TCP/UDP IP frame, so
1611                          * the headers aren't split into a small
1612                          * buffer.  We have to use the small buffer
1613                          * that contains our sg list as our skb to
1614                          * send upstairs. Copy the sg list here to
1615                          * a local buffer and use it to find the
1616                          * pages to chain.
1617                          */
1618                         QPRINTK(qdev, RX_STATUS, DEBUG,
1619                                 "%d bytes of headers & data in chain of large.\n", length);
1620                         skb = sbq_desc->p.skb;
1621                         sbq_desc->p.skb = NULL;
1622                         skb_reserve(skb, NET_IP_ALIGN);
1623                 }
1624                 while (length > 0) {
1625                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1626                         size = (length < rx_ring->lbq_buf_size) ? length :
1627                                 rx_ring->lbq_buf_size;
1628
1629                         QPRINTK(qdev, RX_STATUS, DEBUG,
1630                                 "Adding page %d to skb for %d bytes.\n",
1631                                 i, size);
1632                         skb_fill_page_desc(skb, i,
1633                                                 lbq_desc->p.pg_chunk.page,
1634                                                 lbq_desc->p.pg_chunk.offset,
1635                                                 size);
1636                         skb->len += size;
1637                         skb->data_len += size;
1638                         skb->truesize += size;
1639                         length -= size;
1640                         i++;
1641                 }
1642                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1643                                 VLAN_ETH_HLEN : ETH_HLEN);
1644         }
1645         return skb;
1646 }
1647
1648 /* Process an inbound completion from an rx ring. */
1649 static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1650                                    struct rx_ring *rx_ring,
1651                                    struct ib_mac_iocb_rsp *ib_mac_rsp)
1652 {
1653         struct net_device *ndev = qdev->ndev;
1654         struct sk_buff *skb = NULL;
1655         u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656                         IB_MAC_IOCB_RSP_VLAN_MASK);
1657
1658         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659
1660         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1661         if (unlikely(!skb)) {
1662                 QPRINTK(qdev, RX_STATUS, DEBUG,
1663                         "No skb available, drop packet.\n");
1664                 return;
1665         }
1666
1667         /* Frame error, so drop the packet. */
1668         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1669                 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1670                                         ib_mac_rsp->flags2);
1671                 dev_kfree_skb_any(skb);
1672                 return;
1673         }
1674
1675         /* The max framesize filter on this chip is set higher than
1676          * MTU since FCoE uses 2k frames.
1677          */
1678         if (skb->len > ndev->mtu + ETH_HLEN) {
1679                 dev_kfree_skb_any(skb);
1680                 return;
1681         }
1682
1683         prefetch(skb->data);
1684         skb->dev = ndev;
1685         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1686                 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1687                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1688                         IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1689                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1690                         IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1691                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692                         IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1693         }
1694         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1695                 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1696         }
1697
1698         skb->protocol = eth_type_trans(skb, ndev);
1699         skb->ip_summed = CHECKSUM_NONE;
1700
1701         /* If rx checksum is on, and there are no
1702          * csum or frame errors.
1703          */
1704         if (qdev->rx_csum &&
1705                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1706                 /* TCP frame. */
1707                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1708                         QPRINTK(qdev, RX_STATUS, DEBUG,
1709                                         "TCP checksum done!\n");
1710                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1711                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1712                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1713                         /* Unfragmented ipv4 UDP frame. */
1714                         struct iphdr *iph = (struct iphdr *) skb->data;
1715                         if (!(iph->frag_off &
1716                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1717                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1719                                                 "UDP checksum done!\n");
1720                         }
1721                 }
1722         }
1723
1724         ndev->stats.rx_packets++;
1725         ndev->stats.rx_bytes += skb->len;
1726         skb_record_rx_queue(skb, rx_ring->cq_id);
1727         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1728                 if (qdev->vlgrp &&
1729                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1730                         (vlan_id != 0))
1731                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1732                                 vlan_id, skb);
1733                 else
1734                         napi_gro_receive(&rx_ring->napi, skb);
1735         } else {
1736                 if (qdev->vlgrp &&
1737                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1738                         (vlan_id != 0))
1739                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1740                 else
1741                         netif_receive_skb(skb);
1742         }
1743 }
1744
1745 /* Process an outbound completion from an rx ring. */
1746 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1747                                    struct ob_mac_iocb_rsp *mac_rsp)
1748 {
1749         struct net_device *ndev = qdev->ndev;
1750         struct tx_ring *tx_ring;
1751         struct tx_ring_desc *tx_ring_desc;
1752
1753         QL_DUMP_OB_MAC_RSP(mac_rsp);
1754         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1755         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1756         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1757         ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1758         ndev->stats.tx_packets++;
1759         dev_kfree_skb(tx_ring_desc->skb);
1760         tx_ring_desc->skb = NULL;
1761
1762         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1763                                         OB_MAC_IOCB_RSP_S |
1764                                         OB_MAC_IOCB_RSP_L |
1765                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1766                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1767                         QPRINTK(qdev, TX_DONE, WARNING,
1768                                 "Total descriptor length did not match transfer length.\n");
1769                 }
1770                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1771                         QPRINTK(qdev, TX_DONE, WARNING,
1772                                 "Frame too short to be legal, not sent.\n");
1773                 }
1774                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1775                         QPRINTK(qdev, TX_DONE, WARNING,
1776                                 "Frame too long, but sent anyway.\n");
1777                 }
1778                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1779                         QPRINTK(qdev, TX_DONE, WARNING,
1780                                 "PCI backplane error. Frame not sent.\n");
1781                 }
1782         }
1783         atomic_inc(&tx_ring->tx_count);
1784 }
1785
1786 /* Fire up a handler to reset the MPI processor. */
1787 void ql_queue_fw_error(struct ql_adapter *qdev)
1788 {
1789         ql_link_off(qdev);
1790         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1791 }
1792
1793 void ql_queue_asic_error(struct ql_adapter *qdev)
1794 {
1795         ql_link_off(qdev);
1796         ql_disable_interrupts(qdev);
1797         /* Clear adapter up bit to signal the recovery
1798          * process that it shouldn't kill the reset worker
1799          * thread
1800          */
1801         clear_bit(QL_ADAPTER_UP, &qdev->flags);
1802         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1803 }
1804
1805 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1806                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
1807 {
1808         switch (ib_ae_rsp->event) {
1809         case MGMT_ERR_EVENT:
1810                 QPRINTK(qdev, RX_ERR, ERR,
1811                         "Management Processor Fatal Error.\n");
1812                 ql_queue_fw_error(qdev);
1813                 return;
1814
1815         case CAM_LOOKUP_ERR_EVENT:
1816                 QPRINTK(qdev, LINK, ERR,
1817                         "CAM lookup returned multiple hits.\n");
1818                 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1819                 ql_queue_asic_error(qdev);
1820                 return;
1821
1822         case SOFT_ECC_ERROR_EVENT:
1823                 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1824                 ql_queue_asic_error(qdev);
1825                 break;
1826
1827         case PCI_ERR_ANON_BUF_RD:
1828                 QPRINTK(qdev, RX_ERR, ERR,
1829                         "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1830                         ib_ae_rsp->q_id);
1831                 ql_queue_asic_error(qdev);
1832                 break;
1833
1834         default:
1835                 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1836                         ib_ae_rsp->event);
1837                 ql_queue_asic_error(qdev);
1838                 break;
1839         }
1840 }
1841
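     /* Drain a completion queue that carries TX completions for this
      * adapter and return the number of entries processed.
      */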
1842 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1843 {
1844         struct ql_adapter *qdev = rx_ring->qdev;
1845         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1846         struct ob_mac_iocb_rsp *net_rsp = NULL;
1847         int count = 0;
1848
1849         struct tx_ring *tx_ring;
1850         /* While there are entries in the completion queue. */
1851         while (prod != rx_ring->cnsmr_idx) {
1852
1853                 QPRINTK(qdev, RX_STATUS, DEBUG,
1854                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1855                         prod, rx_ring->cnsmr_idx);
1856
1857                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1858                 rmb();
1859                 switch (net_rsp->opcode) {
1860
1861                 case OPCODE_OB_MAC_TSO_IOCB:
1862                 case OPCODE_OB_MAC_IOCB:
1863                         ql_process_mac_tx_intr(qdev, net_rsp);
1864                         break;
1865                 default:
1866                         QPRINTK(qdev, RX_STATUS, DEBUG,
1867                                 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1868                                 net_rsp->opcode);
1869                 }
1870                 count++;
1871                 ql_update_cq(rx_ring);
1872                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1873         }
1874         ql_write_cq_idx(rx_ring);
1875         if (net_rsp != NULL) {
1876                 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1877                 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
1878                     atomic_read(&tx_ring->queue_stopped) &&
1879                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1880                         /*
1881                          * The queue got stopped because the tx_ring was full.
1882                          * Wake it up, because it's now at least 25% empty.
1883                          */
1884                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
1885         }
1886
1887         return count;
1888 }
1889
1890 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1891 {
1892         struct ql_adapter *qdev = rx_ring->qdev;
1893         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1894         struct ql_net_rsp_iocb *net_rsp;
1895         int count = 0;
1896
1897         /* While there are entries in the completion queue. */
1898         while (prod != rx_ring->cnsmr_idx) {
1899
1900                 QPRINTK(qdev, RX_STATUS, DEBUG,
1901                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1902                         prod, rx_ring->cnsmr_idx);
1903
1904                 net_rsp = rx_ring->curr_entry;
1905                 rmb();
1906                 switch (net_rsp->opcode) {
1907                 case OPCODE_IB_MAC_IOCB:
1908                         ql_process_mac_rx_intr(qdev, rx_ring,
1909                                                (struct ib_mac_iocb_rsp *)
1910                                                net_rsp);
1911                         break;
1912
1913                 case OPCODE_IB_AE_IOCB:
1914                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1915                                                 net_rsp);
1916                         break;
1917                 default:
1918                         {
1919                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1920                                         "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1921                                         net_rsp->opcode);
1922                         }
1923                 }
1924                 count++;
1925                 ql_update_cq(rx_ring);
1926                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1927                 if (count == budget)
1928                         break;
1929         }
1930         ql_update_buffer_queues(qdev, rx_ring);
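             /* Replenish the small/large buffer queues to replace the
              * buffers consumed by the frames processed above.
              */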
1931         ql_write_cq_idx(rx_ring);
1932         return count;
1933 }
1934
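     /* NAPI poll routine for an MSI-X vector.  Service any TX completion
      * rings owned by this vector first, then clean the RSS ring up to
      * the NAPI budget.
      */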
1935 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1936 {
1937         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1938         struct ql_adapter *qdev = rx_ring->qdev;
1939         struct rx_ring *trx_ring;
1940         int i, work_done = 0;
1941         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
1942
1943         QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1944                 rx_ring->cq_id);
1945
1946         /* Service the TX rings first.  They start
1947          * right after the RSS rings. */
1948         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1949                 trx_ring = &qdev->rx_ring[i];
1950                 /* If this TX completion ring belongs to this vector and
1951                  * it's not empty then service it.
1952                  */
1953                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1954                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1955                                         trx_ring->cnsmr_idx)) {
1956                         QPRINTK(qdev, INTR, DEBUG,
1957                                 "%s: Servicing TX completion ring %d.\n",
1958                                 __func__, trx_ring->cq_id);
1959                         ql_clean_outbound_rx_ring(trx_ring);
1960                 }
1961         }
1962
1963         /*
1964          * Now service the RSS ring if it's active.
1965          */
1966         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1967                                         rx_ring->cnsmr_idx) {
1968                 QPRINTK(qdev, INTR, DEBUG,
1969                         "%s: Servicing RX completion ring %d.\n",
1970                         __func__, rx_ring->cq_id);
1971                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1972         }
1973
1974         if (work_done < budget) {
1975                 napi_complete(napi);
1976                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1977         }
1978         return work_done;
1979 }
1980
1981 static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1982 {
1983         struct ql_adapter *qdev = netdev_priv(ndev);
1984
1985         qdev->vlgrp = grp;
1986         if (grp) {
1987                 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1988                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1989                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1990         } else {
1991                 QPRINTK(qdev, IFUP, DEBUG,
1992                         "Turning off VLAN in NIC_RCV_CFG.\n");
1993                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1994         }
1995 }
1996
1997 static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1998 {
1999         struct ql_adapter *qdev = netdev_priv(ndev);
2000         u32 enable_bit = MAC_ADDR_E;
2001         int status;
2002
2003         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2004         if (status)
2005                 return;
2006         if (ql_set_mac_addr_reg
2007             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2008                 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2009         }
2010         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2011 }
2012
2013 static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2014 {
2015         struct ql_adapter *qdev = netdev_priv(ndev);
2016         u32 enable_bit = 0;
2017         int status;
2018
2019         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2020         if (status)
2021                 return;
2022
2023         if (ql_set_mac_addr_reg
2024             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2025                 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2026         }
2027         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2028
2029 }
2030
2031 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2032 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2033 {
2034         struct rx_ring *rx_ring = dev_id;
2035         napi_schedule(&rx_ring->napi);
2036         return IRQ_HANDLED;
2037 }
2038
2039 /* This handles a fatal error, MPI activity, and the default
2040  * rx_ring in an MSI-X multiple vector environment.
2041  * In an MSI/Legacy environment it also processes the rest of
2042  * the rx_rings.
2043  */
2044 static irqreturn_t qlge_isr(int irq, void *dev_id)
2045 {
2046         struct rx_ring *rx_ring = dev_id;
2047         struct ql_adapter *qdev = rx_ring->qdev;
2048         struct intr_context *intr_context = &qdev->intr_context[0];
2049         u32 var;
2050         int work_done = 0;
2051
2052         spin_lock(&qdev->hw_lock);
2053         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2054                 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2055                 spin_unlock(&qdev->hw_lock);
2056                 return IRQ_NONE;
2057         }
2058         spin_unlock(&qdev->hw_lock);
2059
2060         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2061
2062         /*
2063          * Check for fatal error.
2064          */
2065         if (var & STS_FE) {
2066                 ql_queue_asic_error(qdev);
2067                 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2068                 var = ql_read32(qdev, ERR_STS);
2069                 QPRINTK(qdev, INTR, ERR,
2070                         "Resetting chip. Error Status Register = 0x%x\n", var);
2071                 return IRQ_HANDLED;
2072         }
2073
2074         /*
2075          * Check MPI processor activity.
2076          */
2077         if ((var & STS_PI) &&
2078                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2079                 /*
2080                  * We've got an async event or mailbox completion.
2081                  * Handle it and clear the source of the interrupt.
2082                  */
2083                 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2084                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2085                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2086                 queue_delayed_work_on(smp_processor_id(),
2087                                 qdev->workqueue, &qdev->mpi_work, 0);
2088                 work_done++;
2089         }
2090
2091         /*
2092          * Get the bit-mask that shows the active queues for this
2093          * pass.  Compare it to the queues that this irq services
2094          * and call napi if there's a match.
2095          */
2096         var = ql_read32(qdev, ISR1);
2097         if (var & intr_context->irq_mask) {
2098                 QPRINTK(qdev, INTR, INFO,
2099                         "Waking handler for rx_ring[0].\n");
2100                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2101                 napi_schedule(&rx_ring->napi);
2102                 work_done++;
2103         }
2104         ql_enable_completion_interrupt(qdev, intr_context->intr);
2105         return work_done ? IRQ_HANDLED : IRQ_NONE;
2106 }
2107
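     /* Set up the TSO fields of the IOCB when the skb is GSO.  Returns 1
      * if TSO was set up, 0 for a non-GSO skb, or a negative errno if the
      * cloned header could not be expanded.
      */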
2108 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2109 {
2110
2111         if (skb_is_gso(skb)) {
2112                 int err;
2113                 if (skb_header_cloned(skb)) {
2114                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2115                         if (err)
2116                                 return err;
2117                 }
2118
2119                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2120                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2121                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2122                 mac_iocb_ptr->total_hdrs_len =
2123                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2124                 mac_iocb_ptr->net_trans_offset =
2125                     cpu_to_le16(skb_network_offset(skb) |
2126                                 skb_transport_offset(skb)
2127                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2128                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2129                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2130                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2131                         struct iphdr *iph = ip_hdr(skb);
2132                         iph->check = 0;
2133                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2134                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2135                                                                  iph->daddr, 0,
2136                                                                  IPPROTO_TCP,
2137                                                                  0);
2138                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2139                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2140                         tcp_hdr(skb)->check =
2141                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2142                                              &ipv6_hdr(skb)->daddr,
2143                                              0, IPPROTO_TCP, 0);
2144                 }
2145                 return 1;
2146         }
2147         return 0;
2148 }
2149
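     /* Set up checksum offload for a non-TSO IPv4 TCP or UDP frame.  The
      * pseudo-header checksum is seeded here; the chip completes the
      * checksum over the payload and inserts the result.
      */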
2150 static void ql_hw_csum_setup(struct sk_buff *skb,
2151                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2152 {
2153         int len;
2154         struct iphdr *iph = ip_hdr(skb);
2155         __sum16 *check;
2156         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2157         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2158         mac_iocb_ptr->net_trans_offset =
2159                 cpu_to_le16(skb_network_offset(skb) |
2160                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2161
2162         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2163         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2164         if (likely(iph->protocol == IPPROTO_TCP)) {
2165                 check = &(tcp_hdr(skb)->check);
2166                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2167                 mac_iocb_ptr->total_hdrs_len =
2168                     cpu_to_le16(skb_transport_offset(skb) +
2169                                 (tcp_hdr(skb)->doff << 2));
2170         } else {
2171                 check = &(udp_hdr(skb)->check);
2172                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2173                 mac_iocb_ptr->total_hdrs_len =
2174                     cpu_to_le16(skb_transport_offset(skb) +
2175                                 sizeof(struct udphdr));
2176         }
2177         *check = ~csum_tcpudp_magic(iph->saddr,
2178                                     iph->daddr, len, iph->protocol, 0);
2179 }
2180
2181 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2182 {
2183         struct tx_ring_desc *tx_ring_desc;
2184         struct ob_mac_iocb_req *mac_iocb_ptr;
2185         struct ql_adapter *qdev = netdev_priv(ndev);
2186         int tso;
2187         struct tx_ring *tx_ring;
2188         u32 tx_ring_idx = (u32) skb->queue_mapping;
2189
2190         tx_ring = &qdev->tx_ring[tx_ring_idx];
2191
2192         if (skb_padto(skb, ETH_ZLEN))
2193                 return NETDEV_TX_OK;
2194
2195         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2196                 QPRINTK(qdev, TX_QUEUED, INFO,
2197                         "%s: shutting down tx queue %d due to lack of resources.\n",
2198                         __func__, tx_ring_idx);
2199                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2200                 atomic_inc(&tx_ring->queue_stopped);
2201                 return NETDEV_TX_BUSY;
2202         }
2203         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2204         mac_iocb_ptr = tx_ring_desc->queue_entry;
2205         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2206
2207         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2208         mac_iocb_ptr->tid = tx_ring_desc->index;
2209         /* Record the tx queue for this IO in the IOCB so that when
2210          * we get the completion we can use it to establish the context.
2211          */
2212         mac_iocb_ptr->txq_idx = tx_ring_idx;
2213         tx_ring_desc->skb = skb;
2214
2215         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2216
2217         if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2218                 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2219                         vlan_tx_tag_get(skb));
2220                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2221                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2222         }
2223         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2224         if (tso < 0) {
2225                 dev_kfree_skb_any(skb);
2226                 return NETDEV_TX_OK;
2227         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2228                 ql_hw_csum_setup(skb,
2229                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2230         }
2231         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2232                         NETDEV_TX_OK) {
2233                 QPRINTK(qdev, TX_QUEUED, ERR,
2234                                 "Could not map the segments.\n");
2235                 return NETDEV_TX_BUSY;
2236         }
2237         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2238         tx_ring->prod_idx++;
2239         if (tx_ring->prod_idx == tx_ring->wq_len)
2240                 tx_ring->prod_idx = 0;
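             /* Make sure the IOCB contents are written to memory before
              * the producer index doorbell write below.
              */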
2241         wmb();
2242
2243         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2244         QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2245                 tx_ring->prod_idx, skb->len);
2246
2247         atomic_dec(&tx_ring->tx_count);
2248         return NETDEV_TX_OK;
2249 }
2250
2251 static void ql_free_shadow_space(struct ql_adapter *qdev)
2252 {
2253         if (qdev->rx_ring_shadow_reg_area) {
2254                 pci_free_consistent(qdev->pdev,
2255                                     PAGE_SIZE,
2256                                     qdev->rx_ring_shadow_reg_area,
2257                                     qdev->rx_ring_shadow_reg_dma);
2258                 qdev->rx_ring_shadow_reg_area = NULL;
2259         }
2260         if (qdev->tx_ring_shadow_reg_area) {
2261                 pci_free_consistent(qdev->pdev,
2262                                     PAGE_SIZE,
2263                                     qdev->tx_ring_shadow_reg_area,
2264                                     qdev->tx_ring_shadow_reg_dma);
2265                 qdev->tx_ring_shadow_reg_area = NULL;
2266         }
2267 }
2268
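     /* Allocate one coherent DMA page each for the RX and TX shadow
      * register areas.  The chip writes queue index state here (such as
      * the completion queue producer index) so the driver can read it
      * from host memory instead of issuing register reads.
      */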
2269 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2270 {
2271         qdev->rx_ring_shadow_reg_area =
2272             pci_alloc_consistent(qdev->pdev,
2273                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2274         if (qdev->rx_ring_shadow_reg_area == NULL) {
2275                 QPRINTK(qdev, IFUP, ERR,
2276                         "Allocation of RX shadow space failed.\n");
2277                 return -ENOMEM;
2278         }
2279         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2280         qdev->tx_ring_shadow_reg_area =
2281             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2282                                  &qdev->tx_ring_shadow_reg_dma);
2283         if (qdev->tx_ring_shadow_reg_area == NULL) {
2284                 QPRINTK(qdev, IFUP, ERR,
2285                         "Allocation of TX shadow space failed.\n");
2286                 goto err_wqp_sh_area;
2287         }
2288         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2289         return 0;
2290
2291 err_wqp_sh_area:
2292         pci_free_consistent(qdev->pdev,
2293                             PAGE_SIZE,
2294                             qdev->rx_ring_shadow_reg_area,
2295                             qdev->rx_ring_shadow_reg_dma);
2296         return -ENOMEM;
2297 }
2298
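     /* Point each tx_ring_desc shadow entry at its IOCB slot in the
      * work queue and reset the ring's bookkeeping counters.
      */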
2299 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2300 {
2301         struct tx_ring_desc *tx_ring_desc;
2302         int i;
2303         struct ob_mac_iocb_req *mac_iocb_ptr;
2304
2305         mac_iocb_ptr = tx_ring->wq_base;
2306         tx_ring_desc = tx_ring->q;
2307         for (i = 0; i < tx_ring->wq_len; i++) {
2308                 tx_ring_desc->index = i;
2309                 tx_ring_desc->skb = NULL;
2310                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2311                 mac_iocb_ptr++;
2312                 tx_ring_desc++;
2313         }
2314         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2315         atomic_set(&tx_ring->queue_stopped, 0);
2316 }
2317
2318 static void ql_free_tx_resources(struct ql_adapter *qdev,
2319                                  struct tx_ring *tx_ring)
2320 {
2321         if (tx_ring->wq_base) {
2322                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2323                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2324                 tx_ring->wq_base = NULL;
2325         }
2326         kfree(tx_ring->q);
2327         tx_ring->q = NULL;
2328 }
2329
2330 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2331                                  struct tx_ring *tx_ring)
2332 {
2333         tx_ring->wq_base =
2334             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2335                                  &tx_ring->wq_base_dma);
2336
2337         if ((tx_ring->wq_base == NULL) ||
2338             (tx_ring->wq_base_dma & WQ_ADDR_ALIGN)) {
2339                 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2340                 return -ENOMEM;
2341         }
2342         tx_ring->q =
2343             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2344         if (tx_ring->q == NULL)
2345                 goto err;
2346
2347         return 0;
2348 err:
2349         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2350                             tx_ring->wq_base, tx_ring->wq_base_dma);
2351         return -ENOMEM;
2352 }
2353
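     /* Release the pages backing the large buffer queue.  Large buffers
      * are chunks carved out of a bigger page allocation; the descriptor
      * holding the last chunk of a block owns the DMA mapping, so the
      * block is unmapped once and each chunk drops its page reference.
      */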
2354 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2355 {
2356         struct bq_desc *lbq_desc;
2357
2358         uint32_t  curr_idx, clean_idx;
2359
2360         curr_idx = rx_ring->lbq_curr_idx;
2361         clean_idx = rx_ring->lbq_clean_idx;
2362         while (curr_idx != clean_idx) {
2363                 lbq_desc = &rx_ring->lbq[curr_idx];
2364
2365                 if (lbq_desc->p.pg_chunk.last_flag) {
2366                         pci_unmap_page(qdev->pdev,
2367                                 lbq_desc->p.pg_chunk.map,
2368                                 ql_lbq_block_size(qdev),
2369                                        PCI_DMA_FROMDEVICE);
2370                         lbq_desc->p.pg_chunk.last_flag = 0;
2371                 }
2372
2373                 put_page(lbq_desc->p.pg_chunk.page);
2374                 lbq_desc->p.pg_chunk.page = NULL;
2375
2376                 if (++curr_idx == rx_ring->lbq_len)
2377                         curr_idx = 0;
2378
2379         }
2380 }
2381
2382 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2383 {
2384         int i;
2385         struct bq_desc *sbq_desc;
2386
2387         for (i = 0; i < rx_ring->sbq_len; i++) {
2388                 sbq_desc = &rx_ring->sbq[i];
2389                 if (sbq_desc == NULL) {
2390                         QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2391                         return;
2392                 }
2393                 if (sbq_desc->p.skb) {
2394                         pci_unmap_single(qdev->pdev,
2395                                          pci_unmap_addr(sbq_desc, mapaddr),
2396                                          pci_unmap_len(sbq_desc, maplen),
2397                                          PCI_DMA_FROMDEVICE);
2398                         dev_kfree_skb(sbq_desc->p.skb);
2399                         sbq_desc->p.skb = NULL;
2400                 }
2401         }
2402 }
2403
2404 /* Free all large and small rx buffers associated
2405  * with the completion queues for this device.
2406  */
2407 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2408 {
2409         int i;
2410         struct rx_ring *rx_ring;
2411
2412         for (i = 0; i < qdev->rx_ring_count; i++) {
2413                 rx_ring = &qdev->rx_ring[i];
2414                 if (rx_ring->lbq)
2415                         ql_free_lbq_buffers(qdev, rx_ring);
2416                 if (rx_ring->sbq)
2417                         ql_free_sbq_buffers(qdev, rx_ring);
2418         }
2419 }
2420
2421 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2422 {
2423         struct rx_ring *rx_ring;
2424         int i;
2425
2426         for (i = 0; i < qdev->rx_ring_count; i++) {
2427                 rx_ring = &qdev->rx_ring[i];
2428                 if (rx_ring->type != TX_Q)
2429                         ql_update_buffer_queues(qdev, rx_ring);
2430         }
2431 }
2432
2433 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2434                                 struct rx_ring *rx_ring)
2435 {
2436         int i;
2437         struct bq_desc *lbq_desc;
2438         __le64 *bq = rx_ring->lbq_base;
2439
2440         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2441         for (i = 0; i < rx_ring->lbq_len; i++) {
2442                 lbq_desc = &rx_ring->lbq[i];
2443                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2444                 lbq_desc->index = i;
2445                 lbq_desc->addr = bq;
2446                 bq++;
2447         }
2448 }
2449
2450 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2451                                 struct rx_ring *rx_ring)
2452 {
2453         int i;
2454         struct bq_desc *sbq_desc;
2455         __le64 *bq = rx_ring->sbq_base;
2456
2457         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2458         for (i = 0; i < rx_ring->sbq_len; i++) {
2459                 sbq_desc = &rx_ring->sbq[i];
2460                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2461                 sbq_desc->index = i;
2462                 sbq_desc->addr = bq;
2463                 bq++;
2464         }
2465 }
2466
2467 static void ql_free_rx_resources(struct ql_adapter *qdev,
2468                                  struct rx_ring *rx_ring)
2469 {
2470         /* Free the small buffer queue. */
2471         if (rx_ring->sbq_base) {
2472                 pci_free_consistent(qdev->pdev,
2473                                     rx_ring->sbq_size,
2474                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2475                 rx_ring->sbq_base = NULL;
2476         }
2477
2478         /* Free the small buffer queue control blocks. */
2479         kfree(rx_ring->sbq);
2480         rx_ring->sbq = NULL;
2481
2482         /* Free the large buffer queue. */
2483         if (rx_ring->lbq_base) {
2484                 pci_free_consistent(qdev->pdev,
2485                                     rx_ring->lbq_size,
2486                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2487                 rx_ring->lbq_base = NULL;
2488         }
2489
2490         /* Free the large buffer queue control blocks. */
2491         kfree(rx_ring->lbq);
2492         rx_ring->lbq = NULL;
2493
2494         /* Free the rx queue. */
2495         if (rx_ring->cq_base) {
2496                 pci_free_consistent(qdev->pdev,
2497                                     rx_ring->cq_size,
2498                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2499                 rx_ring->cq_base = NULL;
2500         }
2501 }
2502
2503 /* Allocate queues and buffers for this completions queue based
2504  * on the values in the parameter structure. */
2505 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2506                                  struct rx_ring *rx_ring)
2507 {
2508
2509         /*
2510          * Allocate the completion queue for this rx_ring.
2511          */
2512         rx_ring->cq_base =
2513             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2514                                  &rx_ring->cq_base_dma);
2515
2516         if (rx_ring->cq_base == NULL) {
2517                 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2518                 return -ENOMEM;
2519         }
2520
2521         if (rx_ring->sbq_len) {
2522                 /*
2523                  * Allocate small buffer queue.
2524                  */
2525                 rx_ring->sbq_base =
2526                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2527                                          &rx_ring->sbq_base_dma);
2528
2529                 if (rx_ring->sbq_base == NULL) {
2530                         QPRINTK(qdev, IFUP, ERR,
2531                                 "Small buffer queue allocation failed.\n");
2532                         goto err_mem;
2533                 }
2534
2535                 /*
2536                  * Allocate small buffer queue control blocks.
2537                  */
2538                 rx_ring->sbq =
2539                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2540                             GFP_KERNEL);
2541                 if (rx_ring->sbq == NULL) {
2542                         QPRINTK(qdev, IFUP, ERR,
2543                                 "Small buffer queue control block allocation failed.\n");
2544                         goto err_mem;
2545                 }
2546
2547                 ql_init_sbq_ring(qdev, rx_ring);
2548         }
2549
2550         if (rx_ring->lbq_len) {
2551                 /*
2552                  * Allocate large buffer queue.
2553                  */
2554                 rx_ring->lbq_base =
2555                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2556                                          &rx_ring->lbq_base_dma);
2557
2558                 if (rx_ring->lbq_base == NULL) {
2559                         QPRINTK(qdev, IFUP, ERR,
2560                                 "Large buffer queue allocation failed.\n");
2561                         goto err_mem;
2562                 }
2563                 /*
2564                  * Allocate large buffer queue control blocks.
2565                  */
2566                 rx_ring->lbq =
2567                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2568                             GFP_KERNEL);
2569                 if (rx_ring->lbq == NULL) {
2570                         QPRINTK(qdev, IFUP, ERR,
2571                                 "Large buffer queue control block allocation failed.\n");
2572                         goto err_mem;
2573                 }
2574
2575                 ql_init_lbq_ring(qdev, rx_ring);
2576         }
2577
2578         return 0;
2579
2580 err_mem:
2581         ql_free_rx_resources(qdev, rx_ring);
2582         return -ENOMEM;
2583 }
2584
2585 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2586 {
2587         struct tx_ring *tx_ring;
2588         struct tx_ring_desc *tx_ring_desc;
2589         int i, j;
2590
2591         /*
2592          * Loop through all queues and free
2593          * any resources.
2594          */
2595         for (j = 0; j < qdev->tx_ring_count; j++) {
2596                 tx_ring = &qdev->tx_ring[j];
2597                 for (i = 0; i < tx_ring->wq_len; i++) {
2598                         tx_ring_desc = &tx_ring->q[i];
2599                         if (tx_ring_desc && tx_ring_desc->skb) {
2600                                 QPRINTK(qdev, IFDOWN, ERR,
2601                                 "Freeing lost SKB %p, from queue %d, index %d.\n",
2602                                         tx_ring_desc->skb, j,
2603                                         tx_ring_desc->index);
2604                                 ql_unmap_send(qdev, tx_ring_desc,
2605                                               tx_ring_desc->map_cnt);
2606                                 dev_kfree_skb(tx_ring_desc->skb);
2607                                 tx_ring_desc->skb = NULL;
2608                         }
2609                 }
2610         }
2611 }
2612
2613 static void ql_free_mem_resources(struct ql_adapter *qdev)
2614 {
2615         int i;
2616
2617         for (i = 0; i < qdev->tx_ring_count; i++)
2618                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2619         for (i = 0; i < qdev->rx_ring_count; i++)
2620                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2621         ql_free_shadow_space(qdev);
2622 }
2623
2624 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2625 {
2626         int i;
2627
2628         /* Allocate space for our shadow registers and such. */
2629         if (ql_alloc_shadow_space(qdev))
2630                 return -ENOMEM;
2631
2632         for (i = 0; i < qdev->rx_ring_count; i++) {
2633                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2634                         QPRINTK(qdev, IFUP, ERR,
2635                                 "RX resource allocation failed.\n");
2636                         goto err_mem;
2637                 }
2638         }
2639         /* Allocate tx queue resources */
2640         for (i = 0; i < qdev->tx_ring_count; i++) {
2641                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2642                         QPRINTK(qdev, IFUP, ERR,
2643                                 "TX resource allocation failed.\n");
2644                         goto err_mem;
2645                 }
2646         }
2647         return 0;
2648
2649 err_mem:
2650         ql_free_mem_resources(qdev);
2651         return -ENOMEM;
2652 }
2653
2654 /* Set up the rx ring control block and pass it to the chip.
2655  * The control block is defined as
2656  * "Completion Queue Initialization Control Block", or cqicb.
2657  */
2658 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2659 {
2660         struct cqicb *cqicb = &rx_ring->cqicb;
2661         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2662                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2663         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2664                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2665         void __iomem *doorbell_area =
2666             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2667         int err = 0;
2668         u16 bq_len;
2669         u64 tmp;
2670         __le64 *base_indirect_ptr;
2671         int page_entries;
2672
2673         /* Set up the shadow registers for this ring. */
2674         rx_ring->prod_idx_sh_reg = shadow_reg;
2675         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2676         *rx_ring->prod_idx_sh_reg = 0;
2677         shadow_reg += sizeof(u64);
2678         shadow_reg_dma += sizeof(u64);
2679         rx_ring->lbq_base_indirect = shadow_reg;
2680         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2681         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2682         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2683         rx_ring->sbq_base_indirect = shadow_reg;
2684         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2685
2686         /* PCI doorbell mem area + 0x00 for consumer index register */
2687         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2688         rx_ring->cnsmr_idx = 0;
2689         rx_ring->curr_entry = rx_ring->cq_base;
2690
2691         /* PCI doorbell mem area + 0x04 for valid register */
2692         rx_ring->valid_db_reg = doorbell_area + 0x04;
2693
2694         /* PCI doorbell mem area + 0x18 for large buffer consumer */
2695         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2696
2697         /* PCI doorbell mem area + 0x1c */
2698         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2699
2700         memset((void *)cqicb, 0, sizeof(struct cqicb));
2701         cqicb->msix_vect = rx_ring->irq;
2702
2703         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2704         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2705
2706         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2707
2708         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2709
2710         /*
2711          * Set up the control block load flags.
2712          */
2713         cqicb->flags = FLAGS_LC |       /* Load queue base address */
2714             FLAGS_LV |          /* Load MSI-X vector */
2715             FLAGS_LI;           /* Load irq delay values */
2716         if (rx_ring->lbq_len) {
2717                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
2718                 tmp = (u64)rx_ring->lbq_base_dma;
2719                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2720                 page_entries = 0;
2721                 do {
2722                         *base_indirect_ptr = cpu_to_le64(tmp);
2723                         tmp += DB_PAGE_SIZE;
2724                         base_indirect_ptr++;
2725                         page_entries++;
2726                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2727                 cqicb->lbq_addr =
2728                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2729                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2730                         (u16) rx_ring->lbq_buf_size;
2731                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2732                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2733                         (u16) rx_ring->lbq_len;
2734                 cqicb->lbq_len = cpu_to_le16(bq_len);
2735                 rx_ring->lbq_prod_idx = 0;
2736                 rx_ring->lbq_curr_idx = 0;
2737                 rx_ring->lbq_clean_idx = 0;
2738                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
2739         }
2740         if (rx_ring->sbq_len) {
2741                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
2742                 tmp = (u64)rx_ring->sbq_base_dma;
2743                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2744                 page_entries = 0;
2745                 do {
2746                         *base_indirect_ptr = cpu_to_le64(tmp);
2747                         tmp += DB_PAGE_SIZE;
2748                         base_indirect_ptr++;
2749                         page_entries++;
2750                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
2751                 cqicb->sbq_addr =
2752                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2753                 cqicb->sbq_buf_size =
2754                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
2755                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2756                         (u16) rx_ring->sbq_len;
2757                 cqicb->sbq_len = cpu_to_le16(bq_len);
2758                 rx_ring->sbq_prod_idx = 0;
2759                 rx_ring->sbq_curr_idx = 0;
2760                 rx_ring->sbq_clean_idx = 0;
2761                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
2762         }
2763         switch (rx_ring->type) {
2764         case TX_Q:
2765                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2766                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2767                 break;
2768         case RX_Q:
2769                 /* Inbound completion handling rx_rings run in
2770                  * separate NAPI contexts.
2771                  */
2772                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2773                                64);
2774                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2775                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2776                 break;
2777         default:
2778                 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2779                         rx_ring->type);
2780         }
2781         QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
2782         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2783                            CFG_LCQ, rx_ring->cq_id);
2784         if (err) {
2785                 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2786                 return err;
2787         }
2788         return err;
2789 }
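/* Illustrative layout of the per-ring shadow area carved up above (a
 * sketch, not part of the driver; sizes follow the pointer arithmetic
 * in ql_start_rx_ring()):
 *
 *      offset 0                     completion queue producer index (u64)
 *      offset 8                     lbq indirect page list,
 *                                   MAX_DB_PAGES_PER_BQ(lbq_len) * u64
 *      offset 8 + lbq list bytes    sbq indirect page list
 *
 * Each indirect entry holds the little-endian DMA address of one
 * DB_PAGE_SIZE chunk of the corresponding buffer queue.
 */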
2790
2791 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2792 {
2793         struct wqicb *wqicb = (struct wqicb *)tx_ring;
2794         void __iomem *doorbell_area =
2795             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2796         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2797             (tx_ring->wq_id * sizeof(u64));
2798         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2799             (tx_ring->wq_id * sizeof(u64));
2800         int err = 0;
2801
2802         /*
2803          * Assign doorbell registers for this tx_ring.
2804          */
2805         /* TX PCI doorbell mem area for tx producer index */
2806         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2807         tx_ring->prod_idx = 0;
2808         /* TX PCI doorbell mem area + 0x04 */
2809         tx_ring->valid_db_reg = doorbell_area + 0x04;
2810
2811         /*
2812          * Assign shadow registers for this tx_ring.
2813          */
2814         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2815         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2816
2817         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2818         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2819                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2820         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2821         wqicb->rid = 0;
2822         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2823
2824         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2825
2826         ql_init_tx_ring(qdev, tx_ring);
2827
2828         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2829                            (u16) tx_ring->wq_id);
2830         if (err) {
2831                 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2832                 return err;
2833         }
2834         QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
2835         return err;
2836 }
2837
2838 static void ql_disable_msix(struct ql_adapter *qdev)
2839 {
2840         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2841                 pci_disable_msix(qdev->pdev);
2842                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2843                 kfree(qdev->msi_x_entry);
2844                 qdev->msi_x_entry = NULL;
2845         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2846                 pci_disable_msi(qdev->pdev);
2847                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2848         }
2849 }
2850
2851 /* We start by trying to get the number of vectors
2852  * stored in qdev->intr_count. If we don't get that
2853  * many then we reduce the count and try again.
2854  */
2855 static void ql_enable_msix(struct ql_adapter *qdev)
2856 {
2857         int i, err;
2858
2859         /* Get the MSIX vectors. */
2860         if (irq_type == MSIX_IRQ) {
2861                 /* Try to alloc space for the msix struct,
2862                  * if it fails then go to MSI/legacy.
2863                  */
2864                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
2865                                             sizeof(struct msix_entry),
2866                                             GFP_KERNEL);
2867                 if (!qdev->msi_x_entry) {
2868                         irq_type = MSI_IRQ;
2869                         goto msi;
2870                 }
2871
2872                 for (i = 0; i < qdev->intr_count; i++)
2873                         qdev->msi_x_entry[i].entry = i;
2874
2875                 /* Loop to get our vectors.  We start with
2876                  * what we want and settle for what we get.
2877                  */
2878                 do {
2879                         err = pci_enable_msix(qdev->pdev,
2880                                 qdev->msi_x_entry, qdev->intr_count);
2881                         if (err > 0)
2882                                 qdev->intr_count = err;
2883                 } while (err > 0);
2884
2885                 if (err < 0) {
2886                         kfree(qdev->msi_x_entry);
2887                         qdev->msi_x_entry = NULL;
2888                         QPRINTK(qdev, IFUP, WARNING,
2889                                 "MSI-X Enable failed, trying MSI.\n");
2890                         qdev->intr_count = 1;
2891                         irq_type = MSI_IRQ;
2892                 } else if (err == 0) {
2893                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
2894                         QPRINTK(qdev, IFUP, INFO,
2895                                 "MSI-X Enabled, got %d vectors.\n",
2896                                 qdev->intr_count);
2897                         return;
2898                 }
2899         }
2900 msi:
2901         qdev->intr_count = 1;
2902         if (irq_type == MSI_IRQ) {
2903                 if (!pci_enable_msi(qdev->pdev)) {
2904                         set_bit(QL_MSI_ENABLED, &qdev->flags);
2905                         QPRINTK(qdev, IFUP, INFO,
2906                                 "Running with MSI interrupts.\n");
2907                         return;
2908                 }
2909         }
2910         irq_type = LEG_IRQ;
2911         QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2912 }
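/* A sketch of the negotiation above (illustrative only): it relies on
 * the pci_enable_msix() return convention, where a positive return is
 * the number of vectors the platform could actually grant.  Asking for
 * 8 vectors on a system that can only grant 4 looks like:
 *
 *      err = pci_enable_msix(pdev, entries, 8);        returns 4
 *      err = pci_enable_msix(pdev, entries, 4);        returns 0
 *
 * so qdev->intr_count settles at the largest count that succeeds.
 */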
2913
2914 /* Each vector services 1 RSS ring and 1 or more
2915  * TX completion rings.  This function loops through
2916  * the TX completion rings and assigns the vector that
2917  * will service it.  An example would be if there are
2918  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2919  * This would mean that vector 0 would service RSS ring 0
2920  * and TX completion rings 0,1,2 and 3.  Vector 1 would
2921  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2922  */
2923 static void ql_set_tx_vect(struct ql_adapter *qdev)
2924 {
2925         int i, j, vect;
2926         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2927
2928         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2929                 /* Assign irq vectors to TX rx_rings.*/
2930                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2931                                          i < qdev->rx_ring_count; i++) {
2932                         if (j == tx_rings_per_vector) {
2933                                 vect++;
2934                                 j = 0;
2935                         }
2936                         qdev->rx_ring[i].irq = vect;
2937                         j++;
2938                 }
2939         } else {
2940                 /* For a single vector all rings have an irq
2941                  * of zero.
2942                  */
2943                 for (i = 0; i < qdev->rx_ring_count; i++)
2944                         qdev->rx_ring[i].irq = 0;
2945         }
2946 }
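/* Worked example of the assignment above (illustrative): with
 * intr_count = 2 and tx_ring_count = 8, tx_rings_per_vector = 4, so the
 * TX completion rings that follow the RSS rings in the rx_ring array
 * get:
 *
 *      rx_ring[rss_ring_count + 0 .. 3].irq = 0
 *      rx_ring[rss_ring_count + 4 .. 7].irq = 1
 */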
2947
2948 /* Set the interrupt mask for this vector.  Each vector
2949  * will service 1 RSS ring and 1 or more TX completion
2950  * rings.  This function sets up a bit mask per vector
2951  * that indicates which rings it services.
2952  */
2953 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2954 {
2955         int j, vect = ctx->intr;
2956         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2957
2958         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2959                 /* Add the RSS ring serviced by this vector
2960                  * to the mask.
2961                  */
2962                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2963                 /* Add the TX ring(s) serviced by this vector
2964                  * to the mask. */
2965                 for (j = 0; j < tx_rings_per_vector; j++) {
2966                         ctx->irq_mask |=
2967                         (1 << qdev->rx_ring[qdev->rss_ring_count +
2968                         (vect * tx_rings_per_vector) + j].cq_id);
2969                 }
2970         } else {
2971                 /* For a single vector, set a bit in the mask for
2972                  * every queue's completion queue ID.
2973                  */
2974                 for (j = 0; j < qdev->rx_ring_count; j++)
2975                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2976         }
2977 }
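/* Worked example of the mask built above (illustrative, assuming cq_id
 * equals the ring index as set up in ql_configure_rings()): with 2
 * vectors and 8 TX completion rings, vector 0 ends up with
 *
 *      irq_mask = (1 << 0)                          RSS ring 0
 *               | (1 << 2) | (1 << 3)
 *               | (1 << 4) | (1 << 5)               its 4 TX completion rings
 */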
2978
2979 /*
2980  * Here we build the intr_context structures based on
2981  * our rx_ring count and intr vector count.
2982  * The intr_context structure is used to hook each vector
2983  * to possibly different handlers.
2984  */
2985 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2986 {
2987         int i = 0;
2988         struct intr_context *intr_context = &qdev->intr_context[0];
2989
2990         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2991                 /* Each rx_ring has its
2992                  * own intr_context since we have separate
2993                  * vectors for each queue.
2994                  */
2995                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2996                         qdev->rx_ring[i].irq = i;
2997                         intr_context->intr = i;
2998                         intr_context->qdev = qdev;
2999                         /* Set up this vector's bit-mask that indicates
3000                          * which queues it services.
3001                          */
3002                         ql_set_irq_mask(qdev, intr_context);
3003                         /*
3004                          * We set up each vector's enable/disable/read bits so
3005                          * there are no bit/mask calculations in the critical path.
3006                          */
3007                         intr_context->intr_en_mask =
3008                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3009                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3010                             | i;
3011                         intr_context->intr_dis_mask =
3012                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3013                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3014                             INTR_EN_IHD | i;
3015                         intr_context->intr_read_mask =
3016                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3017                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3018                             i;
3019                         if (i == 0) {
3020                                 /* The first vector/queue handles
3021                                  * broadcast/multicast, fatal errors,
3022                                  * and firmware events.  This is in addition
3023                                  * to normal inbound NAPI processing.
3024                                  */
3025                                 intr_context->handler = qlge_isr;
3026                                 sprintf(intr_context->name, "%s-rx-%d",
3027                                         qdev->ndev->name, i);
3028                         } else {
3029                                 /*
3030                                  * Inbound queues handle unicast frames only.
3031                                  */
3032                                 intr_context->handler = qlge_msix_rx_isr;
3033                                 sprintf(intr_context->name, "%s-rx-%d",
3034                                         qdev->ndev->name, i);
3035                         }
3036                 }
3037         } else {
3038                 /*
3039                  * All rx_rings use the same intr_context since
3040                  * there is only one vector.
3041                  */
3042                 intr_context->intr = 0;
3043                 intr_context->qdev = qdev;
3044                 /*
3045                  * We set up each vector's enable/disable/read bits so
3046                  * there are no bit/mask calculations in the critical path.
3047                  */
3048                 intr_context->intr_en_mask =
3049                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3050                 intr_context->intr_dis_mask =
3051                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3052                     INTR_EN_TYPE_DISABLE;
3053                 intr_context->intr_read_mask =
3054                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3055                 /*
3056                  * Single interrupt means one handler for all rings.
3057                  */
3058                 intr_context->handler = qlge_isr;
3059                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3060                 /* Set up this vector's bit-mask that indicates
3061                  * which queues it services. In this case there is
3062                  * a single vector so it will service all RSS and
3063                  * TX completion rings.
3064                  */
3065                 ql_set_irq_mask(qdev, intr_context);
3066         }
3067         /* Tell the TX completion rings which MSI-X vector
3068          * they will be using.
3069          */
3070         ql_set_tx_vect(qdev);
3071 }
3072
3073 static void ql_free_irq(struct ql_adapter *qdev)
3074 {
3075         int i;
3076         struct intr_context *intr_context = &qdev->intr_context[0];
3077
3078         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3079                 if (intr_context->hooked) {
3080                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3081                                 free_irq(qdev->msi_x_entry[i].vector,
3082                                          &qdev->rx_ring[i]);
3083                                 QPRINTK(qdev, IFDOWN, DEBUG,
3084                                         "freeing msix interrupt %d.\n", i);
3085                         } else {
3086                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3087                                 QPRINTK(qdev, IFDOWN, DEBUG,
3088                                         "freeing msi interrupt %d.\n", i);
3089                         }
3090                 }
3091         }
3092         ql_disable_msix(qdev);
3093 }
3094
3095 static int ql_request_irq(struct ql_adapter *qdev)
3096 {
3097         int i;
3098         int status = 0;
3099         struct pci_dev *pdev = qdev->pdev;
3100         struct intr_context *intr_context = &qdev->intr_context[0];
3101
3102         ql_resolve_queues_to_irqs(qdev);
3103
3104         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3105                 atomic_set(&intr_context->irq_cnt, 0);
3106                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3107                         status = request_irq(qdev->msi_x_entry[i].vector,
3108                                              intr_context->handler,
3109                                              0,
3110                                              intr_context->name,
3111                                              &qdev->rx_ring[i]);
3112                         if (status) {
3113                                 QPRINTK(qdev, IFUP, ERR,
3114                                         "Failed request for MSIX interrupt %d.\n",
3115                                         i);
3116                                 goto err_irq;
3117                         } else {
3118                                 QPRINTK(qdev, IFUP, DEBUG,
3119                                         "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3120                                         i,
3121                                         qdev->rx_ring[i].type ==
3122                                         DEFAULT_Q ? "DEFAULT_Q" : "",
3123                                         qdev->rx_ring[i].type ==
3124                                         TX_Q ? "TX_Q" : "",
3125                                         qdev->rx_ring[i].type ==
3126                                         RX_Q ? "RX_Q" : "", intr_context->name);
3127                         }
3128                 } else {
3129                         QPRINTK(qdev, IFUP, DEBUG,
3130                                 "trying msi or legacy interrupts.\n");
3131                         QPRINTK(qdev, IFUP, DEBUG,
3132                                 "%s: irq = %d.\n", __func__, pdev->irq);
3133                         QPRINTK(qdev, IFUP, DEBUG,
3134                                 "%s: context->name = %s.\n", __func__,
3135                                intr_context->name);
3136                         QPRINTK(qdev, IFUP, DEBUG,
3137                                 "%s: dev_id = 0x%p.\n", __func__,
3138                                &qdev->rx_ring[0]);
3139                         status =
3140                             request_irq(pdev->irq, qlge_isr,
3141                                         test_bit(QL_MSI_ENABLED,
3142                                                  &qdev->
3143                                                  flags) ? 0 : IRQF_SHARED,
3144                                         intr_context->name, &qdev->rx_ring[0]);
3145                         if (status)
3146                                 goto err_irq;
3147
3148                         QPRINTK(qdev, IFUP, DEBUG,
3149                                 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3150                                 i,
3151                                 qdev->rx_ring[0].type ==
3152                                 DEFAULT_Q ? "DEFAULT_Q" : "",
3153                                 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3154                                 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3155                                 intr_context->name);
3156                 }
3157                 intr_context->hooked = 1;
3158         }
3159         return status;
3160 err_irq:
3161         QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3162         ql_free_irq(qdev);
3163         return status;
3164 }
3165
3166 static int ql_start_rss(struct ql_adapter *qdev)
3167 {
3168         u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3169                                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3170                                 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3171                                 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3172                                 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3173                                 0xbe, 0xac, 0x01, 0xfa};
3174         struct ricb *ricb = &qdev->ricb;
3175         int status = 0;
3176         int i;
3177         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3178
3179         memset((void *)ricb, 0, sizeof(*ricb));
3180
3181         ricb->base_cq = RSS_L4K;
3182         ricb->flags =
3183                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3184         ricb->mask = cpu_to_le16((u16)(0x3ff));
3185
3186         /*
3187          * Fill out the Indirection Table.
3188          */
3189         for (i = 0; i < 1024; i++)
3190                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3191
3192         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3193         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3194
3195         QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3196
3197         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3198         if (status) {
3199                 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3200                 return status;
3201         }
3202         QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
3203         return status;
3204 }
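/* Illustrative view of the indirection table filled in above: with
 * rss_ring_count = 4 (a power of two), the 1024 entries simply cycle
 * through the RSS completion queue IDs,
 *
 *      hash_cq_id[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 *
 * so the RSS hash spreads inbound flows evenly across the RSS rings.
 */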
3205
3206 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3207 {
3208         int i, status = 0;
3209
3210         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3211         if (status)
3212                 return status;
3213         /* Clear all the entries in the routing table. */
3214         for (i = 0; i < 16; i++) {
3215                 status = ql_set_routing_reg(qdev, i, 0, 0);
3216                 if (status) {
3217                         QPRINTK(qdev, IFUP, ERR,
3218                                 "Failed to init routing register for CAM "
3219                                 "packets.\n");
3220                         break;
3221                 }
3222         }
3223         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3224         return status;
3225 }
3226
3227 /* Initialize the frame-to-queue routing. */
3228 static int ql_route_initialize(struct ql_adapter *qdev)
3229 {
3230         int status = 0;
3231
3232         /* Clear all the entries in the routing table. */
3233         status = ql_clear_routing_entries(qdev);
3234         if (status)
3235                 return status;
3236
3237         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3238         if (status)
3239                 return status;
3240
3241         status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3242         if (status) {
3243                 QPRINTK(qdev, IFUP, ERR,
3244                         "Failed to init routing register for error packets.\n");
3245                 goto exit;
3246         }
3247         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3248         if (status) {
3249                 QPRINTK(qdev, IFUP, ERR,
3250                         "Failed to init routing register for broadcast packets.\n");
3251                 goto exit;
3252         }
3253         /* If we have more than one inbound queue, then turn on RSS in the
3254          * routing block.
3255          */
3256         if (qdev->rss_ring_count > 1) {
3257                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3258                                         RT_IDX_RSS_MATCH, 1);
3259                 if (status) {
3260                         QPRINTK(qdev, IFUP, ERR,
3261                                 "Failed to init routing register for MATCH RSS packets.\n");
3262                         goto exit;
3263                 }
3264         }
3265
3266         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3267                                     RT_IDX_CAM_HIT, 1);
3268         if (status)
3269                 QPRINTK(qdev, IFUP, ERR,
3270                         "Failed to init routing register for CAM packets.\n");
3271 exit:
3272         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3273         return status;
3274 }
3275
3276 int ql_cam_route_initialize(struct ql_adapter *qdev)
3277 {
3278         int status, set;
3279
3280         /* Check if the link is up and use that to
3281          * determine whether we are setting or clearing
3282          * the MAC address in the CAM.
3283          */
3284         set = ql_read32(qdev, STS);
3285         set &= qdev->port_link_up;
3286         status = ql_set_mac_addr(qdev, set);
3287         if (status) {
3288                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3289                 return status;
3290         }
3291
3292         status = ql_route_initialize(qdev);
3293         if (status)
3294                 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3295
3296         return status;
3297 }
3298
3299 static int ql_adapter_initialize(struct ql_adapter *qdev)
3300 {
3301         u32 value, mask;
3302         int i;
3303         int status = 0;
3304
3305         /*
3306          * Set up the System register to halt on errors.
3307          */
3308         value = SYS_EFE | SYS_FAE;
3309         mask = value << 16;
3310         ql_write32(qdev, SYS, mask | value);
3311
3312         /* Set the default queue, and VLAN behavior. */
3313         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3314         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3315         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3316
3317         /* Set the MPI interrupt to enabled. */
3318         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3319
3320         /* Enable the function, set pagesize, enable error checking. */
3321         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3322             FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3323
3324         /* Set/clear header splitting. */
3325         mask = FSC_VM_PAGESIZE_MASK |
3326             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3327         ql_write32(qdev, FSC, mask | value);
3328
3329         ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3330                 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3331
3332         /* Set RX packet routing to use the port/pci function on which
3333          * the packet arrived, in addition to the usual frame routing.
3334          * This is helpful on bonding where both interfaces can have
3335          * the same MAC address.
3336          */
3337         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3338
3339         /* Start up the rx queues. */
3340         for (i = 0; i < qdev->rx_ring_count; i++) {
3341                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3342                 if (status) {
3343                         QPRINTK(qdev, IFUP, ERR,
3344                                 "Failed to start rx ring[%d].\n", i);
3345                         return status;
3346                 }
3347         }
3348
3349         /* If there is more than one inbound completion queue
3350          * then download a RICB to configure RSS.
3351          */
3352         if (qdev->rss_ring_count > 1) {
3353                 status = ql_start_rss(qdev);
3354                 if (status) {
3355                         QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3356                         return status;
3357                 }
3358         }
3359
3360         /* Start up the tx queues. */
3361         for (i = 0; i < qdev->tx_ring_count; i++) {
3362                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3363                 if (status) {
3364                         QPRINTK(qdev, IFUP, ERR,
3365                                 "Failed to start tx ring[%d].\n", i);
3366                         return status;
3367                 }
3368         }
3369
3370         /* Initialize the port and set the max framesize. */
3371         status = qdev->nic_ops->port_initialize(qdev);
3372         if (status)
3373                 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3374
3375         /* Set up the MAC address and frame routing filter. */
3376         status = ql_cam_route_initialize(qdev);
3377         if (status) {
3378                 QPRINTK(qdev, IFUP, ERR,
3379                                 "Failed to init CAM/Routing tables.\n");
3380                 return status;
3381         }
3382
3383         /* Start NAPI for the RSS queues. */
3384         for (i = 0; i < qdev->rss_ring_count; i++) {
3385                 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
3386                         i);
3387                 napi_enable(&qdev->rx_ring[i].napi);
3388         }
3389
3390         return status;
3391 }
3392
3393 /* Issue soft reset to chip. */
3394 static int ql_adapter_reset(struct ql_adapter *qdev)
3395 {
3396         u32 value;
3397         int status = 0;
3398         unsigned long end_jiffies;
3399
3400         /* Clear all the entries in the routing table. */
3401         status = ql_clear_routing_entries(qdev);
3402         if (status) {
3403                 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3404                 return status;
3405         }
3406
3407         end_jiffies = jiffies +
3408                 max((unsigned long)1, usecs_to_jiffies(30));
3409
3410         /* Stop management traffic. */
3411         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3412
3413         /* Wait for the NIC and MGMNT FIFOs to empty. */
3414         ql_wait_fifo_empty(qdev);
3415
3416         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3417
3418         do {
3419                 value = ql_read32(qdev, RST_FO);
3420                 if ((value & RST_FO_FR) == 0)
3421                         break;
3422                 cpu_relax();
3423         } while (time_before(jiffies, end_jiffies));
3424
3425         if (value & RST_FO_FR) {
3426                 QPRINTK(qdev, IFDOWN, ERR,
3427                         "Timed out waiting for the chip reset to complete!\n");
3428                 status = -ETIMEDOUT;
3429         }
3430
3431         /* Resume management traffic. */
3432         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3433         return status;
3434 }
3435
3436 static void ql_display_dev_info(struct net_device *ndev)
3437 {
3438         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3439
3440         QPRINTK(qdev, PROBE, INFO,
3441                 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3442                 "XG Roll = %d, XG Rev = %d.\n",
3443                 qdev->func,
3444                 qdev->port,
3445                 qdev->chip_rev_id & 0x0000000f,
3446                 qdev->chip_rev_id >> 4 & 0x0000000f,
3447                 qdev->chip_rev_id >> 8 & 0x0000000f,
3448                 qdev->chip_rev_id >> 12 & 0x0000000f);
3449         QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3450 }
3451
3452 static int ql_adapter_down(struct ql_adapter *qdev)
3453 {
3454         int i, status = 0;
3455
3456         ql_link_off(qdev);
3457
3458         /* Don't kill the reset worker thread if we
3459          * are in the process of recovery.
3460          */
3461         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3462                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3463         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3464         cancel_delayed_work_sync(&qdev->mpi_work);
3465         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3466         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3467
3468         for (i = 0; i < qdev->rss_ring_count; i++)
3469                 napi_disable(&qdev->rx_ring[i].napi);
3470
3471         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3472
3473         ql_disable_interrupts(qdev);
3474
3475         ql_tx_ring_clean(qdev);
3476
3477         /* Call netif_napi_del() from a common point.
3478          */
3479         for (i = 0; i < qdev->rss_ring_count; i++)
3480                 netif_napi_del(&qdev->rx_ring[i].napi);
3481
3482         ql_free_rx_buffers(qdev);
3483
3484         status = ql_adapter_reset(qdev);
3485         if (status)
3486                 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3487                         qdev->func);
3488         return status;
3489 }
3490
3491 static int ql_adapter_up(struct ql_adapter *qdev)
3492 {
3493         int err = 0;
3494
3495         err = ql_adapter_initialize(qdev);
3496         if (err) {
3497                 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3498                 goto err_init;
3499         }
3500         set_bit(QL_ADAPTER_UP, &qdev->flags);
3501         ql_alloc_rx_buffers(qdev);
3502         /* If the port is initialized and the
3503          * link is up then turn on the carrier.
3504          */
3505         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3506                         (ql_read32(qdev, STS) & qdev->port_link_up))
3507                 ql_link_on(qdev);
3508         ql_enable_interrupts(qdev);
3509         ql_enable_all_completion_interrupts(qdev);
3510         netif_tx_start_all_queues(qdev->ndev);
3511
3512         return 0;
3513 err_init:
3514         ql_adapter_reset(qdev);
3515         return err;
3516 }
3517
3518 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3519 {
3520         ql_free_mem_resources(qdev);
3521         ql_free_irq(qdev);
3522 }
3523
3524 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3525 {
3526         int status = 0;
3527
3528         if (ql_alloc_mem_resources(qdev)) {
3529                 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3530                 return -ENOMEM;
3531         }
3532         status = ql_request_irq(qdev);
3533         return status;
3534 }
3535
3536 static int qlge_close(struct net_device *ndev)
3537 {
3538         struct ql_adapter *qdev = netdev_priv(ndev);
3539
3540         /*
3541          * Wait for device to recover from a reset.
3542          * (Rarely happens, but possible.)
3543          */
3544         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3545                 msleep(1);
3546         ql_adapter_down(qdev);
3547         ql_release_adapter_resources(qdev);
3548         return 0;
3549 }
3550
3551 static int ql_configure_rings(struct ql_adapter *qdev)
3552 {
3553         int i;
3554         struct rx_ring *rx_ring;
3555         struct tx_ring *tx_ring;
3556         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3557         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3558                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3559
3560         qdev->lbq_buf_order = get_order(lbq_buf_len);
3561
3562         /* In a perfect world we have one RSS ring for each CPU
3563          * and each has its own vector.  To do that we ask for
3564          * cpu_cnt vectors.  ql_enable_msix() will adjust the
3565          * vector count to what we actually get.  We then
3566          * allocate an RSS ring for each.
3567          * Essentially, we are doing min(cpu_count, msix_vector_count).
3568          */
3569         qdev->intr_count = cpu_cnt;
3570         ql_enable_msix(qdev);
3571         /* Adjust the RSS ring count to the actual vector count. */
3572         qdev->rss_ring_count = qdev->intr_count;
3573         qdev->tx_ring_count = cpu_cnt;
3574         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3575
3576         for (i = 0; i < qdev->tx_ring_count; i++) {
3577                 tx_ring = &qdev->tx_ring[i];
3578                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3579                 tx_ring->qdev = qdev;
3580                 tx_ring->wq_id = i;
3581                 tx_ring->wq_len = qdev->tx_ring_size;
3582                 tx_ring->wq_size =
3583                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3584
3585                 /*
3586                  * The completion queue IDs for the tx rings start
3587                  * immediately after the rss rings.
3588                  */
3589                 tx_ring->cq_id = qdev->rss_ring_count + i;
3590         }
3591
3592         for (i = 0; i < qdev->rx_ring_count; i++) {
3593                 rx_ring = &qdev->rx_ring[i];
3594                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3595                 rx_ring->qdev = qdev;
3596                 rx_ring->cq_id = i;
3597                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
3598                 if (i < qdev->rss_ring_count) {
3599                         /*
3600                          * Inbound (RSS) queues.
3601                          */
3602                         rx_ring->cq_len = qdev->rx_ring_size;
3603                         rx_ring->cq_size =
3604                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3605                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3606                         rx_ring->lbq_size =
3607                             rx_ring->lbq_len * sizeof(__le64);
3608                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3609                         QPRINTK(qdev, IFUP, DEBUG,
3610                                 "lbq_buf_size %d, order = %d\n",
3611                                 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
3612                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3613                         rx_ring->sbq_size =
3614                             rx_ring->sbq_len * sizeof(__le64);
3615                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
3616                         rx_ring->type = RX_Q;
3617                 } else {
3618                         /*
3619                          * Outbound queue handles outbound completions only.
3620                          */
3621                         /* outbound cq is same size as tx_ring it services. */
3622                         rx_ring->cq_len = qdev->tx_ring_size;
3623                         rx_ring->cq_size =
3624                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3625                         rx_ring->lbq_len = 0;
3626                         rx_ring->lbq_size = 0;
3627                         rx_ring->lbq_buf_size = 0;
3628                         rx_ring->sbq_len = 0;
3629                         rx_ring->sbq_size = 0;
3630                         rx_ring->sbq_buf_size = 0;
3631                         rx_ring->type = TX_Q;
3632                 }
3633         }
3634         return 0;
3635 }
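/* Illustrative summary of the MTU-driven buffer sizing above (the
 * actual byte counts come from the LARGE_BUFFER_MIN_SIZE/MAX_SIZE
 * definitions in qlge.h):
 *
 *      mtu <= 1500   ->  lbq_buf_size = LARGE_BUFFER_MIN_SIZE
 *      mtu  > 1500   ->  lbq_buf_size = LARGE_BUFFER_MAX_SIZE
 *
 * and qdev->lbq_buf_order = get_order(lbq_buf_size) gives the page
 * order used when the large receive buffers are allocated.
 */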
3636
3637 static int qlge_open(struct net_device *ndev)
3638 {
3639         int err = 0;
3640         struct ql_adapter *qdev = netdev_priv(ndev);
3641
3642         err = ql_configure_rings(qdev);
3643         if (err)
3644                 return err;
3645
3646         err = ql_get_adapter_resources(qdev);
3647         if (err)
3648                 goto error_up;
3649
3650         err = ql_adapter_up(qdev);
3651         if (err)
3652                 goto error_up;
3653
3654         return err;
3655
3656 error_up:
3657         ql_release_adapter_resources(qdev);
3658         return err;
3659 }
3660
3661 static int ql_change_rx_buffers(struct ql_adapter *qdev)
3662 {
3663         struct rx_ring *rx_ring;
3664         int i, status;
3665         u32 lbq_buf_len;
3666
3667         /* Wait for an outstanding reset to complete. */
3668         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3669                 int i = 3;
3670                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3671                         QPRINTK(qdev, IFUP, ERR,
3672                                  "Waiting for adapter UP...\n");
3673                         ssleep(1);
3674                 }
3675
3676                 if (!i) {
3677                         QPRINTK(qdev, IFUP, ERR,
3678                          "Timed out waiting for adapter UP\n");
3679                         return -ETIMEDOUT;
3680                 }
3681         }
3682
3683         status = ql_adapter_down(qdev);
3684         if (status)
3685                 goto error;
3686
3687         /* Get the new rx buffer size. */
3688         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3689                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3690         qdev->lbq_buf_order = get_order(lbq_buf_len);
3691
3692         for (i = 0; i < qdev->rss_ring_count; i++) {
3693                 rx_ring = &qdev->rx_ring[i];
3694                 /* Set the new size. */
3695                 rx_ring->lbq_buf_size = lbq_buf_len;
3696         }
3697
3698         status = ql_adapter_up(qdev);
3699         if (status)
3700                 goto error;
3701
3702         return status;
3703 error:
3704         QPRINTK(qdev, IFUP, ALERT,
3705                 "Driver up/down cycle failed, closing device.\n");
3706         set_bit(QL_ADAPTER_UP, &qdev->flags);
3707         dev_close(qdev->ndev);
3708         return status;
3709 }
3710
3711 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3712 {
3713         struct ql_adapter *qdev = netdev_priv(ndev);
3714         int status;
3715
3716         if (ndev->mtu == 1500 && new_mtu == 9000) {
3717                 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3718         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3719                 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3720         } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3721                    (ndev->mtu == 9000 && new_mtu == 9000)) {
3722                 return 0;
3723         } else
3724                 return -EINVAL;
3725
3726         queue_delayed_work(qdev->workqueue,
3727                         &qdev->mpi_port_cfg_work, 3*HZ);
3728
3729         if (!netif_running(qdev->ndev)) {
3730                 ndev->mtu = new_mtu;
3731                 return 0;
3732         }
3733
3734         ndev->mtu = new_mtu;
3735         status = ql_change_rx_buffers(qdev);
3736         if (status) {
3737                 QPRINTK(qdev, IFUP, ERR,
3738                         "Changing MTU failed.\n");
3739         }
3740
3741         return status;
3742 }
3743
3744 static struct net_device_stats *qlge_get_stats(struct net_device
3745                                                *ndev)
3746 {
3747         return &ndev->stats;
3748 }
3749
3750 static void qlge_set_multicast_list(struct net_device *ndev)
3751 {
3752         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3753         struct dev_mc_list *mc_ptr;
3754         int i, status;
3755
3756         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3757         if (status)
3758                 return;
3759         /*
3760          * Set or clear promiscuous mode if a
3761          * transition is taking place.
3762          */
3763         if (ndev->flags & IFF_PROMISC) {
3764                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3765                         if (ql_set_routing_reg
3766                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3767                                 QPRINTK(qdev, HW, ERR,
3768                                         "Failed to set promiscuous mode.\n");
3769                         } else {
3770                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
3771                         }
3772                 }
3773         } else {
3774                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3775                         if (ql_set_routing_reg
3776                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3777                                 QPRINTK(qdev, HW, ERR,
3778                                         "Failed to clear promiscuous mode.\n");
3779                         } else {
3780                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3781                         }
3782                 }
3783         }
3784
3785         /*
3786          * Set or clear all multicast mode if a
3787          * transition is taking place.
3788          */
3789         if ((ndev->flags & IFF_ALLMULTI) ||
3790             (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3791                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3792                         if (ql_set_routing_reg
3793                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3794                                 QPRINTK(qdev, HW, ERR,
3795                                         "Failed to set all-multi mode.\n");
3796                         } else {
3797                                 set_bit(QL_ALLMULTI, &qdev->flags);
3798                         }
3799                 }
3800         } else {
3801                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3802                         if (ql_set_routing_reg
3803                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3804                                 QPRINTK(qdev, HW, ERR,
3805                                         "Failed to clear all-multi mode.\n");
3806                         } else {
3807                                 clear_bit(QL_ALLMULTI, &qdev->flags);
3808                         }
3809                 }
3810         }
3811
3812         if (ndev->mc_count) {
3813                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3814                 if (status)
3815                         goto exit;
3816                 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3817                      i++, mc_ptr = mc_ptr->next)
3818                         if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3819                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3820                                 QPRINTK(qdev, HW, ERR,
3821                                         "Failed to load multicast address.\n");
3822                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3823                                 goto exit;
3824                         }
3825                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3826                 if (ql_set_routing_reg
3827                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3828                         QPRINTK(qdev, HW, ERR,
3829                                 "Failed to set multicast match mode.\n");
3830                 } else {
3831                         set_bit(QL_ALLMULTI, &qdev->flags);
3832                 }
3833         }
3834 exit:
3835         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3836 }
3837
3838 static int qlge_set_mac_address(struct net_device *ndev, void *p)
3839 {
3840         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3841         struct sockaddr *addr = p;
3842         int status;
3843
3844         if (netif_running(ndev))
3845                 return -EBUSY;
3846
3847         if (!is_valid_ether_addr(addr->sa_data))
3848                 return -EADDRNOTAVAIL;
3849         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3850
3851         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3852         if (status)
3853                 return status;
3854         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3855                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3856         if (status)
3857                 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3858         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3859         return status;
3860 }
3861
3862 static void qlge_tx_timeout(struct net_device *ndev)
3863 {
3864         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3865         ql_queue_asic_error(qdev);
3866 }
3867
3868 static void ql_asic_reset_work(struct work_struct *work)
3869 {
3870         struct ql_adapter *qdev =
3871             container_of(work, struct ql_adapter, asic_reset_work.work);
3872         int status;
3873         rtnl_lock();
3874         status = ql_adapter_down(qdev);
3875         if (status)
3876                 goto error;
3877
3878         status = ql_adapter_up(qdev);
3879         if (status)
3880                 goto error;
3881
3882         /* Restore rx mode. */
3883         clear_bit(QL_ALLMULTI, &qdev->flags);
3884         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3885         qlge_set_multicast_list(qdev->ndev);
3886
3887         rtnl_unlock();
3888         return;
3889 error:
3890         QPRINTK(qdev, IFUP, ALERT,
3891                 "Driver up/down cycle failed, closing device\n");
3892
3893         set_bit(QL_ADAPTER_UP, &qdev->flags);
3894         dev_close(qdev->ndev);
3895         rtnl_unlock();
3896 }
3897
3898 static struct nic_operations qla8012_nic_ops = {
3899         .get_flash              = ql_get_8012_flash_params,
3900         .port_initialize        = ql_8012_port_initialize,
3901 };
3902
3903 static struct nic_operations qla8000_nic_ops = {
3904         .get_flash              = ql_get_8000_flash_params,
3905         .port_initialize        = ql_8000_port_initialize,
3906 };
3907
3908 /* Find the pcie function number for the other NIC
3909  * on this chip.  Since both NIC functions share a
3910  * common firmware we have the lowest enabled function
3911  * do any common work.  Examples would be resetting
3912  * after a fatal firmware error, or doing a firmware
3913  * coredump.
3914  */
3915 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3916 {
3917         int status = 0;
3918         u32 temp;
3919         u32 nic_func1, nic_func2;
3920
3921         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3922                         &temp);
3923         if (status)
3924                 return status;
3925
3926         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3927                         MPI_TEST_NIC_FUNC_MASK);
3928         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3929                         MPI_TEST_NIC_FUNC_MASK);
3930
3931         if (qdev->func == nic_func1)
3932                 qdev->alt_func = nic_func2;
3933         else if (qdev->func == nic_func2)
3934                 qdev->alt_func = nic_func1;
3935         else
3936                 status = -EIO;
3937
3938         return status;
3939 }
3940
3941 static int ql_get_board_info(struct ql_adapter *qdev)
3942 {
3943         int status;
3944         qdev->func =
3945             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3946         if (qdev->func > 3)
3947                 return -EIO;
3948
3949         status = ql_get_alt_pcie_func(qdev);
3950         if (status)
3951                 return status;
3952
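        /* The lower-numbered of the two NIC functions is port 0; the port
         * selects the XGMAC semaphore, link/init status bits and mailboxes.
         */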
3953         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3954         if (qdev->port) {
3955                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3956                 qdev->port_link_up = STS_PL1;
3957                 qdev->port_init = STS_PI1;
3958                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3959                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3960         } else {
3961                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3962                 qdev->port_link_up = STS_PL0;
3963                 qdev->port_init = STS_PI0;
3964                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3965                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3966         }
3967         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3968         qdev->device_id = qdev->pdev->device;
3969         if (qdev->device_id == QLGE_DEVICE_ID_8012)
3970                 qdev->nic_ops = &qla8012_nic_ops;
3971         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3972                 qdev->nic_ops = &qla8000_nic_ops;
3973         return status;
3974 }
3975
3976 static void ql_release_all(struct pci_dev *pdev)
3977 {
3978         struct net_device *ndev = pci_get_drvdata(pdev);
3979         struct ql_adapter *qdev = netdev_priv(ndev);
3980
3981         if (qdev->workqueue) {
3982                 destroy_workqueue(qdev->workqueue);
3983                 qdev->workqueue = NULL;
3984         }
3985
3986         if (qdev->reg_base)
3987                 iounmap(qdev->reg_base);
3988         if (qdev->doorbell_area)
3989                 iounmap(qdev->doorbell_area);
3990         pci_release_regions(pdev);
3991         pci_set_drvdata(pdev, NULL);
3992 }
3993
3994 static int __devinit ql_init_device(struct pci_dev *pdev,
3995                                     struct net_device *ndev, int cards_found)
3996 {
3997         struct ql_adapter *qdev = netdev_priv(ndev);
3998         int err = 0;
3999
4000         memset(qdev, 0, sizeof(*qdev));
4001         err = pci_enable_device(pdev);
4002         if (err) {
4003                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4004                 return err;
4005         }
4006
4007         qdev->ndev = ndev;
4008         qdev->pdev = pdev;
4009         pci_set_drvdata(pdev, ndev);
4010
4011         /* Set PCIe read request size */
4012         err = pcie_set_readrq(pdev, 4096);
4013         if (err) {
4014                 dev_err(&pdev->dev, "Set readrq failed.\n");
                /* Nothing is mapped or requested yet, so don't go through
                 * ql_release_all(); just undo the enable and bail out.
                 */
                pci_disable_device(pdev);
                return err;
4016         }
4017
4018         err = pci_request_regions(pdev, DRV_NAME);
4019         if (err) {
4020                 dev_err(&pdev->dev, "PCI region request failed.\n");
                pci_disable_device(pdev);
4021                 return err;
4022         }
4023
4024         pci_set_master(pdev);
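        /* Prefer 64-bit DMA and record the choice in QL_DMA64 so that
         * NETIF_F_HIGHDMA can be advertised later; else fall back to 32-bit.
         */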
4025         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4026                 set_bit(QL_DMA64, &qdev->flags);
4027                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4028         } else {
4029                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4030                 if (!err)
4031                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4032         }
4033
4034         if (err) {
4035                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4036                 goto err_out;
4037         }
4038
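        /* BAR 1 holds the control/status registers and BAR 3 the doorbell
         * area; map both uncached.
         */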
4039         qdev->reg_base =
4040             ioremap_nocache(pci_resource_start(pdev, 1),
4041                             pci_resource_len(pdev, 1));
4042         if (!qdev->reg_base) {
4043                 dev_err(&pdev->dev, "Register mapping failed.\n");
4044                 err = -ENOMEM;
4045                 goto err_out;
4046         }
4047
4048         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4049         qdev->doorbell_area =
4050             ioremap_nocache(pci_resource_start(pdev, 3),
4051                             pci_resource_len(pdev, 3));
4052         if (!qdev->doorbell_area) {
4053                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4054                 err = -ENOMEM;
4055                 goto err_out;
4056         }
4057
4058         err = ql_get_board_info(qdev);
4059         if (err) {
4060                 dev_err(&pdev->dev, "Register access failed.\n");
4061                 err = -EIO;
4062                 goto err_out;
4063         }
4064         qdev->msg_enable = netif_msg_init(debug, default_msg);
4065         spin_lock_init(&qdev->hw_lock);
4066         spin_lock_init(&qdev->stats_lock);
4067
4068         /* make sure the EEPROM is good */
4069         err = qdev->nic_ops->get_flash(qdev);
4070         if (err) {
4071                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4072                 goto err_out;
4073         }
4074
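        /* get_flash() above is expected to have filled ndev->dev_addr from
         * the flash image; keep a copy as the permanent address.
         */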
4075         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4076
4077         /* Set up the default ring sizes. */
4078         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4079         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4080
4081         /* Set up the coalescing parameters. */
4082         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4083         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4084         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4085         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4086
4087         /*
4088          * Set up the operating parameters.
4089          */
4090         qdev->rx_csum = 1;
4091         qdev->workqueue = create_singlethread_workqueue(ndev->name);
        if (!qdev->workqueue) {
                err = -ENOMEM;
                goto err_out;
        }
4092         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4093         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4094         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4095         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4096         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4097         init_completion(&qdev->ide_completion);
4098
4099         if (!cards_found) {
4100                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4101                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4102                          DRV_NAME, DRV_VERSION);
4103         }
4104         return 0;
4105 err_out:
4106         ql_release_all(pdev);
4107         pci_disable_device(pdev);
4108         return err;
4109 }
4110
4111
4112 static const struct net_device_ops qlge_netdev_ops = {
4113         .ndo_open               = qlge_open,
4114         .ndo_stop               = qlge_close,
4115         .ndo_start_xmit         = qlge_send,
4116         .ndo_change_mtu         = qlge_change_mtu,
4117         .ndo_get_stats          = qlge_get_stats,
4118         .ndo_set_multicast_list = qlge_set_multicast_list,
4119         .ndo_set_mac_address    = qlge_set_mac_address,
4120         .ndo_validate_addr      = eth_validate_addr,
4121         .ndo_tx_timeout         = qlge_tx_timeout,
4122         .ndo_vlan_rx_register   = ql_vlan_rx_register,
4123         .ndo_vlan_rx_add_vid    = ql_vlan_rx_add_vid,
4124         .ndo_vlan_rx_kill_vid   = ql_vlan_rx_kill_vid,
4125 };
4126
4127 static int __devinit qlge_probe(struct pci_dev *pdev,
4128                                 const struct pci_device_id *pci_entry)
4129 {
4130         struct net_device *ndev = NULL;
4131         struct ql_adapter *qdev = NULL;
4132         static int cards_found = 0;
4133         int err = 0;
4134
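        /* Allocate one TX queue per online CPU, capped at MAX_CPUS. */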
4135         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4136                         min(MAX_CPUS, (int)num_online_cpus()));
4137         if (!ndev)
4138                 return -ENOMEM;
4139
4140         err = ql_init_device(pdev, ndev, cards_found);
4141         if (err < 0) {
4142                 free_netdev(ndev);
4143                 return err;
4144         }
4145
4146         qdev = netdev_priv(ndev);
4147         SET_NETDEV_DEV(ndev, &pdev->dev);
4148         ndev->features = (0
4149                           | NETIF_F_IP_CSUM
4150                           | NETIF_F_SG
4151                           | NETIF_F_TSO
4152                           | NETIF_F_TSO6
4153                           | NETIF_F_TSO_ECN
4154                           | NETIF_F_HW_VLAN_TX
4155                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4156         ndev->features |= NETIF_F_GRO;
4157
4158         if (test_bit(QL_DMA64, &qdev->flags))
4159                 ndev->features |= NETIF_F_HIGHDMA;
4160
4161         /*
4162          * Set up net_device structure.
4163          */
4164         ndev->tx_queue_len = qdev->tx_ring_size;
4165         ndev->irq = pdev->irq;
4166
4167         ndev->netdev_ops = &qlge_netdev_ops;
4168         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4169         ndev->watchdog_timeo = 10 * HZ;
4170
4171         err = register_netdev(ndev);
4172         if (err) {
4173                 dev_err(&pdev->dev, "net device registration failed.\n");
4174                 ql_release_all(pdev);
4175                 pci_disable_device(pdev);
4176                 return err;
4177         }
4178         ql_link_off(qdev);
4179         ql_display_dev_info(ndev);
4180         cards_found++;
4181         return 0;
4182 }
4183
4184 static void __devexit qlge_remove(struct pci_dev *pdev)
4185 {
4186         struct net_device *ndev = pci_get_drvdata(pdev);
4187         unregister_netdev(ndev);
4188         ql_release_all(pdev);
4189         pci_disable_device(pdev);
4190         free_netdev(ndev);
4191 }
4192
4193 /*
4194  * This callback is called by the PCI subsystem whenever
4195  * a PCI bus error is detected.
4196  */
4197 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4198                                                enum pci_channel_state state)
4199 {
4200         struct net_device *ndev = pci_get_drvdata(pdev);
4201         struct ql_adapter *qdev = netdev_priv(ndev);
4202
4203         netif_device_detach(ndev);
4204
4205         if (state == pci_channel_io_perm_failure)
4206                 return PCI_ERS_RESULT_DISCONNECT;
4207
4208         if (netif_running(ndev))
4209                 ql_adapter_down(qdev);
4210
4211         pci_disable_device(pdev);
4212
4213         /* Request a slot reset. */
4214         return PCI_ERS_RESULT_NEED_RESET;
4215 }
4216
4217 /*
4218  * This callback is called after the PCI bus has been reset.
4219  * Basically, this tries to restart the card from scratch.
4220  * This is a shortened version of the device probe/discovery code;
4221  * it resembles the first half of the qlge_probe() routine.
4222  */
4223 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4224 {
4225         struct net_device *ndev = pci_get_drvdata(pdev);
4226         struct ql_adapter *qdev = netdev_priv(ndev);
4227
4228         if (pci_enable_device(pdev)) {
4229                 QPRINTK(qdev, IFUP, ERR,
4230                         "Cannot re-enable PCI device after reset.\n");
4231                 return PCI_ERS_RESULT_DISCONNECT;
4232         }
4233
4234         pci_set_master(pdev);
4235
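        /* Reset the chip so recovery starts from a clean hardware state. */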
4236         netif_carrier_off(ndev);
4237         ql_adapter_reset(qdev);
4238
4239         /* Sanity-check the MAC address that was preserved across the reset. */
4240         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4241
4242         if (!is_valid_ether_addr(ndev->perm_addr)) {
4243                 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4244                 return PCI_ERS_RESULT_DISCONNECT;
4245         }
4246
4247         return PCI_ERS_RESULT_RECOVERED;
4248 }
4249
4250 static void qlge_io_resume(struct pci_dev *pdev)
4251 {
4252         struct net_device *ndev = pci_get_drvdata(pdev);
4253         struct ql_adapter *qdev = netdev_priv(ndev);
4254
4255         pci_set_master(pdev);
4256
4257         if (netif_running(ndev)) {
4258                 if (ql_adapter_up(qdev)) {
4259                         QPRINTK(qdev, IFUP, ERR,
4260                                 "Device initialization failed after reset.\n");
4261                         return;
4262                 }
4263         }
4264
4265         netif_device_attach(ndev);
4266 }
4267
4268 static struct pci_error_handlers qlge_err_handler = {
4269         .error_detected = qlge_io_error_detected,
4270         .slot_reset = qlge_io_slot_reset,
4271         .resume = qlge_io_resume,
4272 };
4273
4274 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4275 {
4276         struct net_device *ndev = pci_get_drvdata(pdev);
4277         struct ql_adapter *qdev = netdev_priv(ndev);
4278         int err;
4279
4280         netif_device_detach(ndev);
4281
4282         if (netif_running(ndev)) {
4283                 err = ql_adapter_down(qdev);
4284                 if (err)
4285                         return err;
4286         }
4287
4288         err = pci_save_state(pdev);
4289         if (err)
4290                 return err;
4291
4292         pci_disable_device(pdev);
4293
4294         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4295
4296         return 0;
4297 }
4298
4299 #ifdef CONFIG_PM
4300 static int qlge_resume(struct pci_dev *pdev)
4301 {
4302         struct net_device *ndev = pci_get_drvdata(pdev);
4303         struct ql_adapter *qdev = netdev_priv(ndev);
4304         int err;
4305
4306         pci_set_power_state(pdev, PCI_D0);
4307         pci_restore_state(pdev);
4308         err = pci_enable_device(pdev);
4309         if (err) {
4310                 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4311                 return err;
4312         }
4313         pci_set_master(pdev);
4314
4315         pci_enable_wake(pdev, PCI_D3hot, 0);
4316         pci_enable_wake(pdev, PCI_D3cold, 0);
4317
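        /* If the interface was up at suspend time, bring the adapter back
         * up before re-attaching the net device.
         */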
4318         if (netif_running(ndev)) {
4319                 err = ql_adapter_up(qdev);
4320                 if (err)
4321                         return err;
4322         }
4323
4324         netif_device_attach(ndev);
4325
4326         return 0;
4327 }
4328 #endif /* CONFIG_PM */
4329
4330 static void qlge_shutdown(struct pci_dev *pdev)
4331 {
4332         qlge_suspend(pdev, PMSG_SUSPEND);
4333 }
4334
4335 static struct pci_driver qlge_driver = {
4336         .name = DRV_NAME,
4337         .id_table = qlge_pci_tbl,
4338         .probe = qlge_probe,
4339         .remove = __devexit_p(qlge_remove),
4340 #ifdef CONFIG_PM
4341         .suspend = qlge_suspend,
4342         .resume = qlge_resume,
4343 #endif
4344         .shutdown = qlge_shutdown,
4345         .err_handler = &qlge_err_handler
4346 };
4347
4348 static int __init qlge_init_module(void)
4349 {
4350         return pci_register_driver(&qlge_driver);
4351 }
4352
4353 static void __exit qlge_exit(void)
4354 {
4355         pci_unregister_driver(&qlge_driver);
4356 }
4357
4358 module_init(qlge_init_module);
4359 module_exit(qlge_exit);