dma-mapping: replace all DMA_64BIT_MASK macro with DMA_BIT_MASK(64)
[linux-2.6.git] / drivers / net / qlge / qlge_main.c
index 6445d2c..0add30d 100644 (file)
@@ -58,8 +58,8 @@ static const u32 default_msg =
     NETIF_MSG_IFUP |
     NETIF_MSG_RX_ERR |
     NETIF_MSG_TX_ERR |
-    NETIF_MSG_TX_QUEUED |
-    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
+/*  NETIF_MSG_TX_QUEUED | */
+/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
 /* NETIF_MSG_PKTDATA | */
     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
 
@@ -75,8 +75,8 @@ module_param(irq_type, int, MSIX_IRQ);
 MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 
 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
-       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
-       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
        /* required last entry */
        {0,}
 };
@@ -127,12 +127,12 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 
 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 {
-       unsigned int seconds = 3;
+       unsigned int wait_count = 30;
        do {
                if (!ql_sem_trylock(qdev, sem_mask))
                        return 0;
-               ssleep(1);
-       } while (--seconds);
+               udelay(100);
+       } while (--wait_count);
        return -ETIMEDOUT;
 }
 
@@ -248,16 +248,13 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
        u32 offset = 0;
        int status;
 
-       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
-       if (status)
-               return status;
        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -265,13 +262,13 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -279,14 +276,14 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                status =
                                    ql_wait_reg_rdy(qdev,
-                                       MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                                       MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                                if (status)
                                        goto exit;
                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -294,7 +291,7 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                                status =
                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
-                                                   MAC_ADDR_MR, MAC_ADDR_E);
+                                                   MAC_ADDR_MR, 0);
                                if (status)
                                        goto exit;
                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
@@ -309,7 +306,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                status = -EPERM;
        }
 exit:
-       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
 }
 
@@ -322,9 +318,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
        u32 offset = 0;
        int status = 0;
 
-       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
-       if (status)
-               return status;
        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
@@ -335,7 +328,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                            (addr[5]);
 
-                       QPRINTK(qdev, IFUP, INFO,
+                       QPRINTK(qdev, IFUP, DEBUG,
                                "Adding %s address %pM"
                                " at index %d in the CAM.\n",
                                ((type ==
@@ -344,7 +337,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -353,7 +346,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -362,7 +355,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
@@ -400,7 +393,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 
                        status =
                            ql_wait_reg_rdy(qdev,
-                               MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
+                               MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
@@ -416,7 +409,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                status = -EPERM;
        }
 exit:
-       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
 }
 
@@ -427,22 +419,17 @@ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 {
        int status = 0;
 
-       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
-       if (status)
-               goto exit;
-
-       status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
+       status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
        if (status)
                goto exit;
 
        ql_write32(qdev, RT_IDX,
                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
-       status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
+       status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
        if (status)
                goto exit;
        *value = ql_read32(qdev, RT_DATA);
 exit:
-       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
 }
 
@@ -454,13 +441,9 @@ exit:
 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                              int enable)
 {
-       int status;
+       int status = -EINVAL; /* Return error if no mask match. */
        u32 value = 0;
 
-       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
-       if (status)
-               return status;
-
        QPRINTK(qdev, IFUP, DEBUG,
                "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
                (enable ? "Adding" : "Removing"),
@@ -556,7 +539,6 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                ql_write32(qdev, RT_DATA, enable ? mask : 0);
        }
 exit:
-       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
 }
 
@@ -605,7 +587,6 @@ u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
        u32 var = 0;
-       unsigned long hw_flags;
        struct intr_context *ctx;
 
        /* HW disables for us if we're MSIX multi interrupts and
@@ -615,14 +596,14 @@ static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
                return 0;
 
        ctx = qdev->intr_context + intr;
-       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       spin_lock(&qdev->hw_lock);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
-       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       spin_unlock(&qdev->hw_lock);
        return var;
 }
 
@@ -642,7 +623,29 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 
 }
 
-int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
+static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+{
+       int status, i;
+       u16 csum = 0;
+       __le16 *flash = (__le16 *)&qdev->flash;
+
+       status = strncmp((char *)&qdev->flash, str, 4);
+       if (status) {
+               QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+               return  status;
+       }
+
+       for (i = 0; i < size; i++)
+               csum += le16_to_cpu(*flash++);
+
+       if (csum)
+               QPRINTK(qdev, IFUP, ERR,
+                       "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+
+       return csum;
+}
+
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 {
        int status = 0;
        /* wait for reg to come ready */
@@ -657,29 +660,110 @@ int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
-       /* get the data */
-       *data = ql_read32(qdev, FLASH_DATA);
+        /* This data is stored on flash as an array of
+        * __le32.  Since ql_read32() returns cpu endian
+        * we need to swap it back.
+        */
+       *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
+exit:
+       return status;
+}
+
+static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+{
+       u32 i, size;
+       int status;
+       __le32 *p = (__le32 *)&qdev->flash;
+       u32 offset;
+
+       /* Get flash offset for function and adjust
+        * for dword access.
+        */
+       if (!qdev->func)
+               offset = FUNC0_FLASH_OFFSET / sizeof(u32);
+       else
+               offset = FUNC1_FLASH_OFFSET / sizeof(u32);
+
+       if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+               return -ETIMEDOUT;
+
+       size = sizeof(struct flash_params_8000) / sizeof(u32);
+       for (i = 0; i < size; i++, p++) {
+               status = ql_read_flash_word(qdev, i+offset, p);
+               if (status) {
+                       QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+                       goto exit;
+               }
+       }
+
+       status = ql_validate_flash(qdev,
+                       sizeof(struct flash_params_8000) / sizeof(u16),
+                       "8000");
+       if (status) {
+               QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+               status = -EINVAL;
+               goto exit;
+       }
+
+       if (!is_valid_ether_addr(qdev->flash.flash_params_8000.mac_addr)) {
+               QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+               status = -EINVAL;
+               goto exit;
+       }
+
+       memcpy(qdev->ndev->dev_addr,
+               qdev->flash.flash_params_8000.mac_addr,
+               qdev->ndev->addr_len);
+
 exit:
+       ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
 }
 
-static int ql_get_flash_params(struct ql_adapter *qdev)
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 {
        int i;
        int status;
-       u32 *p = (u32 *)&qdev->flash;
+       __le32 *p = (__le32 *)&qdev->flash;
+       u32 offset = 0;
+       u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+       /* Second function's parameters follow the first
+        * function's.
+        */
+       if (qdev->func)
+               offset = size;
 
        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;
 
-       for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
-               status = ql_read_flash_word(qdev, i, p);
+       for (i = 0; i < size; i++, p++) {
+               status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
                        goto exit;
                }
 
        }
+
+       status = ql_validate_flash(qdev,
+                       sizeof(struct flash_params_8012) / sizeof(u16),
+                       "8012");
+       if (status) {
+               QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+               status = -EINVAL;
+               goto exit;
+       }
+
+       if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+               status = -EINVAL;
+               goto exit;
+       }
+
+       memcpy(qdev->ndev->dev_addr,
+               qdev->flash.flash_params_8012.mac_addr,
+               qdev->ndev->addr_len);
+
 exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
@@ -750,13 +834,25 @@ exit:
        return status;
 }
 
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+       int status;
+       status = ql_mb_get_fw_state(qdev);
+       if (status)
+               goto exit;
+       /* Wake up a worker to get/set the TX/RX frame sizes. */
+       queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+       return status;
+}
+
 /* Take the MAC Core out of reset.
  * Enable statistics counting.
  * Take the transmitter/receiver out of reset.
  * This functionality may be done in the MPI firmware at a
  * later date.
  */
-static int ql_port_initialize(struct ql_adapter *qdev)
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
 {
        int status = 0;
        u32 data;
@@ -832,7 +928,7 @@ end:
 }
 
 /* Get the next large buffer. */
-struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
        rx_ring->lbq_curr_idx++;
@@ -843,7 +939,7 @@ struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 }
 
 /* Get the next small buffer. */
-struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
        rx_ring->sbq_curr_idx++;
@@ -872,9 +968,9 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
 /* Process (refill) a large buffer queue. */
 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
-       int clean_idx = rx_ring->lbq_clean_idx;
+       u32 clean_idx = rx_ring->lbq_clean_idx;
+       u32 start_idx = clean_idx;
        struct bq_desc *lbq_desc;
-       struct bq_element *bq;
        u64 map;
        int i;
 
@@ -884,13 +980,13 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                                "lbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
-                       bq = lbq_desc->bq;
                        if (lbq_desc->p.lbq_page == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "lbq: getting new page for index %d.\n",
                                        lbq_desc->index);
                                lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
                                if (lbq_desc->p.lbq_page == NULL) {
+                                       rx_ring->lbq_clean_idx = clean_idx;
                                        QPRINTK(qdev, RX_STATUS, ERR,
                                                "Couldn't get a page.\n");
                                        return;
@@ -900,16 +996,16 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
+                                       rx_ring->lbq_clean_idx = clean_idx;
+                                       put_page(lbq_desc->p.lbq_page);
+                                       lbq_desc->p.lbq_page = NULL;
                                        QPRINTK(qdev, RX_STATUS, ERR,
                                                "PCI mapping failed.\n");
                                        return;
                                }
                                pci_unmap_addr_set(lbq_desc, mapaddr, map);
                                pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-                               bq->addr_lo =   /*lbq_desc->addr_lo = */
-                                   cpu_to_le32(map);
-                               bq->addr_hi =   /*lbq_desc->addr_hi = */
-                                   cpu_to_le32(map >> 32);
+                               *lbq_desc->addr = cpu_to_le64(map);
                        }
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
@@ -920,21 +1016,24 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                rx_ring->lbq_prod_idx += 16;
                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
                        rx_ring->lbq_prod_idx = 0;
+               rx_ring->lbq_free_cnt -= 16;
+       }
+
+       if (start_idx != clean_idx) {
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "lbq: updating prod idx = %d.\n",
                        rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
-               rx_ring->lbq_free_cnt -= 16;
        }
 }
 
 /* Process (refill) a small buffer queue. */
 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
-       int clean_idx = rx_ring->sbq_clean_idx;
+       u32 clean_idx = rx_ring->sbq_clean_idx;
+       u32 start_idx = clean_idx;
        struct bq_desc *sbq_desc;
-       struct bq_element *bq;
        u64 map;
        int i;
 
@@ -944,7 +1043,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "sbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
-                       bq = sbq_desc->bq;
                        if (sbq_desc->p.skb == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "sbq: getting new skb for index %d.\n",
@@ -963,11 +1061,17 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                                                     sbq_desc->p.skb->data,
                                                     rx_ring->sbq_buf_size /
                                                     2, PCI_DMA_FROMDEVICE);
+                               if (pci_dma_mapping_error(qdev->pdev, map)) {
+                                       QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+                                       rx_ring->sbq_clean_idx = clean_idx;
+                                       dev_kfree_skb_any(sbq_desc->p.skb);
+                                       sbq_desc->p.skb = NULL;
+                                       return;
+                               }
                                pci_unmap_addr_set(sbq_desc, mapaddr, map);
                                pci_unmap_len_set(sbq_desc, maplen,
                                                  rx_ring->sbq_buf_size / 2);
-                               bq->addr_lo = cpu_to_le32(map);
-                               bq->addr_hi = cpu_to_le32(map >> 32);
+                               *sbq_desc->addr = cpu_to_le64(map);
                        }
 
                        clean_idx++;
@@ -978,13 +1082,15 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                rx_ring->sbq_prod_idx += 16;
                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
                        rx_ring->sbq_prod_idx = 0;
+               rx_ring->sbq_free_cnt -= 16;
+       }
+
+       if (start_idx != clean_idx) {
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "sbq: updating prod idx = %d.\n",
                        rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);
-
-               rx_ring->sbq_free_cnt -= 16;
        }
 }
 
@@ -1166,7 +1272,7 @@ map_error:
        return NETDEV_TX_BUSY;
 }
 
-void ql_realign_skb(struct sk_buff *skb, int len)
+static void ql_realign_skb(struct sk_buff *skb, int len)
 {
        void *temp_addr = skb->data;
 
@@ -1303,6 +1409,11 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                                        "No skb available, drop the packet.\n");
                                return NULL;
                        }
+                       pci_unmap_page(qdev->pdev,
+                                      pci_unmap_addr(lbq_desc,
+                                                     mapaddr),
+                                      pci_unmap_len(lbq_desc, maplen),
+                                      PCI_DMA_FROMDEVICE);
                        skb_reserve(skb, NET_IP_ALIGN);
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
@@ -1330,7 +1441,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                 *          eventually be in trouble.
                 */
                int size, offset, i = 0;
-               struct bq_element *bq, bq_array[8];
+               __le64 *bq, bq_array[8];
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1356,16 +1467,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                } else {
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "Headers in small, %d bytes of data in chain of large.\n", length);
-                       bq = (struct bq_element *)sbq_desc->p.skb->data;
+                       bq = (__le64 *)sbq_desc->p.skb->data;
                }
                while (length > 0) {
                        lbq_desc = ql_get_curr_lbuf(rx_ring);
-                       if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
-                               QPRINTK(qdev, RX_STATUS, ERR,
-                                       "Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
-                                       lbq_desc->bq->addr_lo, bq->addr_lo);
-                               return NULL;
-                       }
                        pci_unmap_page(qdev->pdev,
                                       pci_unmap_addr(lbq_desc,
                                                      mapaddr),
@@ -1401,6 +1506,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 {
        struct net_device *ndev = qdev->ndev;
        struct sk_buff *skb = NULL;
+       u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
+                       IB_MAC_IOCB_RSP_VLAN_MASK);
 
        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 
@@ -1425,33 +1532,54 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
                QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
        }
-       if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
-               QPRINTK(qdev, RX_STATUS, ERR,
-                       "Bad checksum for this %s packet.\n",
-                       ((ib_mac_rsp->
-                         flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
-               skb->ip_summed = CHECKSUM_NONE;
-       } else if (qdev->rx_csum &&
-                  ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
-                   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-                    !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
-               QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* If rx checksum is on, and there are no
+        * csum or frame errors.
+        */
+       if (qdev->rx_csum &&
+               !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
+               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+               /* TCP frame. */
+               if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                       QPRINTK(qdev, RX_STATUS, DEBUG,
+                                       "TCP checksum done!\n");
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                               (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+               /* Unfragmented ipv4 UDP frame. */
+                       struct iphdr *iph = (struct iphdr *) skb->data;
+                       if (!(iph->frag_off &
+                               cpu_to_be16(IP_MF|IP_OFFSET))) {
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               QPRINTK(qdev, RX_STATUS, DEBUG,
+                                               "UDP checksum done!\n");
+                       }
+               }
        }
+
        qdev->stats.rx_packets++;
        qdev->stats.rx_bytes += skb->len;
-       skb->protocol = eth_type_trans(skb, ndev);
-       if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
-               QPRINTK(qdev, RX_STATUS, DEBUG,
-                       "Passing a VLAN packet upstream.\n");
-               vlan_hwaccel_rx(skb, qdev->vlgrp,
-                               le16_to_cpu(ib_mac_rsp->vlan_id));
+       skb_record_rx_queue(skb,
+               rx_ring->cq_id - qdev->rss_ring_first_cq_id);
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               if (qdev->vlgrp &&
+                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (vlan_id != 0))
+                       vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+                               vlan_id, skb);
+               else
+                       napi_gro_receive(&rx_ring->napi, skb);
        } else {
-               QPRINTK(qdev, RX_STATUS, DEBUG,
-                       "Passing a normal packet upstream.\n");
-               netif_rx(skb);
+               if (qdev->vlgrp &&
+                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (vlan_id != 0))
+                       vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+               else
+                       netif_receive_skb(skb);
        }
-       ndev->last_rx = jiffies;
 }
 
 /* Process an outbound completion from an rx ring. */
@@ -1497,16 +1625,19 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 /* Fire up a handler to reset the MPI processor. */
 void ql_queue_fw_error(struct ql_adapter *qdev)
 {
-       netif_stop_queue(qdev->ndev);
        netif_carrier_off(qdev->ndev);
        queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
 }
 
 void ql_queue_asic_error(struct ql_adapter *qdev)
 {
-       netif_stop_queue(qdev->ndev);
        netif_carrier_off(qdev->ndev);
        ql_disable_interrupts(qdev);
+       /* Clear adapter up bit to signal the recovery
+        * process that it shouldn't kill the reset worker
+        * thread
+        */
+       clear_bit(QL_ADAPTER_UP, &qdev->flags);
        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
@@ -1554,6 +1685,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
        struct ob_mac_iocb_rsp *net_rsp = NULL;
        int count = 0;
 
+       struct tx_ring *tx_ring;
        /* While there are entries in the completion queue. */
        while (prod != rx_ring->cnsmr_idx) {
 
@@ -1579,15 +1711,16 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
        }
        ql_write_cq_idx(rx_ring);
-       if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
-               struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+       tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+       if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
+                                       net_rsp != NULL) {
                if (atomic_read(&tx_ring->queue_stopped) &&
                    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
                        /*
                         * The queue got stopped because the tx_ring was full.
                         * Wake it up, because it's now at least 25% empty.
                         */
-                       netif_wake_queue(qdev->ndev);
+                       netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
        }
 
        return count;
@@ -1648,7 +1781,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
                rx_ring->cq_id);
 
        if (work_done < budget) {
-               __netif_rx_complete(qdev->ndev, napi);
+               napi_complete(napi);
                ql_enable_completion_interrupt(qdev, rx_ring->irq);
        }
        return work_done;
@@ -1674,19 +1807,29 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        u32 enable_bit = MAC_ADDR_E;
+       int status;
 
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return;
        spin_lock(&qdev->hw_lock);
        if (ql_set_mac_addr_reg
            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
                QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
        }
        spin_unlock(&qdev->hw_lock);
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
 static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        u32 enable_bit = 0;
+       int status;
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return;
 
        spin_lock(&qdev->hw_lock);
        if (ql_set_mac_addr_reg
@@ -1694,6 +1837,7 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
                QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
        }
        spin_unlock(&qdev->hw_lock);
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 
 }
 
@@ -1733,8 +1877,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
        struct rx_ring *rx_ring = dev_id;
-       struct ql_adapter *qdev = rx_ring->qdev;
-       netif_rx_schedule(qdev->ndev, &rx_ring->napi);
+       napi_schedule(&rx_ring->napi);
        return IRQ_HANDLED;
 }
 
@@ -1820,8 +1963,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
                                                              &rx_ring->rx_work,
                                                              0);
                                else
-                                       netif_rx_schedule(qdev->ndev,
-                                                         &rx_ring->napi);
+                                       napi_schedule(&rx_ring->napi);
                                work_done++;
                        }
                }
@@ -1877,7 +2019,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
 {
        int len;
        struct iphdr *iph = ip_hdr(skb);
-       u16 *check;
+       __sum16 *check;
        mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
        mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
        mac_iocb_ptr->net_trans_offset =
@@ -1910,25 +2052,24 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
        struct ql_adapter *qdev = netdev_priv(ndev);
        int tso;
        struct tx_ring *tx_ring;
-       u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+       u32 tx_ring_idx = (u32) skb->queue_mapping;
 
        tx_ring = &qdev->tx_ring[tx_ring_idx];
 
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
                QPRINTK(qdev, TX_QUEUED, INFO,
                        "%s: shutting down tx queue %d du to lack of resources.\n",
                        __func__, tx_ring_idx);
-               netif_stop_queue(ndev);
+               netif_stop_subqueue(ndev, tx_ring->wq_id);
                atomic_inc(&tx_ring->queue_stopped);
                return NETDEV_TX_BUSY;
        }
        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
        mac_iocb_ptr = tx_ring_desc->queue_entry;
        memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
-       if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
-               QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
-               return NETDEV_TX_BUSY;
-       }
 
        mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
        mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1954,6 +2095,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
                ql_hw_csum_setup(skb,
                                 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
        }
+       if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+                       NETDEV_TX_OK) {
+               QPRINTK(qdev, TX_QUEUED, ERR,
+                               "Could not map the segments.\n");
+               return NETDEV_TX_BUSY;
+       }
        QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
        tx_ring->prod_idx++;
        if (tx_ring->prod_idx == tx_ring->wq_len)
@@ -1997,6 +2144,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
                        "Allocation of RX shadow space failed.\n");
                return -ENOMEM;
        }
+       memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
        qdev->tx_ring_shadow_reg_area =
            pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
                                 &qdev->tx_ring_shadow_reg_dma);
@@ -2005,6 +2153,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
                        "Allocation of TX shadow space failed.\n");
                goto err_wqp_sh_area;
        }
+       memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
        return 0;
 
 err_wqp_sh_area:
@@ -2070,7 +2219,7 @@ err:
        return -ENOMEM;
 }
 
-void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
        int i;
        struct bq_desc *lbq_desc;
@@ -2086,54 +2235,10 @@ void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                        put_page(lbq_desc->p.lbq_page);
                        lbq_desc->p.lbq_page = NULL;
                }
-               lbq_desc->bq->addr_lo = 0;
-               lbq_desc->bq->addr_hi = 0;
        }
 }
 
-/*
- * Allocate and map a page for each element of the lbq.
- */
-static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
-                               struct rx_ring *rx_ring)
-{
-       int i;
-       struct bq_desc *lbq_desc;
-       u64 map;
-       struct bq_element *bq = rx_ring->lbq_base;
-
-       for (i = 0; i < rx_ring->lbq_len; i++) {
-               lbq_desc = &rx_ring->lbq[i];
-               memset(lbq_desc, 0, sizeof(lbq_desc));
-               lbq_desc->bq = bq;
-               lbq_desc->index = i;
-               lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
-               if (unlikely(!lbq_desc->p.lbq_page)) {
-                       QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
-                       goto mem_error;
-               } else {
-                       map = pci_map_page(qdev->pdev,
-                                          lbq_desc->p.lbq_page,
-                                          0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(qdev->pdev, map)) {
-                               QPRINTK(qdev, IFUP, ERR,
-                                       "PCI mapping failed.\n");
-                               goto mem_error;
-                       }
-                       pci_unmap_addr_set(lbq_desc, mapaddr, map);
-                       pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-                       bq->addr_lo = cpu_to_le32(map);
-                       bq->addr_hi = cpu_to_le32(map >> 32);
-               }
-               bq++;
-       }
-       return 0;
-mem_error:
-       ql_free_lbq_buffers(qdev, rx_ring);
-       return -ENOMEM;
-}
-
-void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
        int i;
        struct bq_desc *sbq_desc;
@@ -2152,76 +2257,75 @@ void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                        dev_kfree_skb(sbq_desc->p.skb);
                        sbq_desc->p.skb = NULL;
                }
-               if (sbq_desc->bq == NULL) {
-                       QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
-                               i);
-                       return;
-               }
-               sbq_desc->bq->addr_lo = 0;
-               sbq_desc->bq->addr_hi = 0;
        }
 }
 
-/* Allocate and map an skb for each element of the sbq. */
-static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+       int i;
+       struct rx_ring *rx_ring;
+
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               rx_ring = &qdev->rx_ring[i];
+               if (rx_ring->lbq)
+                       ql_free_lbq_buffers(qdev, rx_ring);
+               if (rx_ring->sbq)
+                       ql_free_sbq_buffers(qdev, rx_ring);
+       }
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+       struct rx_ring *rx_ring;
+       int i;
+
+       for (i = 0; i < qdev->rx_ring_count; i++) {
+               rx_ring = &qdev->rx_ring[i];
+               if (rx_ring->type != TX_Q)
+                       ql_update_buffer_queues(qdev, rx_ring);
+       }
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+                               struct rx_ring *rx_ring)
+{
+       int i;
+       struct bq_desc *lbq_desc;
+       __le64 *bq = rx_ring->lbq_base;
+
+       memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+       for (i = 0; i < rx_ring->lbq_len; i++) {
+               lbq_desc = &rx_ring->lbq[i];
+               memset(lbq_desc, 0, sizeof(*lbq_desc));
+               lbq_desc->index = i;
+               lbq_desc->addr = bq;
+               bq++;
+       }
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
                                struct rx_ring *rx_ring)
 {
        int i;
        struct bq_desc *sbq_desc;
-       struct sk_buff *skb;
-       u64 map;
-       struct bq_element *bq = rx_ring->sbq_base;
+       __le64 *bq = rx_ring->sbq_base;
 
+       memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
        for (i = 0; i < rx_ring->sbq_len; i++) {
                sbq_desc = &rx_ring->sbq[i];
-               memset(sbq_desc, 0, sizeof(sbq_desc));
+               memset(sbq_desc, 0, sizeof(*sbq_desc));
                sbq_desc->index = i;
-               sbq_desc->bq = bq;
-               skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
-               if (unlikely(!skb)) {
-                       /* Better luck next round */
-                       QPRINTK(qdev, IFUP, ERR,
-                               "small buff alloc failed for %d bytes at index %d.\n",
-                               rx_ring->sbq_buf_size, i);
-                       goto mem_err;
-               }
-               skb_reserve(skb, QLGE_SB_PAD);
-               sbq_desc->p.skb = skb;
-               /*
-                * Map only half the buffer. Because the
-                * other half may get some data copied to it
-                * when the completion arrives.
-                */
-               map = pci_map_single(qdev->pdev,
-                                    skb->data,
-                                    rx_ring->sbq_buf_size / 2,
-                                    PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(qdev->pdev, map)) {
-                       QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
-                       goto mem_err;
-               }
-               pci_unmap_addr_set(sbq_desc, mapaddr, map);
-               pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
-               bq->addr_lo =   /*sbq_desc->addr_lo = */
-                   cpu_to_le32(map);
-               bq->addr_hi =   /*sbq_desc->addr_hi = */
-                   cpu_to_le32(map >> 32);
+               sbq_desc->addr = bq;
                bq++;
        }
-       return 0;
-mem_err:
-       ql_free_sbq_buffers(qdev, rx_ring);
-       return -ENOMEM;
 }
 
 static void ql_free_rx_resources(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring)
 {
-       if (rx_ring->sbq_len)
-               ql_free_sbq_buffers(qdev, rx_ring);
-       if (rx_ring->lbq_len)
-               ql_free_lbq_buffers(qdev, rx_ring);
-
        /* Free the small buffer queue. */
        if (rx_ring->sbq_base) {
                pci_free_consistent(qdev->pdev,
@@ -2299,11 +2403,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
                        goto err_mem;
                }
 
-               if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
-                       QPRINTK(qdev, IFUP, ERR,
-                               "Small buffer allocation failed.\n");
-                       goto err_mem;
-               }
+               ql_init_sbq_ring(qdev, rx_ring);
        }
 
        if (rx_ring->lbq_len) {
@@ -2331,14 +2431,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
                        goto err_mem;
                }
 
-               /*
-                * Allocate the buffers.
-                */
-               if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
-                       QPRINTK(qdev, IFUP, ERR,
-                               "Large buffer allocation failed.\n");
-                       goto err_mem;
-               }
+               ql_init_lbq_ring(qdev, rx_ring);
        }
 
        return 0;
@@ -2376,28 +2469,6 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
        }
 }
 
-static void ql_free_ring_cb(struct ql_adapter *qdev)
-{
-       kfree(qdev->ring_mem);
-}
-
-static int ql_alloc_ring_cb(struct ql_adapter *qdev)
-{
-       /* Allocate space for tx/rx ring control blocks. */
-       qdev->ring_mem_size =
-           (qdev->tx_ring_count * sizeof(struct tx_ring)) +
-           (qdev->rx_ring_count * sizeof(struct rx_ring));
-       qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
-       if (qdev->ring_mem == NULL) {
-               return -ENOMEM;
-       } else {
-               qdev->rx_ring = qdev->ring_mem;
-               qdev->tx_ring = qdev->ring_mem +
-                   (qdev->rx_ring_count * sizeof(struct rx_ring));
-       }
-       return 0;
-}
-
 static void ql_free_mem_resources(struct ql_adapter *qdev)
 {
        int i;
@@ -2454,6 +2525,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
            qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
        int err = 0;
        u16 bq_len;
+       u64 tmp;
 
        /* Set up the shadow registers for this ring. */
        rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2468,7 +2540,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
 
        /* PCI doorbell mem area + 0x00 for consumer index register */
-       rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
+       rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
        rx_ring->cnsmr_idx = 0;
        rx_ring->curr_entry = rx_ring->cq_base;
 
@@ -2476,22 +2548,20 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        rx_ring->valid_db_reg = doorbell_area + 0x04;
 
        /* PCI doorbell mem area + 0x18 for large buffer consumer */
-       rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
+       rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
 
        /* PCI doorbell mem area + 0x1c */
-       rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
+       rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
 
        memset((void *)cqicb, 0, sizeof(struct cqicb));
        cqicb->msix_vect = rx_ring->irq;
 
-       cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);
+       bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
+       cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
 
-       cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
-       cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
+       cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
 
-       cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
-       cqicb->prod_idx_addr_hi =
-           cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
+       cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
 
        /*
         * Set up the control block load flags.
@@ -2501,34 +2571,36 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
            FLAGS_LI;           /* Load irq delay values */
        if (rx_ring->lbq_len) {
                cqicb->flags |= FLAGS_LL;       /* Load lbq values */
-               *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
-               cqicb->lbq_addr_lo =
-                   cpu_to_le32(rx_ring->lbq_base_indirect_dma);
-               cqicb->lbq_addr_hi =
-                   cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
-               cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
-               bq_len = (u16) rx_ring->lbq_len;
+               tmp = (u64)rx_ring->lbq_base_dma;
+               *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
+               cqicb->lbq_addr =
+                   cpu_to_le64(rx_ring->lbq_base_indirect_dma);
+               bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
+                       (u16) rx_ring->lbq_buf_size;
+               cqicb->lbq_buf_size = cpu_to_le16(bq_len);
+               bq_len = (rx_ring->lbq_len == 65536) ? 0 :
+                       (u16) rx_ring->lbq_len;
                cqicb->lbq_len = cpu_to_le16(bq_len);
-               rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
+               rx_ring->lbq_prod_idx = 0;
                rx_ring->lbq_curr_idx = 0;
-               rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
-               rx_ring->lbq_free_cnt = 16;
+               rx_ring->lbq_clean_idx = 0;
+               rx_ring->lbq_free_cnt = rx_ring->lbq_len;
        }
        if (rx_ring->sbq_len) {
                cqicb->flags |= FLAGS_LS;       /* Load sbq values */
-               *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
-               cqicb->sbq_addr_lo =
-                   cpu_to_le32(rx_ring->sbq_base_indirect_dma);
-               cqicb->sbq_addr_hi =
-                   cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
+               tmp = (u64)rx_ring->sbq_base_dma;
+               *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
+               cqicb->sbq_addr =
+                   cpu_to_le64(rx_ring->sbq_base_indirect_dma);
                cqicb->sbq_buf_size =
-                   cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
-               bq_len = (u16) rx_ring->sbq_len;
+                   cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
+               bq_len = (rx_ring->sbq_len == 65536) ? 0 :
+                       (u16) rx_ring->sbq_len;
                cqicb->sbq_len = cpu_to_le16(bq_len);
-               rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
+               rx_ring->sbq_prod_idx = 0;
                rx_ring->sbq_curr_idx = 0;
-               rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
-               rx_ring->sbq_free_cnt = 16;
+               rx_ring->sbq_clean_idx = 0;
+               rx_ring->sbq_free_cnt = rx_ring->sbq_len;
        }
        switch (rx_ring->type) {
        case TX_Q:
@@ -2574,24 +2646,13 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
                QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
                        rx_ring->type);
        }
-       QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
+       QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
        err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
                           CFG_LCQ, rx_ring->cq_id);
        if (err) {
                QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
                return err;
        }
-       QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
-       /*
-        * Advance the producer index for the buffer queues.
-        */
-       wmb();
-       if (rx_ring->lbq_len)
-               ql_write_db_reg(rx_ring->lbq_prod_idx,
-                               rx_ring->lbq_prod_idx_db_reg);
-       if (rx_ring->sbq_len)
-               ql_write_db_reg(rx_ring->sbq_prod_idx,
-                               rx_ring->sbq_prod_idx_db_reg);
        return err;
 }
 
@@ -2610,7 +2671,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
         * Assign doorbell registers for this tx_ring.
         */
        /* TX PCI doorbell mem area for tx producer index */
-       tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
+       tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
        tx_ring->prod_idx = 0;
        /* TX PCI doorbell mem area + 0x04 */
        tx_ring->valid_db_reg = doorbell_area + 0x04;
@@ -2626,12 +2687,9 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
                                   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
        wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
        wqicb->rid = 0;
-       wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
-       wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
+       wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
 
-       wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
-       wqicb->cnsmr_idx_addr_hi =
-           cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
+       wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
 
        ql_init_tx_ring(qdev, tx_ring);
 
@@ -2641,7 +2699,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
                QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
                return err;
        }
-       QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
+       QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
        return err;
 }
 
@@ -2683,7 +2741,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
                    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
                        qdev->intr_count = qdev->rx_ring_count;
-                       QPRINTK(qdev, IFUP, INFO,
+                       QPRINTK(qdev, IFUP, DEBUG,
                                "MSI-X Enabled, got %d vectors.\n",
                                qdev->intr_count);
                        return;
@@ -2761,14 +2819,14 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
                                 * Outbound queue is for outbound completions only.
                                 */
                                intr_context->handler = qlge_msix_tx_isr;
-                               sprintf(intr_context->name, "%s-txq-%d",
+                               sprintf(intr_context->name, "%s-tx-%d",
                                        qdev->ndev->name, i);
                        } else {
                                /*
                                 * Inbound queues handle unicast frames only.
                                 */
                                intr_context->handler = qlge_msix_rx_isr;
-                               sprintf(intr_context->name, "%s-rxq-%d",
+                               sprintf(intr_context->name, "%s-rx-%d",
                                        qdev->ndev->name, i);
                        }
                }
@@ -2810,11 +2868,11 @@ static void ql_free_irq(struct ql_adapter *qdev)
                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
                                free_irq(qdev->msi_x_entry[i].vector,
                                         &qdev->rx_ring[i]);
-                               QPRINTK(qdev, IFDOWN, ERR,
+                               QPRINTK(qdev, IFDOWN, DEBUG,
                                        "freeing msix interrupt %d.\n", i);
                        } else {
                                free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
-                               QPRINTK(qdev, IFDOWN, ERR,
+                               QPRINTK(qdev, IFDOWN, DEBUG,
                                        "freeing msi interrupt %d.\n", i);
                        }
                }
@@ -2845,7 +2903,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
                                        i);
                                goto err_irq;
                        } else {
-                               QPRINTK(qdev, IFUP, INFO,
+                               QPRINTK(qdev, IFUP, DEBUG,
                                        "Hooked intr %d, queue type %s%s%s, with name %s.\n",
                                        i,
                                        qdev->rx_ring[i].type ==
@@ -2911,8 +2969,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
        /*
         * Fill out the Indirection Table.
         */
-       for (i = 0; i < 32; i++)
-               hash_id[i] = i & 1;
+       for (i = 0; i < 256; i++)
+               hash_id[i] = i & (qdev->rss_ring_count - 1);
 
        /*
         * Random values for the IPv6 and IPv4 Hash Keys.
@@ -2920,14 +2978,14 @@ static int ql_start_rss(struct ql_adapter *qdev)
        get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
        get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
 
-       QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
+       QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
 
        status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
        if (status) {
                QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
                return status;
        }
-       QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
+       QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
        return status;
 }
 
@@ -2937,13 +2995,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
        int status = 0;
        int i;
 
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return status;
+
        /* Clear all the entries in the routing table. */
        for (i = 0; i < 16; i++) {
                status = ql_set_routing_reg(qdev, i, 0, 0);
                if (status) {
                        QPRINTK(qdev, IFUP, ERR,
                                "Failed to init routing register for CAM packets.\n");
-                       return status;
+                       goto exit;
                }
        }
 
@@ -2951,13 +3013,13 @@ static int ql_route_initialize(struct ql_adapter *qdev)
        if (status) {
                QPRINTK(qdev, IFUP, ERR,
                        "Failed to init routing register for error packets.\n");
-               return status;
+               goto exit;
        }
        status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
        if (status) {
                QPRINTK(qdev, IFUP, ERR,
                        "Failed to init routing register for broadcast packets.\n");
-               return status;
+               goto exit;
        }
        /* If we have more than one inbound queue, then turn on RSS in the
         * routing block.
@@ -2968,17 +3030,39 @@ static int ql_route_initialize(struct ql_adapter *qdev)
                if (status) {
                        QPRINTK(qdev, IFUP, ERR,
                                "Failed to init routing register for MATCH RSS packets.\n");
-                       return status;
+                       goto exit;
                }
        }
 
        status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
                                    RT_IDX_CAM_HIT, 1);
-       if (status) {
+       if (status)
                QPRINTK(qdev, IFUP, ERR,
                        "Failed to init routing register for CAM packets.\n");
+exit:
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+       return status;
+}
+
+int ql_cam_route_initialize(struct ql_adapter *qdev)
+{
+       int status;
+
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
+       status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
+                            MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status) {
+               QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
                return status;
        }
+
+       status = ql_route_initialize(qdev);
+       if (status)
+               QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+
        return status;
 }
 
@@ -2995,9 +3079,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
        mask = value << 16;
        ql_write32(qdev, SYS, mask | value);
 
-       /* Set the default queue. */
-       value = NIC_RCV_CFG_DFQ;
-       mask = NIC_RCV_CFG_DFQ_MASK;
+       /* Set the default queue, and VLAN behavior. */
+       value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
+       mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
        /* Set the MPI interrupt to enabled. */
@@ -3046,28 +3130,24 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
                }
        }
 
-       status = ql_port_initialize(qdev);
-       if (status) {
-               QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
-               return status;
-       }
+       /* Initialize the port and set the max framesize. */
+       status = qdev->nic_ops->port_initialize(qdev);
+       if (status) {
+               QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+               return status;
+       }
 
-       status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
-                                    MAC_ADDR_TYPE_CAM_MAC, qdev->func);
+       /* Set up the MAC address and frame routing filter. */
+       status = ql_cam_route_initialize(qdev);
        if (status) {
-               QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
-               return status;
-       }
-
-       status = ql_route_initialize(qdev);
-       if (status) {
-               QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+               QPRINTK(qdev, IFUP, ERR,
+                               "Failed to init CAM/Routing tables.\n");
                return status;
        }
 
        /* Start NAPI for the RSS queues. */
        for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
-               QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
+               QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
                        i);
                napi_enable(&qdev->rx_ring[i].napi);
        }
@@ -3079,36 +3159,23 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 static int ql_adapter_reset(struct ql_adapter *qdev)
 {
        u32 value;
-       int max_wait_time;
        int status = 0;
-       int resetCnt = 0;
+       unsigned long end_jiffies = jiffies +
+               max((unsigned long)1, usecs_to_jiffies(30));
 
-#define MAX_RESET_CNT   1
-issueReset:
-       resetCnt++;
-       QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
-       /* Wait for reset to complete. */
-       max_wait_time = 3;
-       QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
-               max_wait_time);
+
        do {
                value = ql_read32(qdev, RST_FO);
                if ((value & RST_FO_FR) == 0)
                        break;
+               cpu_relax();
+       } while (time_before(jiffies, end_jiffies));
 
-               ssleep(1);
-       } while ((--max_wait_time));
        if (value & RST_FO_FR) {
                QPRINTK(qdev, IFDOWN, ERR,
-                       "Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
-               if (resetCnt < MAX_RESET_CNT)
-                       goto issueReset;
-       }
-       if (max_wait_time == 0) {
-               status = -ETIMEDOUT;
-               QPRINTK(qdev, IFDOWN, ERR,
                        "ETIMEOUT!!! errored out of resetting the chip!\n");
+               status = -ETIMEDOUT;
        }
 
        return status;
@@ -3131,16 +3198,20 @@ static void ql_display_dev_info(struct net_device *ndev)
 
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
-       struct net_device *ndev = qdev->ndev;
        int i, status = 0;
        struct rx_ring *rx_ring;
 
-       netif_stop_queue(ndev);
-       netif_carrier_off(ndev);
+       netif_carrier_off(qdev->ndev);
 
-       cancel_delayed_work_sync(&qdev->asic_reset_work);
+       /* Don't kill the reset worker thread if we
+        * are in the process of recovery.
+        */
+       if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+               cancel_delayed_work_sync(&qdev->asic_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_work);
+       cancel_delayed_work_sync(&qdev->mpi_idc_work);
+       cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
 
        /* The default queue at index 0 is always processed in
         * a workqueue.
@@ -3151,7 +3222,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
         * a workqueue only if it's a single interrupt
         * environment (MSI/Legacy).
         */
-       for (i = 1; i > qdev->rx_ring_count; i++) {
+       for (i = 1; i < qdev->rx_ring_count; i++) {
                rx_ring = &qdev->rx_ring[i];
                /* Only the RSS rings use NAPI on multi irq
                 * environment.  Outbound completion processing
@@ -3170,6 +3241,13 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
        ql_tx_ring_clean(qdev);
 
+       /* Call netif_napi_del() from common point.
+        */
+       for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+               netif_napi_del(&qdev->rx_ring[i].napi);
+
+       ql_free_rx_buffers(qdev);
+
        spin_lock(&qdev->hw_lock);
        status = ql_adapter_reset(qdev);
        if (status)
@@ -3183,21 +3261,19 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 {
        int err = 0;
 
-       spin_lock(&qdev->hw_lock);
        err = ql_adapter_initialize(qdev);
        if (err) {
                QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
                spin_unlock(&qdev->hw_lock);
                goto err_init;
        }
-       spin_unlock(&qdev->hw_lock);
        set_bit(QL_ADAPTER_UP, &qdev->flags);
+       ql_alloc_rx_buffers(qdev);
+       if ((ql_read32(qdev, STS) & qdev->port_init))
+               netif_carrier_on(qdev->ndev);
        ql_enable_interrupts(qdev);
        ql_enable_all_completion_interrupts(qdev);
-       if ((ql_read32(qdev, STS) & qdev->port_init)) {
-               netif_carrier_on(qdev->ndev);
-               netif_start_queue(qdev->ndev);
-       }
+       netif_tx_start_all_queues(qdev->ndev);
 
        return 0;
 err_init:
@@ -3205,28 +3281,6 @@ err_init:
        return err;
 }
 
-static int ql_cycle_adapter(struct ql_adapter *qdev)
-{
-       int status;
-
-       status = ql_adapter_down(qdev);
-       if (status)
-               goto error;
-
-       status = ql_adapter_up(qdev);
-       if (status)
-               goto error;
-
-       return status;
-error:
-       QPRINTK(qdev, IFUP, ALERT,
-               "Driver up/down cycle failed, closing device\n");
-       rtnl_lock();
-       dev_close(qdev->ndev);
-       rtnl_unlock();
-       return status;
-}
-
 static void ql_release_adapter_resources(struct ql_adapter *qdev)
 {
        ql_free_mem_resources(qdev);
@@ -3262,7 +3316,6 @@ static int qlge_close(struct net_device *ndev)
                msleep(1);
        ql_adapter_down(qdev);
        ql_release_adapter_resources(qdev);
-       ql_free_ring_cb(qdev);
        return 0;
 }
 
@@ -3288,8 +3341,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
         * This limitation can be removed when requested.
         */
 
-       if (cpu_cnt > 8)
-               cpu_cnt = 8;
+       if (cpu_cnt > MAX_CPUS)
+               cpu_cnt = MAX_CPUS;
 
        /*
         * rx_ring[0] is always the default queue.
@@ -3308,9 +3361,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
         * completion handler rx_rings.
         */
        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
-
-       if (ql_alloc_ring_cb(qdev))
-               return -ENOMEM;
+       netif_set_gso_max_size(qdev->ndev, 65536);
 
        for (i = 0; i < qdev->tx_ring_count; i++) {
                tx_ring = &qdev->tx_ring[i];
@@ -3344,11 +3395,11 @@ static int ql_configure_rings(struct ql_adapter *qdev)
                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
                        rx_ring->lbq_size =
-                           rx_ring->lbq_len * sizeof(struct bq_element);
+                           rx_ring->lbq_len * sizeof(__le64);
                        rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
                        rx_ring->sbq_size =
-                           rx_ring->sbq_len * sizeof(struct bq_element);
+                           rx_ring->sbq_len * sizeof(__le64);
                        rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
                        rx_ring->type = DEFAULT_Q;
                } else if (i < qdev->rss_ring_first_cq_id) {
@@ -3375,11 +3426,11 @@ static int ql_configure_rings(struct ql_adapter *qdev)
                            rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
                        rx_ring->lbq_len = NUM_LARGE_BUFFERS;
                        rx_ring->lbq_size =
-                           rx_ring->lbq_len * sizeof(struct bq_element);
+                           rx_ring->lbq_len * sizeof(__le64);
                        rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
                        rx_ring->sbq_len = NUM_SMALL_BUFFERS;
                        rx_ring->sbq_size =
-                           rx_ring->sbq_len * sizeof(struct bq_element);
+                           rx_ring->sbq_len * sizeof(__le64);
                        rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
                        rx_ring->type = RX_Q;
                }
@@ -3408,7 +3459,6 @@ static int qlge_open(struct net_device *ndev)
 
 error_up:
        ql_release_adapter_resources(qdev);
-       ql_free_ring_cb(qdev);
        return err;
 }
 
@@ -3418,6 +3468,8 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 
        if (ndev->mtu == 1500 && new_mtu == 9000) {
                QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+               queue_delayed_work(qdev->workqueue,
+                               &qdev->mpi_port_cfg_work, 0);
        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
                QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
        } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3440,8 +3492,11 @@ static void qlge_set_multicast_list(struct net_device *ndev)
 {
        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
        struct dev_mc_list *mc_ptr;
-       int i;
+       int i, status;
 
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       if (status)
+               return;
        spin_lock(&qdev->hw_lock);
        /*
         * Set or clear promiscuous mode if a
@@ -3497,14 +3552,19 @@ static void qlge_set_multicast_list(struct net_device *ndev)
        }
 
        if (ndev->mc_count) {
+               status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+               if (status)
+                       goto exit;
                for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
                     i++, mc_ptr = mc_ptr->next)
                        if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
                                                MAC_ADDR_TYPE_MULTI_MAC, i)) {
                                QPRINTK(qdev, HW, ERR,
                                        "Failed to loadmulticast address.\n");
+                               ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
                                goto exit;
                        }
+               ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
                if (ql_set_routing_reg
                    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
                        QPRINTK(qdev, HW, ERR,
@@ -3515,12 +3575,14 @@ static void qlge_set_multicast_list(struct net_device *ndev)
        }
 exit:
        spin_unlock(&qdev->hw_lock);
+       ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 }
 
 static int qlge_set_mac_address(struct net_device *ndev, void *p)
 {
        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
        struct sockaddr *addr = p;
+       int status;
 
        if (netif_running(ndev))
                return -EBUSY;
@@ -3529,30 +3591,60 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
                return -EADDRNOTAVAIL;
        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
+       status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+       if (status)
+               return status;
        spin_lock(&qdev->hw_lock);
-       if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
-                       MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
-               QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
-               return -1;
-       }
+       status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+                       MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        spin_unlock(&qdev->hw_lock);
-
-       return 0;
+       if (status)
+               QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+       ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+       return status;
 }
 
 static void qlge_tx_timeout(struct net_device *ndev)
 {
        struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
-       queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+       ql_queue_asic_error(qdev);
 }
 
 static void ql_asic_reset_work(struct work_struct *work)
 {
        struct ql_adapter *qdev =
            container_of(work, struct ql_adapter, asic_reset_work.work);
-       ql_cycle_adapter(qdev);
+       int status;
+
+       status = ql_adapter_down(qdev);
+       if (status)
+               goto error;
+
+       status = ql_adapter_up(qdev);
+       if (status)
+               goto error;
+
+       return;
+error:
+       QPRINTK(qdev, IFUP, ALERT,
+               "Driver up/down cycle failed, closing device\n");
+       rtnl_lock();
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
+       dev_close(qdev->ndev);
+       rtnl_unlock();
 }
 
+static struct nic_operations qla8012_nic_ops = {
+       .get_flash              = ql_get_8012_flash_params,
+       .port_initialize        = ql_8012_port_initialize,
+};
+
+static struct nic_operations qla8000_nic_ops = {
+       .get_flash              = ql_get_8000_flash_params,
+       .port_initialize        = ql_8000_port_initialize,
+};
+
+
 static void ql_get_board_info(struct ql_adapter *qdev)
 {
        qdev->func =
@@ -3571,6 +3663,11 @@ static void ql_get_board_info(struct ql_adapter *qdev)
                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
        }
        qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+       qdev->device_id = qdev->pdev->device;
+       if (qdev->device_id == QLGE_DEVICE_ID_8012)
+               qdev->nic_ops = &qla8012_nic_ops;
+       else if (qdev->device_id == QLGE_DEVICE_ID_8000)
+               qdev->nic_ops = &qla8000_nic_ops;
 }
 
 static void ql_release_all(struct pci_dev *pdev)
@@ -3587,7 +3684,7 @@ static void ql_release_all(struct pci_dev *pdev)
                qdev->q_workqueue = NULL;
        }
        if (qdev->reg_base)
-               iounmap((void *)qdev->reg_base);
+               iounmap(qdev->reg_base);
        if (qdev->doorbell_area)
                iounmap(qdev->doorbell_area);
        pci_release_regions(pdev);
@@ -3629,9 +3726,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
        }
 
        pci_set_master(pdev);
-       if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                set_bit(QL_DMA64, &qdev->flags);
-               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (!err)
@@ -3663,24 +3760,20 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
                goto err_out;
        }
 
-       ql_get_board_info(qdev);
        qdev->ndev = ndev;
        qdev->pdev = pdev;
+       ql_get_board_info(qdev);
        qdev->msg_enable = netif_msg_init(debug, default_msg);
        spin_lock_init(&qdev->hw_lock);
        spin_lock_init(&qdev->stats_lock);
 
        /* make sure the EEPROM is good */
-       err = ql_get_flash_params(qdev);
+       err = qdev->nic_ops->get_flash(qdev);
        if (err) {
                dev_err(&pdev->dev, "Invalid FLASH.\n");
                goto err_out;
        }
 
-       if (!is_valid_ether_addr(qdev->flash.mac_addr))
-               goto err_out;
-
-       memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
        /* Set up the default ring sizes. */
@@ -3703,6 +3796,10 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+       INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+       INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+       mutex_init(&qdev->mpi_mutex);
+       init_completion(&qdev->ide_completion);
 
        if (!cards_found) {
                dev_info(&pdev->dev, "%s\n", DRV_STRING);
@@ -3716,6 +3813,22 @@ err_out:
        return err;
 }
 
+
+static const struct net_device_ops qlge_netdev_ops = {
+       .ndo_open               = qlge_open,
+       .ndo_stop               = qlge_close,
+       .ndo_start_xmit         = qlge_send,
+       .ndo_change_mtu         = qlge_change_mtu,
+       .ndo_get_stats          = qlge_get_stats,
+       .ndo_set_multicast_list = qlge_set_multicast_list,
+       .ndo_set_mac_address    = qlge_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_tx_timeout         = qlge_tx_timeout,
+       .ndo_vlan_rx_register   = ql_vlan_rx_register,
+       .ndo_vlan_rx_add_vid    = ql_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = ql_vlan_rx_kill_vid,
+};
+
 static int __devinit qlge_probe(struct pci_dev *pdev,
                                const struct pci_device_id *pci_entry)
 {
@@ -3724,7 +3837,8 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
        static int cards_found = 0;
        int err = 0;
 
-       ndev = alloc_etherdev(sizeof(struct ql_adapter));
+       ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+                       min(MAX_CPUS, (int)num_online_cpus()));
        if (!ndev)
                return -ENOMEM;
 
@@ -3744,6 +3858,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
                          | NETIF_F_TSO_ECN
                          | NETIF_F_HW_VLAN_TX
                          | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
+       ndev->features |= NETIF_F_GRO;
 
        if (test_bit(QL_DMA64, &qdev->flags))
                ndev->features |= NETIF_F_HIGHDMA;
@@ -3753,19 +3868,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
         */
        ndev->tx_queue_len = qdev->tx_ring_size;
        ndev->irq = pdev->irq;
-       ndev->open = qlge_open;
-       ndev->stop = qlge_close;
-       ndev->hard_start_xmit = qlge_send;
+
+       ndev->netdev_ops = &qlge_netdev_ops;
        SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
-       ndev->change_mtu = qlge_change_mtu;
-       ndev->get_stats = qlge_get_stats;
-       ndev->set_multicast_list = qlge_set_multicast_list;
-       ndev->set_mac_address = qlge_set_mac_address;
-       ndev->tx_timeout = qlge_tx_timeout;
        ndev->watchdog_timeo = 10 * HZ;
-       ndev->vlan_rx_register = ql_vlan_rx_register;
-       ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
-       ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
+
        err = register_netdev(ndev);
        if (err) {
                dev_err(&pdev->dev, "net device registration failed.\n");
@@ -3774,7 +3881,6 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
                return err;
        }
        netif_carrier_off(ndev);
-       netif_stop_queue(ndev);
        ql_display_dev_info(ndev);
        cards_found++;
        return 0;
@@ -3828,7 +3934,6 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
        pci_set_master(pdev);
 
        netif_carrier_off(ndev);
-       netif_stop_queue(ndev);
        ql_adapter_reset(qdev);
 
        /* Make sure the EEPROM is good */