dma-mapping: replace all DMA_64BIT_MASK macro with DMA_BIT_MASK(64)
[linux-2.6.git] / drivers / net / qlge / qlge_main.c
index 7c1ce57..0add30d 100644 (file)
@@ -587,7 +587,6 @@ u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
        u32 var = 0;
-       unsigned long hw_flags;
        struct intr_context *ctx;
 
        /* HW disables for us if we're MSIX multi interrupts and
@@ -597,14 +596,14 @@ static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
                return 0;
 
        ctx = qdev->intr_context + intr;
-       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       spin_lock(&qdev->hw_lock);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
-       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       spin_unlock(&qdev->hw_lock);
        return var;
 }
 
@@ -1507,6 +1506,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 {
        struct net_device *ndev = qdev->ndev;
        struct sk_buff *skb = NULL;
+       u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
+                       IB_MAC_IOCB_RSP_VLAN_MASK);
 
        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 
@@ -1531,32 +1532,53 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
                QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
        }
-       if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
-               QPRINTK(qdev, RX_STATUS, ERR,
-                       "Bad checksum for this %s packet.\n",
-                       ((ib_mac_rsp->
-                         flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
-               skb->ip_summed = CHECKSUM_NONE;
-       } else if (qdev->rx_csum &&
-                  ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
-                   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-                    !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
-               QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* If rx checksum is on, and there are no
+        * csum or frame errors.
+        */
+       if (qdev->rx_csum &&
+               !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
+               !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+               /* TCP frame. */
+               if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                       QPRINTK(qdev, RX_STATUS, DEBUG,
+                                       "TCP checksum done!\n");
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                               (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+               /* Unfragmented ipv4 UDP frame. */
+                       struct iphdr *iph = (struct iphdr *) skb->data;
+                       if (!(iph->frag_off &
+                               cpu_to_be16(IP_MF|IP_OFFSET))) {
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               QPRINTK(qdev, RX_STATUS, DEBUG,
+                                               "UDP checksum done!\n");
+                       }
+               }
        }
+
        qdev->stats.rx_packets++;
        qdev->stats.rx_bytes += skb->len;
-       skb->protocol = eth_type_trans(skb, ndev);
-       skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]);
-       if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
-               QPRINTK(qdev, RX_STATUS, DEBUG,
-                       "Passing a VLAN packet upstream.\n");
-               vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
-                               le16_to_cpu(ib_mac_rsp->vlan_id));
+       skb_record_rx_queue(skb,
+               rx_ring->cq_id - qdev->rss_ring_first_cq_id);
+       if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               if (qdev->vlgrp &&
+                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (vlan_id != 0))
+                       vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+                               vlan_id, skb);
+               else
+                       napi_gro_receive(&rx_ring->napi, skb);
        } else {
-               QPRINTK(qdev, RX_STATUS, DEBUG,
-                       "Passing a normal packet upstream.\n");
-               netif_receive_skb(skb);
+               if (qdev->vlgrp &&
+                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (vlan_id != 0))
+                       vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+               else
+                       netif_receive_skb(skb);
        }
 }
 
@@ -1603,14 +1625,12 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 /* Fire up a handler to reset the MPI processor. */
 void ql_queue_fw_error(struct ql_adapter *qdev)
 {
-       netif_stop_queue(qdev->ndev);
        netif_carrier_off(qdev->ndev);
        queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
 }
 
 void ql_queue_asic_error(struct ql_adapter *qdev)
 {
-       netif_stop_queue(qdev->ndev);
        netif_carrier_off(qdev->ndev);
        ql_disable_interrupts(qdev);
        /* Clear adapter up bit to signal the recovery
@@ -1665,6 +1685,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
        struct ob_mac_iocb_rsp *net_rsp = NULL;
        int count = 0;
 
+       struct tx_ring *tx_ring;
        /* While there are entries in the completion queue. */
        while (prod != rx_ring->cnsmr_idx) {
 
@@ -1690,15 +1711,16 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
                prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
        }
        ql_write_cq_idx(rx_ring);
-       if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
-               struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+       tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
+       if (net_rsp != NULL &&
+           __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
                if (atomic_read(&tx_ring->queue_stopped) &&
                    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
                        /*
                         * The queue got stopped because the tx_ring was full.
                         * Wake it up, because it's now at least 25% empty.
                         */
-                       netif_wake_queue(qdev->ndev);
+                       netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
        }
 
        return count;
@@ -1759,7 +1781,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
                rx_ring->cq_id);
 
        if (work_done < budget) {
-               __napi_complete(napi);
+               napi_complete(napi);
                ql_enable_completion_interrupt(qdev, rx_ring->irq);
        }
        return work_done;
@@ -2030,15 +2052,18 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
        struct ql_adapter *qdev = netdev_priv(ndev);
        int tso;
        struct tx_ring *tx_ring;
-       u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+       u32 tx_ring_idx = (u32) skb->queue_mapping;
 
        tx_ring = &qdev->tx_ring[tx_ring_idx];
 
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
        if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
                QPRINTK(qdev, TX_QUEUED, INFO,
                        "%s: shutting down tx queue %d du to lack of resources.\n",
                        __func__, tx_ring_idx);
-               netif_stop_queue(ndev);
+               netif_stop_subqueue(ndev, tx_ring->wq_id);
                atomic_inc(&tx_ring->queue_stopped);
                return NETDEV_TX_BUSY;
        }
@@ -2119,6 +2144,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
                        "Allocation of RX shadow space failed.\n");
                return -ENOMEM;
        }
+       memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
        qdev->tx_ring_shadow_reg_area =
            pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
                                 &qdev->tx_ring_shadow_reg_dma);
@@ -2127,6 +2153,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
                        "Allocation of TX shadow space failed.\n");
                goto err_wqp_sh_area;
        }
+       memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
        return 0;
 
 err_wqp_sh_area:
@@ -2498,6 +2525,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
            qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
        int err = 0;
        u16 bq_len;
+       u64 tmp;
 
        /* Set up the shadow registers for this ring. */
        rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2543,7 +2571,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
            FLAGS_LI;           /* Load irq delay values */
        if (rx_ring->lbq_len) {
                cqicb->flags |= FLAGS_LL;       /* Load lbq values */
-               *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
+               tmp = (u64)rx_ring->lbq_base_dma;
+               *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
                cqicb->lbq_addr =
                    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
                bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2559,11 +2588,12 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        }
        if (rx_ring->sbq_len) {
                cqicb->flags |= FLAGS_LS;       /* Load sbq values */
-               *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
+               tmp = (u64)rx_ring->sbq_base_dma;
+               *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
                cqicb->sbq_addr =
                    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
                cqicb->sbq_buf_size =
-                   cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
+                   cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
                        (u16) rx_ring->sbq_len;
                cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -3014,7 +3044,7 @@ exit:
        return status;
 }
 
-static int ql_cam_route_initialize(struct ql_adapter *qdev)
+int ql_cam_route_initialize(struct ql_adapter *qdev)
 {
        int status;
 
@@ -3049,9 +3079,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
        mask = value << 16;
        ql_write32(qdev, SYS, mask | value);
 
-       /* Set the default queue. */
-       value = NIC_RCV_CFG_DFQ;
-       mask = NIC_RCV_CFG_DFQ_MASK;
+       /* Set the default queue, and VLAN behavior. */
+       value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
+       mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
        /* Set the MPI interrupt to enabled. */
@@ -3129,36 +3159,23 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 static int ql_adapter_reset(struct ql_adapter *qdev)
 {
        u32 value;
-       int max_wait_time;
        int status = 0;
-       int resetCnt = 0;
+       unsigned long end_jiffies = jiffies +
+               max((unsigned long)1, usecs_to_jiffies(30));
 
-#define MAX_RESET_CNT   1
-issueReset:
-       resetCnt++;
-       QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
-       /* Wait for reset to complete. */
-       max_wait_time = 3;
-       QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
-               max_wait_time);
+
        do {
                value = ql_read32(qdev, RST_FO);
                if ((value & RST_FO_FR) == 0)
                        break;
+               cpu_relax();
+       } while (time_before(jiffies, end_jiffies));
 
-               ssleep(1);
-       } while ((--max_wait_time));
        if (value & RST_FO_FR) {
                QPRINTK(qdev, IFDOWN, ERR,
-                       "Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
-               if (resetCnt < MAX_RESET_CNT)
-                       goto issueReset;
-       }
-       if (max_wait_time == 0) {
-               status = -ETIMEDOUT;
-               QPRINTK(qdev, IFDOWN, ERR,
                        "ETIMEOUT!!! errored out of resetting the chip!\n");
+               status = -ETIMEDOUT;
        }
 
        return status;
@@ -3181,12 +3198,10 @@ static void ql_display_dev_info(struct net_device *ndev)
 
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
-       struct net_device *ndev = qdev->ndev;
        int i, status = 0;
        struct rx_ring *rx_ring;
 
-       netif_stop_queue(ndev);
-       netif_carrier_off(ndev);
+       netif_carrier_off(qdev->ndev);
 
        /* Don't kill the reset worker thread if we
         * are in the process of recovery.
@@ -3195,6 +3210,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
                cancel_delayed_work_sync(&qdev->asic_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_work);
+       cancel_delayed_work_sync(&qdev->mpi_idc_work);
        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
 
        /* The default queue at index 0 is always processed in
@@ -3225,7 +3241,13 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
        ql_tx_ring_clean(qdev);
 
+       /* Call netif_napi_del() from common point.
+        */
+       for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+               netif_napi_del(&qdev->rx_ring[i].napi);
+
        ql_free_rx_buffers(qdev);
+
        spin_lock(&qdev->hw_lock);
        status = ql_adapter_reset(qdev);
        if (status)
@@ -3239,22 +3261,18 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 {
        int err = 0;
 
-       spin_lock(&qdev->hw_lock);
        err = ql_adapter_initialize(qdev);
        if (err) {
                QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
-               spin_unlock(&qdev->hw_lock);
                goto err_init;
        }
-       spin_unlock(&qdev->hw_lock);
        set_bit(QL_ADAPTER_UP, &qdev->flags);
        ql_alloc_rx_buffers(qdev);
+       if ((ql_read32(qdev, STS) & qdev->port_init))
+               netif_carrier_on(qdev->ndev);
        ql_enable_interrupts(qdev);
        ql_enable_all_completion_interrupts(qdev);
-       if ((ql_read32(qdev, STS) & qdev->port_init)) {
-               netif_carrier_on(qdev->ndev);
-               netif_start_queue(qdev->ndev);
-       }
+       netif_tx_start_all_queues(qdev->ndev);
 
        return 0;
 err_init:
@@ -3262,28 +3281,6 @@ err_init:
        return err;
 }
 
-static int ql_cycle_adapter(struct ql_adapter *qdev)
-{
-       int status;
-
-       status = ql_adapter_down(qdev);
-       if (status)
-               goto error;
-
-       status = ql_adapter_up(qdev);
-       if (status)
-               goto error;
-
-       return status;
-error:
-       QPRINTK(qdev, IFUP, ALERT,
-               "Driver up/down cycle failed, closing device\n");
-       rtnl_lock();
-       dev_close(qdev->ndev);
-       rtnl_unlock();
-       return status;
-}
-
 static void ql_release_adapter_resources(struct ql_adapter *qdev)
 {
        ql_free_mem_resources(qdev);
@@ -3364,6 +3361,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
         * completion handler rx_rings.
         */
        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+       netif_set_gso_max_size(qdev->ndev, 65536);
 
        for (i = 0; i < qdev->tx_ring_count; i++) {
                tx_ring = &qdev->tx_ring[i];
@@ -3616,7 +3614,24 @@ static void ql_asic_reset_work(struct work_struct *work)
 {
        struct ql_adapter *qdev =
            container_of(work, struct ql_adapter, asic_reset_work.work);
-       ql_cycle_adapter(qdev);
+       int status;
+
+       status = ql_adapter_down(qdev);
+       if (status)
+               goto error;
+
+       status = ql_adapter_up(qdev);
+       if (status)
+               goto error;
+
+       return;
+error:
+       QPRINTK(qdev, IFUP, ALERT,
+               "Driver up/down cycle failed, closing device\n");
+       rtnl_lock();
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
+       dev_close(qdev->ndev);
+       rtnl_unlock();
 }
 
 static struct nic_operations qla8012_nic_ops = {
@@ -3711,9 +3726,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
        }
 
        pci_set_master(pdev);
-       if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                set_bit(QL_DMA64, &qdev->flags);
-               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (!err)
@@ -3782,6 +3797,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+       INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
        mutex_init(&qdev->mpi_mutex);
        init_completion(&qdev->ide_completion);
 
@@ -3821,7 +3837,8 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
        static int cards_found = 0;
        int err = 0;
 
-       ndev = alloc_etherdev(sizeof(struct ql_adapter));
+       ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+                       min(MAX_CPUS, (int)num_online_cpus()));
        if (!ndev)
                return -ENOMEM;
 
@@ -3841,6 +3858,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
                          | NETIF_F_TSO_ECN
                          | NETIF_F_HW_VLAN_TX
                          | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
+       ndev->features |= NETIF_F_GRO;
 
        if (test_bit(QL_DMA64, &qdev->flags))
                ndev->features |= NETIF_F_HIGHDMA;
@@ -3863,7 +3881,6 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
                return err;
        }
        netif_carrier_off(ndev);
-       netif_stop_queue(ndev);
        ql_display_dev_info(ndev);
        cards_found++;
        return 0;
@@ -3917,7 +3934,6 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
        pci_set_master(pdev);
 
        netif_carrier_off(ndev);
-       netif_stop_queue(ndev);
        ql_adapter_reset(qdev);
 
        /* Make sure the EEPROM is good */
@@ -3959,7 +3975,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct ql_adapter *qdev = netdev_priv(ndev);
-       int err, i;
+       int err;
 
        netif_device_detach(ndev);
 
@@ -3969,9 +3985,6 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
                        return err;
        }
 
-       for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
-               netif_napi_del(&qdev->rx_ring[i].napi);
-
        err = pci_save_state(pdev);
        if (err)
                return err;