Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bdbf54d517d9f3bf37402fed24e675f5cdae01d3..5b059e2d80cc18019b060a7143bdf59098879ef0 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+
+#include <linux/module.h>
+
 #include "iw_cxgb4.h"
 
+static int db_delay_usecs = 1;
+module_param(db_delay_usecs, int, 0644);
+MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay while waiting for the db fifo to drain");
+
+static int ocqp_support = 1;
+module_param(ocqp_support, int, 0644);
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
+
+int db_fc_threshold = 1000;
+module_param(db_fc_threshold, int, 0644);
+MODULE_PARM_DESC(db_fc_threshold,
+                "QP count/threshold that triggers"
+                " automatic db flow control mode (default = 1000)");
+
+int db_coalescing_threshold;
+module_param(db_coalescing_threshold, int, 0644);
+MODULE_PARM_DESC(db_coalescing_threshold,
+                "QP count/threshold that triggers"
+                " disabling db coalescing (default = 0)");
+
+static int max_fr_immd = T4_MAX_FR_IMMD;
+module_param(max_fr_immd, int, 0644);
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
+
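+/* Update the software QP state under the qp spinlock. */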
+static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
+{
+       unsigned long flag;
+       spin_lock_irqsave(&qhp->lock, flag);
+       qhp->attr.state = state;
+       spin_unlock_irqrestore(&qhp->lock, flag);
+}
+
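+/* Return on-chip SQ memory to the ocqp pool. */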
+static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
+}
+
+static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
+                         pci_unmap_addr(sq, mapping));
+}
+
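+/* Free an SQ from on-chip or host memory, depending on where it lives. */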
+static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       if (t4_sq_onchip(sq))
+               dealloc_oc_sq(rdev, sq);
+       else
+               dealloc_host_sq(rdev, sq);
+}
+
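+/*
+ * Try to place the SQ in on-chip queue memory.  Returns -ENOSYS when
+ * on-chip queues are disabled or unsupported, so the caller can fall
+ * back to a host-memory SQ.
+ */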
+static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       if (!ocqp_support || !ocqp_supported(&rdev->lldi))
+               return -ENOSYS;
+       sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
+       if (!sq->dma_addr)
+               return -ENOMEM;
+       sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
+                       rdev->lldi.vr->ocq.start;
+       sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
+                                           rdev->lldi.vr->ocq.start);
+       sq->flags |= T4_SQ_ONCHIP;
+       return 0;
+}
+
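+/* Allocate the SQ from host memory via the coherent DMA API. */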
+static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
+                                      &(sq->dma_addr), GFP_KERNEL);
+       if (!sq->queue)
+               return -ENOMEM;
+       sq->phys_addr = virt_to_phys(sq->queue);
+       pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+       return 0;
+}
+
 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                      struct c4iw_dev_ucontext *uctx)
 {
@@ -41,9 +121,7 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
-       dma_free_coherent(&(rdev->lldi.pdev->dev),
-                         wq->sq.memsize, wq->sq.queue,
-                         dma_unmap_addr(&wq->sq, mapping));
+       dealloc_sq(rdev, &wq->sq);
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
        kfree(wq->rq.sw_rq);
        kfree(wq->sq.sw_sq);
@@ -62,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
-       int ret;
+       int ret = 0;
        int eqsize;
 
        wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -70,19 +148,25 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                return -ENOMEM;
 
        wq->rq.qid = c4iw_get_qpid(rdev, uctx);
-       if (!wq->rq.qid)
-               goto err1;
+       if (!wq->rq.qid) {
+               ret = -ENOMEM;
+               goto free_sq_qid;
+       }
 
        if (!user) {
                wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
                                 GFP_KERNEL);
-               if (!wq->sq.sw_sq)
-                       goto err2;
+               if (!wq->sq.sw_sq) {
+                       ret = -ENOMEM;
+                       goto free_rq_qid;
+               }
 
                wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
                                 GFP_KERNEL);
-               if (!wq->rq.sw_rq)
-                       goto err3;
+               if (!wq->rq.sw_rq) {
+                       ret = -ENOMEM;
+                       goto free_sw_sq;
+               }
        }
 
        /*
@@ -90,22 +174,30 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
         */
        wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
        wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
-       if (!wq->rq.rqt_hwaddr)
-               goto err4;
+       if (!wq->rq.rqt_hwaddr) {
+               ret = -ENOMEM;
+               goto free_sw_rq;
+       }
+
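+       /* User QPs prefer an on-chip SQ; fall back to host memory on failure. */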
+       if (user) {
+               if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq)) {
+                       ret = -ENOMEM;
+                       goto free_hwaddr;
+               }
+       } else {
+               ret = alloc_host_sq(rdev, &wq->sq);
+               if (ret)
+                       goto free_hwaddr;
+       }
 
-       wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
-                                         wq->sq.memsize, &(wq->sq.dma_addr),
-                                         GFP_KERNEL);
-       if (!wq->sq.queue)
-               goto err5;
        memset(wq->sq.queue, 0, wq->sq.memsize);
        dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
        wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                                          GFP_KERNEL);
-       if (!wq->rq.queue)
-               goto err6;
+       if (!wq->rq.queue) {
+               ret = -ENOMEM;
+               goto free_sq;
+       }
        PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
                __func__, wq->sq.queue,
                (unsigned long long)virt_to_phys(wq->sq.queue),
@@ -133,7 +225,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
-               goto err7;
+               goto free_dma;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
@@ -158,12 +250,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
                V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
+               (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
                V_FW_RI_RES_WR_IQID(scq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(2) |
-               V_FW_RI_RES_WR_FBMAX(3) |
+               V_FW_RI_RES_WR_FBMAX(2) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -186,7 +279,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(2) |
-               V_FW_RI_RES_WR_FBMAX(3) |
+               V_FW_RI_RES_WR_FBMAX(2) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -197,42 +290,33 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 
        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
-               goto err7;
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rdev->lldi.pdev));
-               rdev->flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else
-               ret = wr_wait.ret;
+               goto free_dma;
+       ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
        if (ret)
-               goto err7;
+               goto free_dma;
 
        PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
             __func__, wq->sq.qid, wq->rq.qid, wq->db,
             (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
 
        return 0;
-err7:
+free_dma:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
-err6:
-       dma_free_coherent(&(rdev->lldi.pdev->dev),
-                         wq->sq.memsize, wq->sq.queue,
-                         dma_unmap_addr(&wq->sq, mapping));
-err5:
+free_sq:
+       dealloc_sq(rdev, &wq->sq);
+free_hwaddr:
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
-err4:
+free_sw_rq:
        kfree(wq->rq.sw_rq);
-err3:
+free_sw_sq:
        kfree(wq->sq.sw_sq);
-err2:
+free_rq_qid:
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
-err1:
+free_sq_qid:
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
-       return -ENOMEM;
+       return ret;
 }
 
 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
@@ -457,13 +541,15 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
        return 0;
 }
 
-static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
+                        struct ib_send_wr *wr, u8 *len16, u8 t5dev)
 {
 
        struct fw_ri_immd *imdp;
        __be64 *p;
        int i;
        int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+       int rem;
 
        if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
                return -EINVAL;
@@ -478,11 +564,18 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
        wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
        wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
                                        0xffffffff);
-       if (pbllen > T4_MAX_FR_IMMD) {
+
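+       /* On T5, write a large PBL with a DSGL rather than as immediate data. */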
+       if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
                struct c4iw_fr_page_list *c4pl =
-                               to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+                       to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
                struct fw_ri_dsgl *sglp;
 
+               for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+                       wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+                               cpu_to_be64((u64)
+                               wr->wr.fast_reg.page_list->page_list[i]);
+               }
+
                sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
                sglp->op = FW_RI_DATA_DSGL;
                sglp->r1 = 0;
@@ -490,7 +583,7 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
                sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
                sglp->len0 = cpu_to_be32(pbllen);
 
-               *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
+               *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
        } else {
                imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
                imdp->op = FW_RI_DATA_IMMD;
@@ -498,11 +591,23 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
                imdp->r2 = 0;
                imdp->immdlen = cpu_to_be32(pbllen);
                p = (__be64 *)(imdp + 1);
-               for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
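+               /*
+                * Copy the PBL inline, wrapping to the start of the SQ
+                * ring if needed, then zero-pad the rest of pbllen.
+                */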
+               rem = pbllen;
+               for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
                        *p = cpu_to_be64(
                                (u64)wr->wr.fast_reg.page_list->page_list[i]);
-               *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
-                                     16);
+                       rem -= sizeof(*p);
+                       if (++p == (__be64 *)&sq->queue[sq->size])
+                               p = (__be64 *)sq->queue;
+               }
+               BUG_ON(rem < 0);
+               while (rem) {
+                       *p = 0;
+                       rem -= sizeof(*p);
+                       if (++p == (__be64 *)&sq->queue[sq->size])
+                               p = (__be64 *)sq->queue;
+               }
+               *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+                                     + pbllen, 16);
        }
        return 0;
 }
@@ -591,7 +696,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        fw_opcode = FW_RI_RDMA_READ_WR;
                        swsqe->opcode = FW_RI_READ_REQ;
                        if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
-                               fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+                               fw_flags = FW_RI_RDMA_READ_INVALIDATE;
                        else
                                fw_flags = 0;
                        err = build_rdma_read(wqe, wr, &len16);
@@ -604,7 +709,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                case IB_WR_FAST_REG_MR:
                        fw_opcode = FW_RI_FR_NSMR_WR;
                        swsqe->opcode = FW_RI_FAST_REGISTER;
-                       err = build_fastreg(wqe, wr, &len16);
+                       err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+                                           is_t5(qhp->rhp->rdev.lldi.adapter_type) ?
+                                           1 : 0);
                        break;
                case IB_WR_LOCAL_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
@@ -846,36 +954,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
        }
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-       union t4_wr *wqe;
-       struct sk_buff *skb;
-       u8 len16;
-
-       PDBG("%s enter\n", __func__);
-       skb = alloc_skb(40, GFP_KERNEL);
-       if (!skb) {
-               printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-               return -ENOMEM;
-       }
-       set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-       wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-       memset(wqe, 0, sizeof wqe->read);
-       wqe->read.r2 = cpu_to_be64(0);
-       wqe->read.stag_sink = cpu_to_be32(1);
-       wqe->read.to_sink_hi = cpu_to_be32(0);
-       wqe->read.to_sink_lo = cpu_to_be32(1);
-       wqe->read.stag_src = cpu_to_be32(1);
-       wqe->read.plen = cpu_to_be32(0);
-       wqe->read.to_src_hi = cpu_to_be32(0);
-       wqe->read.to_src_lo = cpu_to_be32(1);
-       len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-       init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-       return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
                           gfp_t gfp)
 {
@@ -901,7 +979,11 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
        wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
        wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
        term = (struct terminate_message *)wqe->u.terminate.termmsg;
-       build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
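+       /* Prefer terminate codes already supplied by the MPA layer. */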
+       if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
+               term->layer_etype = qhp->attr.layer_etype;
+               term->ecode = qhp->attr.ecode;
+       } else
+               build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
        c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
@@ -909,48 +991,47 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
-                      struct c4iw_cq *schp, unsigned long *flag)
+                      struct c4iw_cq *schp)
 {
        int count;
        int flushed;
+       unsigned long flag;
 
        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
-       /* take a ref on the qhp since we must release the lock */
-       atomic_inc(&qhp->refcnt);
-       spin_unlock_irqrestore(&qhp->lock, *flag);
 
        /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&rchp->lock, *flag);
+       spin_lock_irqsave(&rchp->lock, flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&rchp->cq);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&rchp->lock, *flag);
-       if (flushed)
+       spin_unlock_irqrestore(&rchp->lock, flag);
+       if (flushed) {
+               spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+               spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+       }
 
        /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&schp->lock, *flag);
+       spin_lock_irqsave(&schp->lock, flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&schp->cq);
        c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&schp->lock, *flag);
-       if (flushed)
+       spin_unlock_irqrestore(&schp->lock, flag);
+       if (flushed) {
+               spin_lock_irqsave(&schp->comp_handler_lock, flag);
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-
-       /* deref */
-       if (atomic_dec_and_test(&qhp->refcnt))
-               wake_up(&qhp->wait);
-
-       spin_lock_irqsave(&qhp->lock, *flag);
+               spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+       }
 }
 
-static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+static void flush_qp(struct c4iw_qp *qhp)
 {
        struct c4iw_cq *rchp, *schp;
+       unsigned long flag;
 
        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -958,11 +1039,19 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
        if (qhp->ibqp.uobject) {
                t4_set_wq_in_error(&qhp->wq);
                t4_set_cq_in_error(&rchp->cq);
-               if (schp != rchp)
+               spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+               (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+               spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+               if (schp != rchp) {
                        t4_set_cq_in_error(&schp->cq);
+                       spin_lock_irqsave(&schp->comp_handler_lock, flag);
+                       (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                       schp->ibcq.cq_context);
+                       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+               }
                return;
        }
-       __flush_qp(qhp, rchp, schp, flag);
+       __flush_qp(qhp, rchp, schp);
 }
 
 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
@@ -970,7 +1059,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 {
        struct fw_ri_wr *wqe;
        int ret;
-       struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
 
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -989,28 +1077,15 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
-       wqe->cookie = (unsigned long) &wr_wait;
+       wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
        wqe->u.fini.type = FW_RI_TYPE_FINI;
-       c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;
 
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rhp->rdev.lldi.pdev));
-               rhp->rdev.flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else {
-               ret = wr_wait.ret;
-               if (ret)
-                       printk(KERN_WARNING MOD
-                              "%s: Abnormal close qpid %d ret %u\n",
-                              pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
-                              ret);
-       }
+       ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
+                            qhp->wq.sq.qid, __func__);
 out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
@@ -1018,6 +1093,7 @@ out:
 
 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
+       PDBG("%s p2p_type = %d\n", __func__, p2p_type);
        memset(&init->u, 0, sizeof init->u);
        switch (p2p_type) {
        case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1044,7 +1120,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 {
        struct fw_ri_wr *wqe;
        int ret;
-       struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
 
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -1064,7 +1139,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
 
-       wqe->cookie = (unsigned long) &wr_wait;
+       wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
 
        wqe->u.init.type = FW_RI_TYPE_INIT;
        wqe->u.init.mpareqbit_p2ptype =
@@ -1101,24 +1176,46 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
        if (qhp->attr.mpa_attr.initiator)
                build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-       c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;
 
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rhp->rdev.lldi.pdev));
-               rhp->rdev.flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else
-               ret = wr_wait.ret;
+       ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
+                                 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
 out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
 }
 
+/*
+ * Called by the library when the qp has user dbs disabled due to
+ * a DB_FULL condition.  This function will single-thread all user
+ * DB rings to avoid overflowing the hw db-fifo.
+ */
+static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
+{
+       int delay = db_delay_usecs;
+
+       mutex_lock(&qhp->rhp->db_mutex);
+       do {
+
+               /*
+                * The interrupt threshold is dbfifo_int_thresh << 6.  Ring
+                * only while the fifo count is below half of that, so we
+                * never cross it and generate an interrupt.
+                */
+               if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
+                   (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
+                       writel(QID(qid) | PIDX(inc), qhp->wq.db);
+                       break;
+               }
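+               /* Fifo too full: back off exponentially, up to 2ms. */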
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(usecs_to_jiffies(delay));
+               delay = min(delay << 1, 2000);
+       } while (1);
+       mutex_unlock(&qhp->rhp->db_mutex);
+       return 0;
+}
+
 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
@@ -1126,7 +1223,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 {
        int ret = 0;
        struct c4iw_qp_attributes newattr = qhp->attr;
-       unsigned long flag;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
@@ -1137,7 +1233,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
             qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
             (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
 
-       spin_lock_irqsave(&qhp->lock, flag);
+       mutex_lock(&qhp->mutex);
 
        /* Process attr changes if in IDLE */
        if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
@@ -1168,6 +1264,15 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                qhp->attr = newattr;
        }
 
+       /* DB ring requests are handled here, outside the state machine. */
+       if (mask & C4IW_QP_ATTR_SQ_DB) {
+               ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+               goto out;
+       }
+       if (mask & C4IW_QP_ATTR_RQ_DB) {
+               ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+               goto out;
+       }
+
        if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
                goto out;
        if (qhp->attr.state == attrs->next_state)
@@ -1188,7 +1293,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
-                       qhp->attr.state = C4IW_QP_STATE_RTS;
+                       set_state(qhp, C4IW_QP_STATE_RTS);
 
                        /*
                         * Ref the endpoint here and deref when we
@@ -1197,15 +1302,13 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                         * transition.
                         */
                        c4iw_get_ep(&qhp->ep->com);
-                       spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_init(rhp, qhp);
-                       spin_lock_irqsave(&qhp->lock, flag);
                        if (ret)
                                goto err;
                        break;
                case C4IW_QP_STATE_ERROR:
-                       qhp->attr.state = C4IW_QP_STATE_ERROR;
-                       flush_qp(qhp, &flag);
+                       set_state(qhp, C4IW_QP_STATE_ERROR);
+                       flush_qp(qhp);
                        break;
                default:
                        ret = -EINVAL;
@@ -1216,38 +1319,40 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                switch (attrs->next_state) {
                case C4IW_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
-                       qhp->attr.state = C4IW_QP_STATE_CLOSING;
+                       set_state(qhp, C4IW_QP_STATE_CLOSING);
                        ep = qhp->ep;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
-                               c4iw_get_ep(&ep->com);
+                               c4iw_get_ep(&qhp->ep->com);
                        }
-                       spin_unlock_irqrestore(&qhp->lock, flag);
+                       if (qhp->ibqp.uobject)
+                               t4_set_wq_in_error(&qhp->wq);
                        ret = rdma_fini(rhp, qhp, ep);
-                       spin_lock_irqsave(&qhp->lock, flag);
-                       if (ret) {
-                               c4iw_get_ep(&ep->com);
-                               disconnect = abort = 1;
+                       if (ret)
                                goto err;
-                       }
                        break;
                case C4IW_QP_STATE_TERMINATE:
-                       qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+                       set_state(qhp, C4IW_QP_STATE_TERMINATE);
+                       qhp->attr.layer_etype = attrs->layer_etype;
+                       qhp->attr.ecode = attrs->ecode;
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        ep = qhp->ep;
-                       c4iw_get_ep(&ep->com);
-                       terminate = 1;
+                       if (!internal)
+                               terminate = 1;
                        disconnect = 1;
+                       c4iw_get_ep(&qhp->ep->com);
                        break;
                case C4IW_QP_STATE_ERROR:
-                       qhp->attr.state = C4IW_QP_STATE_ERROR;
+                       set_state(qhp, C4IW_QP_STATE_ERROR);
+                       if (qhp->ibqp.uobject)
+                               t4_set_wq_in_error(&qhp->wq);
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
-                               c4iw_get_ep(&ep->com);
+                               c4iw_get_ep(&qhp->ep->com);
                        }
                        goto err;
                        break;
@@ -1263,8 +1368,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                }
                switch (attrs->next_state) {
                case C4IW_QP_STATE_IDLE:
-                       flush_qp(qhp, &flag);
-                       qhp->attr.state = C4IW_QP_STATE_IDLE;
+                       flush_qp(qhp);
+                       set_state(qhp, C4IW_QP_STATE_IDLE);
                        qhp->attr.llp_stream_handle = NULL;
                        c4iw_put_ep(&qhp->ep->com);
                        qhp->ep = NULL;
@@ -1286,7 +1391,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        ret = -EINVAL;
                        goto out;
                }
-               qhp->attr.state = C4IW_QP_STATE_IDLE;
+               set_state(qhp, C4IW_QP_STATE_IDLE);
                break;
        case C4IW_QP_STATE_TERMINATE:
                if (!internal) {
@@ -1312,13 +1417,14 @@ err:
        if (!ep)
                ep = qhp->ep;
        qhp->ep = NULL;
-       qhp->attr.state = C4IW_QP_STATE_ERROR;
+       set_state(qhp, C4IW_QP_STATE_ERROR);
        free = 1;
+       abort = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
-       flush_qp(qhp, &flag);
+       flush_qp(qhp);
 out:
-       spin_unlock_irqrestore(&qhp->lock, flag);
+       mutex_unlock(&qhp->mutex);
 
        if (terminate)
                post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
@@ -1340,11 +1446,18 @@ out:
         */
        if (free)
                c4iw_put_ep(&ep->com);
-
        PDBG("%s exit state %d\n", __func__, qhp->attr.state);
        return ret;
 }
 
+/* idr_for_each callback: re-enable a qp's doorbells. */
+static int enable_qp_db(int id, void *p, void *data)
+{
+       struct c4iw_qp *qp = p;
+
+       t4_enable_wq_db(&qp->wq);
+       return 0;
+}
+
 int c4iw_destroy_qp(struct ib_qp *ib_qp)
 {
        struct c4iw_dev *rhp;
@@ -1356,10 +1469,25 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        rhp = qhp->rhp;
 
        attrs.next_state = C4IW_QP_STATE_ERROR;
-       c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+       if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
+               c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+       else
+               c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);
 
-       remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
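+       /*
+        * Remove the qp from the idr; if the qp count falls back below
+        * the thresholds, leave db flow-control mode and re-enable db
+        * coalescing.
+        */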
+       spin_lock_irq(&rhp->lock);
+       remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+       rhp->qpcnt--;
+       BUG_ON(rhp->qpcnt < 0);
+       if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
+               rhp->rdev.stats.db_state_transitions++;
+               rhp->db_state = NORMAL;
+               idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
+       }
+       if (db_coalescing_threshold >= 0)
+               if (rhp->qpcnt <= db_coalescing_threshold)
+                       cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
+       spin_unlock_irq(&rhp->lock);
        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1373,6 +1501,14 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        return 0;
 }
 
+/* idr_for_each callback: disable a qp's doorbells. */
+static int disable_qp_db(int id, void *p, void *data)
+{
+       struct c4iw_qp *qp = p;
+
+       t4_disable_wq_db(&qp->wq);
+       return 0;
+}
+
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata)
 {
@@ -1385,7 +1521,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        int sqsize, rqsize;
        struct c4iw_ucontext *ucontext;
        int ret;
-       struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+       struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
 
        PDBG("%s ib_pd %p\n", __func__, pd);
 
@@ -1455,10 +1591,24 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ord = 1;
        qhp->attr.max_ird = 1;
        spin_lock_init(&qhp->lock);
+       mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
 
-       ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
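+       /*
+        * Track the qp count; past the thresholds, disable user dbs and
+        * db coalescing.
+        */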
+       spin_lock_irq(&rhp->lock);
+       if (rhp->db_state != NORMAL)
+               t4_disable_wq_db(&qhp->wq);
+       rhp->qpcnt++;
+       if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+               rhp->rdev.stats.db_state_transitions++;
+               rhp->db_state = FLOW_CONTROL;
+               idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
+       }
+       if (db_coalescing_threshold >= 0)
+               if (rhp->qpcnt > db_coalescing_threshold)
+                       cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
+       ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+       spin_unlock_irq(&rhp->lock);
        if (ret)
                goto err2;
 
@@ -1483,7 +1633,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                        ret = -ENOMEM;
                        goto err6;
                }
-
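+               /* On-chip SQs need a fifth mmap, for the MA sync register. */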
+               if (t4_sq_onchip(&qhp->wq.sq)) {
+                       mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
+                       if (!mm5) {
+                               ret = -ENOMEM;
+                               goto err7;
+                       }
+                       uresp.flags = C4IW_QPF_ONCHIP;
+               } else
+                       uresp.flags = 0;
                uresp.qid_mask = rhp->rdev.qpmask;
                uresp.sqid = qhp->wq.sq.qid;
                uresp.sq_size = qhp->wq.sq.size;
@@ -1492,6 +1650,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                uresp.rq_size = qhp->wq.rq.size;
                uresp.rq_memsize = qhp->wq.rq.memsize;
                spin_lock(&ucontext->mmap_lock);
+               if (mm5) {
+                       uresp.ma_sync_key = ucontext->key;
+                       ucontext->key += PAGE_SIZE;
+               }
                uresp.sq_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.rq_key = ucontext->key;
@@ -1503,9 +1665,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
-                       goto err7;
+                       goto err8;
                mm1->key = uresp.sq_key;
-               mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+               mm1->addr = qhp->wq.sq.phys_addr;
                mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
                insert_mmap(ucontext, mm1);
                mm2->key = uresp.rq_key;
@@ -1520,6 +1682,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                mm4->addr = qhp->wq.rq.udb;
                mm4->len = PAGE_SIZE;
                insert_mmap(ucontext, mm4);
+               if (mm5) {
+                       mm5->key = uresp.ma_sync_key;
+                       mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
+                                   + A_PCIE_MA_SYNC) & PAGE_MASK;
+                       mm5->len = PAGE_SIZE;
+                       insert_mmap(ucontext, mm5);
+               }
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
@@ -1527,6 +1696,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
             __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.sq.qid);
        return &qhp->ibqp;
+err8:
+       kfree(mm5);
 err7:
        kfree(mm4);
 err6:
@@ -1581,6 +1752,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                         C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
 
+       /*
+        * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
+        * ringing the queue db when we're in DB_FULL mode.
+        */
+       attrs.sq_db_inc = attr->sq_psn;
+       attrs.rq_db_inc = attr->rq_psn;
+       mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
+       mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+
        return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
 
@@ -1589,3 +1769,14 @@ struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
        PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
        return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
+
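+/* Minimal query_qp: only the current qp state is reported. */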
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+                    int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+       struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+
+       memset(attr, 0, sizeof *attr);
+       memset(init_attr, 0, sizeof *init_attr);
+       attr->qp_state = to_ib_qp_state(qhp->attr.state);
+       return 0;
+}