[SCSI] be2iscsi: Link Wrb with next Wrb
[linux-2.6.git] drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43 static unsigned int ring_mode;
44
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
49 module_param(be_iopoll_budget, int, 0);
50 module_param(enable_msix, int, 0);
51 module_param(be_max_phys_size, uint, S_IRUGO);
52 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
53                                    "contiguous memory that can be allocated. "
54                                    "Range is 16 - 128");
55
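/**
 * beiscsi_slave_configure - per-device setup for attached LUNs
 * @sdev: the scsi_device being configured
 *
 * Caps the request queue segment size at 64KB for devices on this host.
 */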
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58         blk_queue_max_segment_size(sdev->request_queue, 65536);
59         return 0;
60 }
61
62 /*------------------- PCI Driver operations and data ----------------- */
63 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69         { 0 }
70 };
71 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
73 static struct scsi_host_template beiscsi_sht = {
74         .module = THIS_MODULE,
75         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
76         .proc_name = DRV_NAME,
77         .queuecommand = iscsi_queuecommand,
78         .eh_abort_handler = iscsi_eh_abort,
79         .change_queue_depth = iscsi_change_queue_depth,
80         .slave_configure = beiscsi_slave_configure,
81         .target_alloc = iscsi_target_alloc,
82         .eh_device_reset_handler = iscsi_eh_device_reset,
83         .eh_target_reset_handler = iscsi_eh_target_reset,
84         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
85         .can_queue = BE2_IO_DEPTH,
86         .this_id = -1,
87         .max_sectors = BEISCSI_MAX_SECTORS,
88         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
89         .use_clustering = ENABLE_CLUSTERING,
90 };
91
92 static struct scsi_transport_template *beiscsi_scsi_transport;
93
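/**
 * beiscsi_hba_alloc - allocate and register the Scsi_Host for an adapter
 * @pcidev: PCI device backing the adapter
 *
 * Allocates an iSCSI host with the beiscsi_hba private data embedded,
 * fills in the host limits and adds it to the SCSI midlayer.
 * Returns the beiscsi_hba on success or NULL on failure.
 */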
94 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
95 {
96         struct beiscsi_hba *phba;
97         struct Scsi_Host *shost;
98
99         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
100         if (!shost) {
101                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
102                         "iscsi_host_alloc failed\n");
103                 return NULL;
104         }
105         shost->dma_boundary = pcidev->dma_mask;
106         shost->max_id = BE2_MAX_SESSIONS;
107         shost->max_channel = 0;
108         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
109         shost->max_lun = BEISCSI_NUM_MAX_LUN;
110         shost->transportt = beiscsi_scsi_transport;
111         phba = iscsi_host_priv(shost);
112         memset(phba, 0, sizeof(*phba));
113         phba->shost = shost;
114         phba->pcidev = pci_dev_get(pcidev);
115
116         if (iscsi_host_add(shost, &phba->pcidev->dev))
117                 goto free_devices;
118         return phba;
119
120 free_devices:
121         pci_dev_put(phba->pcidev);
122         iscsi_host_free(phba->shost);
123         return NULL;
124 }
125
126 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127 {
128         if (phba->csr_va) {
129                 iounmap(phba->csr_va);
130                 phba->csr_va = NULL;
131         }
132         if (phba->db_va) {
133                 iounmap(phba->db_va);
134                 phba->db_va = NULL;
135         }
136         if (phba->pci_va) {
137                 iounmap(phba->pci_va);
138                 phba->pci_va = NULL;
139         }
140 }
141
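/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and PCI config BARs
 * @phba: driver private structure for the adapter
 * @pcidev: PCI device whose BARs are mapped
 *
 * Returns 0 on success or -ENOMEM if any mapping fails; regions mapped
 * so far are torn down on error.
 */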
142 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143                                 struct pci_dev *pcidev)
144 {
145         u8 __iomem *addr;
146
147         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
148                                pci_resource_len(pcidev, 2));
149         if (addr == NULL)
150                 return -ENOMEM;
151         phba->ctrl.csr = addr;
152         phba->csr_va = addr;
153         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
154
155         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
156         if (addr == NULL)
157                 goto pci_map_err;
158         phba->ctrl.db = addr;
159         phba->db_va = addr;
160         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
161
162         addr = ioremap_nocache(pci_resource_start(pcidev, 1),
163                                pci_resource_len(pcidev, 1));
164         if (addr == NULL)
165                 goto pci_map_err;
166         phba->ctrl.pcicfg = addr;
167         phba->pci_va = addr;
168         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
169         return 0;
170
171 pci_map_err:
172         beiscsi_unmap_pci_function(phba);
173         return -ENOMEM;
174 }
175
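/**
 * beiscsi_enable_pci - enable the PCI function and set up DMA masking
 * @pcidev: PCI device to enable
 *
 * Enables the device, makes it a bus master and programs a 64-bit
 * consistent DMA mask, falling back to 32-bit if that fails.
 */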
176 static int beiscsi_enable_pci(struct pci_dev *pcidev)
177 {
178         int ret;
179
180         ret = pci_enable_device(pcidev);
181         if (ret) {
182                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
183                         "failed. Returning -ENODEV\n");
184                 return ret;
185         }
186
187         pci_set_master(pcidev);
188         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
189                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
190                 if (ret) {
191                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
192                         pci_disable_device(pcidev);
193                         return ret;
194                 }
195         }
196         return 0;
197 }
198
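/**
 * be_ctrl_init - initialise the adapter control structure
 * @phba: driver private structure for the adapter
 * @pdev: PCI device backing the adapter
 *
 * Maps the PCI BARs, allocates the DMA-coherent mailbox used for
 * firmware commands and initialises the mailbox and MCC locks.
 */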
199 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
200 {
201         struct be_ctrl_info *ctrl = &phba->ctrl;
202         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
203         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
204         int status = 0;
205
206         ctrl->pdev = pdev;
207         status = beiscsi_map_pci_bars(phba, pdev);
208         if (status)
209                 return status;
210         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
211         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
212                                                   mbox_mem_alloc->size,
213                                                   &mbox_mem_alloc->dma);
214         if (!mbox_mem_alloc->va) {
215                 beiscsi_unmap_pci_function(phba);
216                 status = -ENOMEM;
217                 return status;
218         }
219
220         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
221         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
222         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
223         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
224         spin_lock_init(&ctrl->mbox_lock);
225         spin_lock_init(&phba->ctrl.mcc_lock);
226         spin_lock_init(&phba->ctrl.mcc_cq_lock);
227
228         return status;
229 }
230
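/**
 * beiscsi_get_params - derive driver limits from the firmware configuration
 * @phba: driver private structure for the adapter
 *
 * Computes the per-controller IO, connection, ICD and default PDU
 * resources and sizes the event and completion queues accordingly.
 */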
231 static void beiscsi_get_params(struct beiscsi_hba *phba)
232 {
233         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
234                                     - (phba->fw_config.iscsi_cid_count
235                                     + BE2_TMFS
236                                     + BE2_NOPOUT_REQ));
237         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
238         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
239         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
240         phba->params.num_sge_per_io = BE2_SGE;
241         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
242         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
243         phba->params.eq_timer = 64;
244         phba->params.num_eq_entries =
245             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
246                                     + BE2_TMFS) / 512) + 1) * 512;
247         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
248                                 ? 1024 : phba->params.num_eq_entries;
249         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
250                              phba->params.num_eq_entries);
251         phba->params.num_cq_entries =
252             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
253                                     + BE2_TMFS) / 512) + 1) * 512;
254         phba->params.wrbs_per_cxn = 256;
255 }
256
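/**
 * hwi_ring_eq_db - ring an event queue doorbell
 * @phba: driver private structure for the adapter
 * @id: EQ ring id
 * @clr_interrupt: clear the interrupt if set
 * @num_processed: number of EQEs being returned to hardware
 * @rearm: rearm the EQ if set
 * @event: set the event bit in the doorbell
 */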
257 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
258                            unsigned int id, unsigned int clr_interrupt,
259                            unsigned int num_processed,
260                            unsigned char rearm, unsigned char event)
261 {
262         u32 val = 0;
263         val |= id & DB_EQ_RING_ID_MASK;
264         if (rearm)
265                 val |= 1 << DB_EQ_REARM_SHIFT;
266         if (clr_interrupt)
267                 val |= 1 << DB_EQ_CLR_SHIFT;
268         if (event)
269                 val |= 1 << DB_EQ_EVNT_SHIFT;
270         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
271         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
272 }
273
274 /**
275  * be_isr_mcc - The isr routine of the driver.
276  * @irq: Not used
277  * @dev_id: Pointer to host adapter structure
278  */
279 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
280 {
281         struct beiscsi_hba *phba;
282         struct be_eq_entry *eqe = NULL;
283         struct be_queue_info *eq;
284         struct be_queue_info *mcc;
285         unsigned int num_eq_processed;
286         struct be_eq_obj *pbe_eq;
287         unsigned long flags;
288
289         pbe_eq = dev_id;
290         eq = &pbe_eq->q;
291         phba =  pbe_eq->phba;
292         mcc = &phba->ctrl.mcc_obj.cq;
293         eqe = queue_tail_node(eq);
294         if (!eqe)
295                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
296
297         num_eq_processed = 0;
298
299         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
300                                 & EQE_VALID_MASK) {
301                 if (((eqe->dw[offsetof(struct amap_eq_entry,
302                      resource_id) / 32] &
303                      EQE_RESID_MASK) >> 16) == mcc->id) {
304                         spin_lock_irqsave(&phba->isr_lock, flags);
305                         phba->todo_mcc_cq = 1;
306                         spin_unlock_irqrestore(&phba->isr_lock, flags);
307                 }
308                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
309                 queue_tail_inc(eq);
310                 eqe = queue_tail_node(eq);
311                 num_eq_processed++;
312         }
313         if (phba->todo_mcc_cq)
314                 queue_work(phba->wq, &phba->work_cqs);
315         if (num_eq_processed)
316                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
317
318         return IRQ_HANDLED;
319 }
320
321 /**
322  * be_isr_msix - The isr routine of the driver.
323  * @irq: Not used
324  * @dev_id: Pointer to host adapter structure
325  */
326 static irqreturn_t be_isr_msix(int irq, void *dev_id)
327 {
328         struct beiscsi_hba *phba;
329         struct be_eq_entry *eqe = NULL;
330         struct be_queue_info *eq;
331         struct be_queue_info *cq;
332         unsigned int num_eq_processed;
333         struct be_eq_obj *pbe_eq;
334         unsigned long flags;
335
336         pbe_eq = dev_id;
337         eq = &pbe_eq->q;
338         cq = pbe_eq->cq;
339         eqe = queue_tail_node(eq);
340         if (!eqe)
341                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
342
343         phba = pbe_eq->phba;
344         num_eq_processed = 0;
345         if (blk_iopoll_enabled) {
346                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
347                                         & EQE_VALID_MASK) {
348                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
349                                 blk_iopoll_sched(&pbe_eq->iopoll);
350
351                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
352                         queue_tail_inc(eq);
353                         eqe = queue_tail_node(eq);
354                         num_eq_processed++;
355                 }
356                 if (num_eq_processed)
357                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
358
359                 return IRQ_HANDLED;
360         } else {
361                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
362                                                 & EQE_VALID_MASK) {
363                         spin_lock_irqsave(&phba->isr_lock, flags);
364                         phba->todo_cq = 1;
365                         spin_unlock_irqrestore(&phba->isr_lock, flags);
366                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
367                         queue_tail_inc(eq);
368                         eqe = queue_tail_node(eq);
369                         num_eq_processed++;
370                 }
371                 if (phba->todo_cq)
372                         queue_work(phba->wq, &phba->work_cqs);
373
374                 if (num_eq_processed)
375                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
376
377                 return IRQ_HANDLED;
378         }
379 }
380
381 /**
382  * be_isr - The isr routine of the driver.
383  * @irq: Not used
384  * @dev_id: Pointer to host adapter structure
385  */
386 static irqreturn_t be_isr(int irq, void *dev_id)
387 {
388         struct beiscsi_hba *phba;
389         struct hwi_controller *phwi_ctrlr;
390         struct hwi_context_memory *phwi_context;
391         struct be_eq_entry *eqe = NULL;
392         struct be_queue_info *eq;
393         struct be_queue_info *cq;
394         struct be_queue_info *mcc;
395         unsigned long flags, index;
396         unsigned int num_mcceq_processed, num_ioeq_processed;
397         struct be_ctrl_info *ctrl;
398         struct be_eq_obj *pbe_eq;
399         int isr;
400
401         phba = dev_id;
402         ctrl = &phba->ctrl;
403         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
404                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
405         if (!isr)
406                 return IRQ_NONE;
407
408         phwi_ctrlr = phba->phwi_ctrlr;
409         phwi_context = phwi_ctrlr->phwi_ctxt;
410         pbe_eq = &phwi_context->be_eq[0];
411
412         eq = &phwi_context->be_eq[0].q;
413         mcc = &phba->ctrl.mcc_obj.cq;
414         index = 0;
415         eqe = queue_tail_node(eq);
416         if (!eqe)
417                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
418
419         num_ioeq_processed = 0;
420         num_mcceq_processed = 0;
421         if (blk_iopoll_enabled) {
422                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
423                                         & EQE_VALID_MASK) {
424                         if (((eqe->dw[offsetof(struct amap_eq_entry,
425                              resource_id) / 32] &
426                              EQE_RESID_MASK) >> 16) == mcc->id) {
427                                 spin_lock_irqsave(&phba->isr_lock, flags);
428                                 phba->todo_mcc_cq = 1;
429                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
430                                 num_mcceq_processed++;
431                         } else {
432                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
433                                         blk_iopoll_sched(&pbe_eq->iopoll);
434                                 num_ioeq_processed++;
435                         }
436                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
437                         queue_tail_inc(eq);
438                         eqe = queue_tail_node(eq);
439                 }
440                 if (num_ioeq_processed || num_mcceq_processed) {
441                         if (phba->todo_mcc_cq)
442                                 queue_work(phba->wq, &phba->work_cqs);
443
444                         if ((num_mcceq_processed) && (!num_ioeq_processed))
445                                 hwi_ring_eq_db(phba, eq->id, 0,
446                                                (num_ioeq_processed +
447                                                 num_mcceq_processed), 1, 1);
448                         else
449                                 hwi_ring_eq_db(phba, eq->id, 0,
450                                                (num_ioeq_processed +
451                                                 num_mcceq_processed), 0, 1);
452
453                         return IRQ_HANDLED;
454                 } else
455                         return IRQ_NONE;
456         } else {
457                 cq = &phwi_context->be_cq[0];
458                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
459                                                 & EQE_VALID_MASK) {
460
461                         if (((eqe->dw[offsetof(struct amap_eq_entry,
462                              resource_id) / 32] &
463                              EQE_RESID_MASK) >> 16) != cq->id) {
464                                 spin_lock_irqsave(&phba->isr_lock, flags);
465                                 phba->todo_mcc_cq = 1;
466                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
467                         } else {
468                                 spin_lock_irqsave(&phba->isr_lock, flags);
469                                 phba->todo_cq = 1;
470                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
471                         }
472                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
473                         queue_tail_inc(eq);
474                         eqe = queue_tail_node(eq);
475                         num_ioeq_processed++;
476                 }
477                 if (phba->todo_cq || phba->todo_mcc_cq)
478                         queue_work(phba->wq, &phba->work_cqs);
479
480                 if (num_ioeq_processed) {
481                         hwi_ring_eq_db(phba, eq->id, 0,
482                                        num_ioeq_processed, 1, 1);
483                         return IRQ_HANDLED;
484                 } else
485                         return IRQ_NONE;
486         }
487 }
488
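/**
 * beiscsi_init_irqs - register the driver interrupt handlers
 * @phba: driver private structure for the adapter
 *
 * With MSI-X enabled, one vector per CPU is requested for the I/O event
 * queues plus one for the MCC event queue; otherwise a single shared
 * INTx handler is registered.
 */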
489 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
490 {
491         struct pci_dev *pcidev = phba->pcidev;
492         struct hwi_controller *phwi_ctrlr;
493         struct hwi_context_memory *phwi_context;
494         int ret, msix_vec, i = 0;
495         char desc[32];
496
497         phwi_ctrlr = phba->phwi_ctrlr;
498         phwi_context = phwi_ctrlr->phwi_ctxt;
499
500         if (phba->msix_enabled) {
501                 for (i = 0; i < phba->num_cpus; i++) {
502                         sprintf(desc, "beiscsi_msix_%04x", i);
503                         msix_vec = phba->msix_entries[i].vector;
504                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
505                                           &phwi_context->be_eq[i]);
                        if (ret) {
                                shost_printk(KERN_ERR, phba->shost,
                                             "beiscsi_init_irqs - Failed to "
                                             "register msix irq %d\n", i);
                                goto free_msix_irqs;
                        }
506                 }
507                 msix_vec = phba->msix_entries[i].vector;
508                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
509                                   &phwi_context->be_eq[i]);
                if (ret) {
                        shost_printk(KERN_ERR, phba->shost,
                                     "beiscsi_init_irqs - Failed to "
                                     "register beiscsi_msix_mcc\n");
                        goto free_msix_irqs;
                }
510         } else {
511                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
512                                   "beiscsi", phba);
513                 if (ret) {
514                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
515                                      "Failed to register irq\n");
516                         return ret;
517                 }
518         }
519         return 0;

free_msix_irqs:
        while (i--)
                free_irq(phba->msix_entries[i].vector,
                         &phwi_context->be_eq[i]);
        return ret;
520 }
521
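/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 * @phba: driver private structure for the adapter
 * @id: CQ ring id
 * @num_processed: number of CQEs being returned to hardware
 * @rearm: rearm the CQ if set
 * @event: unused for the CQ doorbell
 */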
522 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
523                            unsigned int id, unsigned int num_processed,
524                            unsigned char rearm, unsigned char event)
525 {
526         u32 val = 0;
527         val |= id & DB_CQ_RING_ID_MASK;
528         if (rearm)
529                 val |= 1 << DB_CQ_REARM_SHIFT;
530         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
531         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
532 }
533
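/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: driver connection the PDU arrived on
 * @phba: driver private structure for the adapter
 * @cid: connection id
 * @ppdu: PDU header received from the adapter
 * @pdu_len: length of the PDU header
 * @pbuffer: data segment of the PDU, if any
 * @buf_len: length of the data segment
 *
 * Fixes up the header per opcode and completes the PDU through
 * __iscsi_complete_pdu() under the session lock. Returns 0 on success
 * or 1 for an unrecognized opcode.
 */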
534 static unsigned int
535 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
536                           struct beiscsi_hba *phba,
537                           unsigned short cid,
538                           struct pdu_base *ppdu,
539                           unsigned long pdu_len,
540                           void *pbuffer, unsigned long buf_len)
541 {
542         struct iscsi_conn *conn = beiscsi_conn->conn;
543         struct iscsi_session *session = conn->session;
544         struct iscsi_task *task;
545         struct beiscsi_io_task *io_task;
546         struct iscsi_hdr *login_hdr;
547
548         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
549                                                 PDUBASE_OPCODE_MASK) {
550         case ISCSI_OP_NOOP_IN:
551                 pbuffer = NULL;
552                 buf_len = 0;
553                 break;
554         case ISCSI_OP_ASYNC_EVENT:
555                 break;
556         case ISCSI_OP_REJECT:
557                 WARN_ON(!pbuffer);
558                 WARN_ON(!(buf_len == 48));
559                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
560                 break;
561         case ISCSI_OP_LOGIN_RSP:
562                 task = conn->login_task;
563                 io_task = task->dd_data;
564                 login_hdr = (struct iscsi_hdr *)ppdu;
565                 login_hdr->itt = io_task->libiscsi_itt;
566                 break;
567         default:
568                 shost_printk(KERN_WARNING, phba->shost,
569                              "Unrecognized opcode 0x%x in async msg \n",
570                              (ppdu->
571                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
572                                                 & PDUBASE_OPCODE_MASK));
573                 return 1;
574         }
575
576         spin_lock_bh(&session->lock);
577         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
578         spin_unlock_bh(&session->lock);
579         return 0;
580 }
581
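/**
 * alloc_io_sgl_handle - take an SGL handle from the IO pool
 * @phba: driver private structure for the adapter
 *
 * Returns the handle at the current allocation index and advances the
 * index, or NULL if the pool is exhausted.
 */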
582 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
583 {
584         struct sgl_handle *psgl_handle;
585
586         if (phba->io_sgl_hndl_avbl) {
587                 SE_DEBUG(DBG_LVL_8,
588                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
589                          phba->io_sgl_alloc_index);
590                 psgl_handle = phba->io_sgl_hndl_base[phba->
591                                                 io_sgl_alloc_index];
592                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
593                 phba->io_sgl_hndl_avbl--;
594                 if (phba->io_sgl_alloc_index == (phba->params.
595                                                  ios_per_ctrl - 1))
596                         phba->io_sgl_alloc_index = 0;
597                 else
598                         phba->io_sgl_alloc_index++;
599         } else
600                 psgl_handle = NULL;
601         return psgl_handle;
602 }
603
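/**
 * free_io_sgl_handle - return an SGL handle to the IO pool
 * @phba: driver private structure for the adapter
 * @psgl_handle: handle being freed
 *
 * A double free (slot still occupied) is only logged; it can happen when
 * clean_task is called for a task that failed in xmit_task or alloc_pdu.
 */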
604 static void
605 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
606 {
607         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
608                  phba->io_sgl_free_index);
609         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
610                 /*
611                  * this can happen if clean_task is called on a task that
612                  * failed in xmit_task or alloc_pdu.
613                  */
614                  SE_DEBUG(DBG_LVL_8,
615                          "Double Free in IO SGL io_sgl_free_index=%d,"
616                          "value there=%p \n", phba->io_sgl_free_index,
617                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
618                 return;
619         }
620         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
621         phba->io_sgl_hndl_avbl++;
622         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
623                 phba->io_sgl_free_index = 0;
624         else
625                 phba->io_sgl_free_index++;
626 }
627
628 /**
629  * alloc_wrb_handle - To allocate a wrb handle
630  * @phba: The hba pointer
631  * @cid: The cid to use for allocation
632  *
633  * This happens under session_lock until submission to chip
634  */
635 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
636 {
637         struct hwi_wrb_context *pwrb_context;
638         struct hwi_controller *phwi_ctrlr;
639         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
640
641         phwi_ctrlr = phba->phwi_ctrlr;
642         pwrb_context = &phwi_ctrlr->wrb_context[cid];
643         if (pwrb_context->wrb_handles_available >= 2) {
644                 pwrb_handle = pwrb_context->pwrb_handle_base[
645                                             pwrb_context->alloc_index];
646                 pwrb_context->wrb_handles_available--;
647                 if (pwrb_context->alloc_index ==
648                                                 (phba->params.wrbs_per_cxn - 1))
649                         pwrb_context->alloc_index = 0;
650                 else
651                         pwrb_context->alloc_index++;
652
653                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
654                                                 pwrb_context->alloc_index];
655                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
656         } else
657                 pwrb_handle = NULL;
658         return pwrb_handle;
659 }
660
661 /**
662  * free_wrb_handle - To free the wrb handle back to pool
663  * @phba: The hba pointer
664  * @pwrb_context: The context to free from
665  * @pwrb_handle: The wrb_handle to free
666  *
667  * This happens under session_lock until submission to chip
668  */
669 static void
670 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
671                 struct wrb_handle *pwrb_handle)
672 {
673         if (!ring_mode)
674                 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
675                                                pwrb_handle;
676         pwrb_context->wrb_handles_available++;
677         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
678                 pwrb_context->free_index = 0;
679         else
680                 pwrb_context->free_index++;
681
682         SE_DEBUG(DBG_LVL_8,
683                  "FREE WRB: pwrb_handle=%p free_index=0x%x"
684                  "wrb_handles_available=%d \n",
685                  pwrb_handle, pwrb_context->free_index,
686                  pwrb_context->wrb_handles_available);
687 }
688
689 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
690 {
691         struct sgl_handle *psgl_handle;
692
693         if (phba->eh_sgl_hndl_avbl) {
694                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
695                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
696                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
697                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
698                 phba->eh_sgl_hndl_avbl--;
699                 if (phba->eh_sgl_alloc_index ==
700                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
701                      1))
702                         phba->eh_sgl_alloc_index = 0;
703                 else
704                         phba->eh_sgl_alloc_index++;
705         } else
706                 psgl_handle = NULL;
707         return psgl_handle;
708 }
709
710 void
711 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
712 {
713
714         SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
715                              phba->eh_sgl_free_index);
716         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
717                 /*
718                  * this can happen if clean_task is called on a task that
719                  * failed in xmit_task or alloc_pdu.
720                  */
721                 SE_DEBUG(DBG_LVL_8,
722                          "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
723                          phba->eh_sgl_free_index);
724                 return;
725         }
726         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
727         phba->eh_sgl_hndl_avbl++;
728         if (phba->eh_sgl_free_index ==
729             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
730                 phba->eh_sgl_free_index = 0;
731         else
732                 phba->eh_sgl_free_index++;
733 }
734
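/**
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: driver connection the command belongs to
 * @task: libiscsi task being completed
 * @psol: solicited completion entry from the adapter
 *
 * Extracts the response, status, flags and residual count from the CQE,
 * copies sense data on CHECK CONDITION, unmaps the command and completes
 * the task with the CmdSN window from the CQE.
 */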
735 static void
736 be_complete_io(struct beiscsi_conn *beiscsi_conn,
737                struct iscsi_task *task, struct sol_cqe *psol)
738 {
739         struct beiscsi_io_task *io_task = task->dd_data;
740         struct be_status_bhs *sts_bhs =
741                                 (struct be_status_bhs *)io_task->cmd_bhs;
742         struct iscsi_conn *conn = beiscsi_conn->conn;
743         unsigned int sense_len;
744         unsigned char *sense;
745         u32 resid = 0, exp_cmdsn, max_cmdsn;
746         u8 rsp, status, flags;
747
748         exp_cmdsn = (psol->
749                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
750                         & SOL_EXP_CMD_SN_MASK);
751         max_cmdsn = ((psol->
752                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
753                         & SOL_EXP_CMD_SN_MASK) +
754                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
755                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
756         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
757                                                 & SOL_RESP_MASK) >> 16);
758         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
759                                                 & SOL_STS_MASK) >> 8);
760         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
761                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
762
763         task->sc->result = (DID_OK << 16) | status;
764         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
765                 task->sc->result = DID_ERROR << 16;
766                 goto unmap;
767         }
768
769         /* bidi not initially supported */
770         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
771                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
772                                 32] & SOL_RES_CNT_MASK);
773
774                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
775                         task->sc->result = DID_ERROR << 16;
776
777                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
778                         scsi_set_resid(task->sc, resid);
779                         if (!status && (scsi_bufflen(task->sc) - resid <
780                             task->sc->underflow))
781                                 task->sc->result = DID_ERROR << 16;
782                 }
783         }
784
785         if (status == SAM_STAT_CHECK_CONDITION) {
786                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
787                 sense = sts_bhs->sense_info + sizeof(unsigned short);
788                 sense_len = be16_to_cpu(*slen);
789                 memcpy(task->sc->sense_buffer, sense,
790                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
791         }
792         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
793                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
794                                                         & SOL_RES_CNT_MASK)
795                          conn->rxdata_octets += (psol->
796                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
797                              & SOL_RES_CNT_MASK);
798         }
799 unmap:
800         scsi_dma_unmap(io_task->scsi_cmnd);
801         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
802 }
803
804 static void
805 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
806                    struct iscsi_task *task, struct sol_cqe *psol)
807 {
808         struct iscsi_logout_rsp *hdr;
809         struct beiscsi_io_task *io_task = task->dd_data;
810         struct iscsi_conn *conn = beiscsi_conn->conn;
811
812         hdr = (struct iscsi_logout_rsp *)task->hdr;
813         hdr->t2wait = 5;
814         hdr->t2retain = 0;
815         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
816                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
817         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
818                                         32] & SOL_RESP_MASK);
819         hdr->exp_cmdsn = cpu_to_be32(psol->
820                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
821                                         & SOL_EXP_CMD_SN_MASK);
822         hdr->max_cmdsn = be32_to_cpu((psol->
823                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
824                                         & SOL_EXP_CMD_SN_MASK) +
825                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
826                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
827         hdr->hlength = 0;
828         hdr->itt = io_task->libiscsi_itt;
829         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
830 }
831
832 static void
833 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
834                 struct iscsi_task *task, struct sol_cqe *psol)
835 {
836         struct iscsi_tm_rsp *hdr;
837         struct iscsi_conn *conn = beiscsi_conn->conn;
838         struct beiscsi_io_task *io_task = task->dd_data;
839
840         hdr = (struct iscsi_tm_rsp *)task->hdr;
841         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
842                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
843         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
844                                         32] & SOL_RESP_MASK);
845         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
846                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
847         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
848                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
849                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
850                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
851         hdr->itt = io_task->libiscsi_itt;
852         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
853 }
854
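/**
 * hwi_complete_drvr_msgs - complete a driver-internal request
 * @beiscsi_conn: driver connection the completion belongs to
 * @phba: driver private structure for the adapter
 * @psol: solicited completion entry from the adapter
 *
 * Releases the management SGL handle and the WRB handle used by the
 * completed request.
 */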
855 static void
856 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
857                        struct beiscsi_hba *phba, struct sol_cqe *psol)
858 {
859         struct hwi_wrb_context *pwrb_context;
860         struct wrb_handle *pwrb_handle = NULL;
861         struct sgl_handle *psgl_handle = NULL;
862         struct hwi_controller *phwi_ctrlr;
863         struct iscsi_task *task;
864         struct beiscsi_io_task *io_task;
865         struct iscsi_conn *conn = beiscsi_conn->conn;
866         struct iscsi_session *session = conn->session;
867
868         phwi_ctrlr = phba->phwi_ctrlr;
869         if (ring_mode) {
870                 psgl_handle = phba->sgl_hndl_array[((psol->
871                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
872                                 32] & SOL_ICD_INDEX_MASK) >> 6)];
873                 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
874                 task = psgl_handle->task;
875                 pwrb_handle = NULL;
876         } else {
877                 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
878                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
879                                 SOL_CID_MASK) >> 6) -
880                                 phba->fw_config.iscsi_cid_start];
881                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
882                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
883                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
884                 task = pwrb_handle->pio_handle;
885         }
886
887         io_task = task->dd_data;
888         spin_lock(&phba->mgmt_sgl_lock);
889         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
890         spin_unlock(&phba->mgmt_sgl_lock);
891         spin_lock_bh(&session->lock);
892         free_wrb_handle(phba, pwrb_context, pwrb_handle);
893         spin_unlock_bh(&session->lock);
894 }
895
896 static void
897 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
898                        struct iscsi_task *task, struct sol_cqe *psol)
899 {
900         struct iscsi_nopin *hdr;
901         struct iscsi_conn *conn = beiscsi_conn->conn;
902         struct beiscsi_io_task *io_task = task->dd_data;
903
904         hdr = (struct iscsi_nopin *)task->hdr;
905         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
906                         & SOL_FLAGS_MASK) >> 24) | 0x80;
907         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
908                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
909         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
910                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
911                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
912                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
913         hdr->opcode = ISCSI_OP_NOOP_IN;
914         hdr->itt = io_task->libiscsi_itt;
915         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
916 }
917
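/**
 * hwi_complete_cmd - dispatch a solicited completion to its handler
 * @beiscsi_conn: driver connection the completion belongs to
 * @phba: driver private structure for the adapter
 * @psol: solicited completion entry from the adapter
 *
 * Looks up the task from the WRB handle (or SGL handle in ring mode),
 * determines the WRB type and invokes the matching completion routine
 * under the session lock.
 */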
918 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
919                              struct beiscsi_hba *phba, struct sol_cqe *psol)
920 {
921         struct hwi_wrb_context *pwrb_context;
922         struct wrb_handle *pwrb_handle;
923         struct iscsi_wrb *pwrb = NULL;
924         struct hwi_controller *phwi_ctrlr;
925         struct iscsi_task *task;
926         struct sgl_handle *psgl_handle = NULL;
927         unsigned int type;
928         struct iscsi_conn *conn = beiscsi_conn->conn;
929         struct iscsi_session *session = conn->session;
930
931         phwi_ctrlr = phba->phwi_ctrlr;
932         if (ring_mode) {
933                 psgl_handle = phba->sgl_hndl_array[((psol->
934                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
935                               32] & SOL_ICD_INDEX_MASK) >> 6)];
936                 task = psgl_handle->task;
937                 type = psgl_handle->type;
938         } else {
939                 pwrb_context = &phwi_ctrlr->
940                                 wrb_context[((psol->dw[offsetof
941                                 (struct amap_sol_cqe, cid) / 32]
942                                 & SOL_CID_MASK) >> 6) -
943                                 phba->fw_config.iscsi_cid_start];
944                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
945                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
946                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
947                 task = pwrb_handle->pio_handle;
948                 pwrb = pwrb_handle->pwrb;
949                 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
950                          WRB_TYPE_MASK) >> 28;
951         }
952         spin_lock_bh(&session->lock);
953         switch (type) {
954         case HWH_TYPE_IO:
955         case HWH_TYPE_IO_RD:
956                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
957                     ISCSI_OP_NOOP_OUT) {
958                         be_complete_nopin_resp(beiscsi_conn, task, psol);
959                 } else
960                         be_complete_io(beiscsi_conn, task, psol);
961                 break;
962
963         case HWH_TYPE_LOGOUT:
964                 be_complete_logout(beiscsi_conn, task, psol);
965                 break;
966
967         case HWH_TYPE_LOGIN:
968                 SE_DEBUG(DBG_LVL_1,
969                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
970                          "- Solicited path \n");
971                 break;
972
973         case HWH_TYPE_TMF:
974                 be_complete_tmf(beiscsi_conn, task, psol);
975                 break;
976
977         case HWH_TYPE_NOP:
978                 be_complete_nopin_resp(beiscsi_conn, task, psol);
979                 break;
980
981         default:
982                 if (ring_mode)
983                         shost_printk(KERN_WARNING, phba->shost,
984                                 "In hwi_complete_cmd, unknown type = %d "
985                                 "icd_index 0x%x CID 0x%x\n", type,
986                                 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
987                                 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
988                                 psgl_handle->cid);
989                 else
990                         shost_printk(KERN_WARNING, phba->shost,
991                                 "In hwi_complete_cmd, unknown type = %d "
992                                 "wrb_index 0x%x CID 0x%x\n", type,
993                                 ((psol->dw[offsetof(struct amap_sol_cqe,
994                                 wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
995                                 ((psol->dw[offsetof(struct amap_sol_cqe,
996                                 cid) / 32] & SOL_CID_MASK) >> 6));
997                 break;
998         }
999
1000         spin_unlock_bh(&session->lock);
1001 }
1002
1003 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1004                                           *pasync_ctx, unsigned int is_header,
1005                                           unsigned int host_write_ptr)
1006 {
1007         if (is_header)
1008                 return &pasync_ctx->async_entry[host_write_ptr].
1009                     header_busy_list;
1010         else
1011                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1012 }
1013
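/**
 * hwi_get_async_handle - map a default PDU CQE back to its async handle
 * @phba: driver private structure for the adapter
 * @beiscsi_conn: driver connection the CQE belongs to
 * @pasync_ctx: async PDU context
 * @pdpdu_cqe: default PDU completion entry
 * @pcq_index: returns the ring index reported by the CQE
 *
 * Recovers the buffer address from the CQE, finds the matching handle on
 * the header or data busy list and records its CRI, type and length.
 */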
1014 static struct async_pdu_handle *
1015 hwi_get_async_handle(struct beiscsi_hba *phba,
1016                      struct beiscsi_conn *beiscsi_conn,
1017                      struct hwi_async_pdu_context *pasync_ctx,
1018                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1019 {
1020         struct be_bus_address phys_addr;
1021         struct list_head *pbusy_list;
1022         struct async_pdu_handle *pasync_handle = NULL;
1023         int buffer_len = 0;
1024         unsigned char buffer_index = -1;
1025         unsigned char is_header = 0;
1026
1027         phys_addr.u.a32.address_lo =
1028             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1029             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1030                                                 & PDUCQE_DPL_MASK) >> 16);
1031         phys_addr.u.a32.address_hi =
1032             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1033
1034         phys_addr.u.a64.address =
1035                         *((unsigned long long *)(&phys_addr.u.a64.address));
1036
1037         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1038                         & PDUCQE_CODE_MASK) {
1039         case UNSOL_HDR_NOTIFY:
1040                 is_header = 1;
1041
1042                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1043                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1044                         index) / 32] & PDUCQE_INDEX_MASK));
1045
1046                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1047                                 pasync_ctx->async_header.pa_base.u.a64.address);
1048
1049                 buffer_index = buffer_len /
1050                                 pasync_ctx->async_header.buffer_size;
1051
1052                 break;
1053         case UNSOL_DATA_NOTIFY:
1054                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1055                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1056                                         index) / 32] & PDUCQE_INDEX_MASK));
1057                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1058                                         pasync_ctx->async_data.pa_base.u.
1059                                         a64.address);
1060                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1061                 break;
1062         default:
1063                 pbusy_list = NULL;
1064                 shost_printk(KERN_WARNING, phba->shost,
1065                         "Unexpected code=%d \n",
1066                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1067                                         code) / 32] & PDUCQE_CODE_MASK);
1068                 return NULL;
1069         }
1070
1071         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1072         WARN_ON(list_empty(pbusy_list));
1073         list_for_each_entry(pasync_handle, pbusy_list, link) {
1074                 WARN_ON(pasync_handle->consumed);
1075                 if (pasync_handle->index == buffer_index)
1076                         break;
1077         }
1078
1079         WARN_ON(!pasync_handle);
1080
1081         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1082                                              phba->fw_config.iscsi_cid_start;
1083         pasync_handle->is_header = is_header;
1084         pasync_handle->buffer_len = ((pdpdu_cqe->
1085                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1086                         & PDUCQE_DPL_MASK) >> 16);
1087
1088         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1089                         index) / 32] & PDUCQE_INDEX_MASK);
1090         return pasync_handle;
1091 }
1092
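/**
 * hwi_update_async_writables - advance the endpoint read pointer
 * @pasync_ctx: async PDU context
 * @is_header: nonzero for the header ring, zero for the data ring
 * @cq_index: ring index reported by the CQE
 *
 * Walks the ring from the last endpoint read position up to @cq_index,
 * marking busy entries as consumed and counting newly writable slots.
 */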
1093 static unsigned int
1094 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1095                            unsigned int is_header, unsigned int cq_index)
1096 {
1097         struct list_head *pbusy_list;
1098         struct async_pdu_handle *pasync_handle;
1099         unsigned int num_entries, writables = 0;
1100         unsigned int *pep_read_ptr, *pwritables;
1101
1102
1103         if (is_header) {
1104                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1105                 pwritables = &pasync_ctx->async_header.writables;
1106                 num_entries = pasync_ctx->async_header.num_entries;
1107         } else {
1108                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1109                 pwritables = &pasync_ctx->async_data.writables;
1110                 num_entries = pasync_ctx->async_data.num_entries;
1111         }
1112
1113         while ((*pep_read_ptr) != cq_index) {
1114                 (*pep_read_ptr)++;
1115                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1116
1117                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1118                                                      *pep_read_ptr);
1119                 if (writables == 0)
1120                         WARN_ON(list_empty(pbusy_list));
1121
1122                 if (!list_empty(pbusy_list)) {
1123                         pasync_handle = list_entry(pbusy_list->next,
1124                                                    struct async_pdu_handle,
1125                                                    link);
1126                         WARN_ON(!pasync_handle);
1127                         pasync_handle->consumed = 1;
1128                 }
1129
1130                 writables++;
1131         }
1132
1133         if (!writables) {
1134                 SE_DEBUG(DBG_LVL_1,
1135                          "Duplicate notification received - index 0x%x!!\n",
1136                          cq_index);
1137                 WARN_ON(1);
1138         }
1139
1140         *pwritables = *pwritables + writables;
1141         return 0;
1142 }
1143
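/**
 * hwi_free_async_msg - return the async handles queued for a CRI
 * @phba: driver private structure for the adapter
 * @cri: connection resource index whose wait queue is emptied
 *
 * The first handle on the wait queue goes back to the header free list,
 * the rest to the data free list; the per-CRI wait queue counters are
 * then reset.
 */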
1144 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1145                                        unsigned int cri)
1146 {
1147         struct hwi_controller *phwi_ctrlr;
1148         struct hwi_async_pdu_context *pasync_ctx;
1149         struct async_pdu_handle *pasync_handle, *tmp_handle;
1150         struct list_head *plist;
1151         unsigned int i = 0;
1152
1153         phwi_ctrlr = phba->phwi_ctrlr;
1154         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1155
1156         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1157
1158         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1159                 list_del(&pasync_handle->link);
1160
1161                 if (i == 0) {
1162                         list_add_tail(&pasync_handle->link,
1163                                       &pasync_ctx->async_header.free_list);
1164                         pasync_ctx->async_header.free_entries++;
1165                         i++;
1166                 } else {
1167                         list_add_tail(&pasync_handle->link,
1168                                       &pasync_ctx->async_data.free_list);
1169                         pasync_ctx->async_data.free_entries++;
1170                         i++;
1171                 }
1172         }
1173
1174         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1175         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1176         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1177         return 0;
1178 }
1179
1180 static struct phys_addr *
1181 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1182                      unsigned int is_header, unsigned int host_write_ptr)
1183 {
1184         struct phys_addr *pasync_sge = NULL;
1185
1186         if (is_header)
1187                 pasync_sge = pasync_ctx->async_header.ring_base;
1188         else
1189                 pasync_sge = pasync_ctx->async_data.ring_base;
1190
1191         return pasync_sge + host_write_ptr;
1192 }
1193
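/**
 * hwi_post_async_buffers - replenish the default PDU header or data ring
 * @phba: driver private structure for the adapter
 * @is_header: nonzero for the header ring, zero for the data ring
 *
 * Moves free async handles onto the busy lists in multiples of eight,
 * writes their addresses into the ring and rings the RXULP doorbell.
 */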
1194 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1195                                    unsigned int is_header)
1196 {
1197         struct hwi_controller *phwi_ctrlr;
1198         struct hwi_async_pdu_context *pasync_ctx;
1199         struct async_pdu_handle *pasync_handle;
1200         struct list_head *pfree_link, *pbusy_list;
1201         struct phys_addr *pasync_sge;
1202         unsigned int ring_id, num_entries;
1203         unsigned int host_write_num;
1204         unsigned int writables;
1205         unsigned int i = 0;
1206         u32 doorbell = 0;
1207
1208         phwi_ctrlr = phba->phwi_ctrlr;
1209         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1210
1211         if (is_header) {
1212                 num_entries = pasync_ctx->async_header.num_entries;
1213                 writables = min(pasync_ctx->async_header.writables,
1214                                 pasync_ctx->async_header.free_entries);
1215                 pfree_link = pasync_ctx->async_header.free_list.next;
1216                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1217                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1218         } else {
1219                 num_entries = pasync_ctx->async_data.num_entries;
1220                 writables = min(pasync_ctx->async_data.writables,
1221                                 pasync_ctx->async_data.free_entries);
1222                 pfree_link = pasync_ctx->async_data.free_list.next;
1223                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1224                 ring_id = phwi_ctrlr->default_pdu_data.id;
1225         }
1226
1227         writables = (writables / 8) * 8;
1228         if (writables) {
1229                 for (i = 0; i < writables; i++) {
1230                         pbusy_list =
1231                             hwi_get_async_busy_list(pasync_ctx, is_header,
1232                                                     host_write_num);
1233                         pasync_handle =
1234                             list_entry(pfree_link, struct async_pdu_handle,
1235                                                                 link);
1236                         WARN_ON(!pasync_handle);
1237                         pasync_handle->consumed = 0;
1238
1239                         pfree_link = pfree_link->next;
1240
1241                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1242                                                 is_header, host_write_num);
1243
1244                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1245                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1246
1247                         list_move(&pasync_handle->link, pbusy_list);
1248
1249                         host_write_num++;
1250                         host_write_num = host_write_num % num_entries;
1251                 }
1252
1253                 if (is_header) {
1254                         pasync_ctx->async_header.host_write_ptr =
1255                                                         host_write_num;
1256                         pasync_ctx->async_header.free_entries -= writables;
1257                         pasync_ctx->async_header.writables -= writables;
1258                         pasync_ctx->async_header.busy_entries += writables;
1259                 } else {
1260                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1261                         pasync_ctx->async_data.free_entries -= writables;
1262                         pasync_ctx->async_data.writables -= writables;
1263                         pasync_ctx->async_data.busy_entries += writables;
1264                 }
1265
1266                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1267                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1268                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1269                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1270                                         << DB_DEF_PDU_CQPROC_SHIFT;
1271
1272                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1273         }
1274 }
1275
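/**
 * hwi_flush_default_pdu_buffer - discard an unsolicited data buffer
 * @phba: driver private structure for the adapter
 * @beiscsi_conn: driver connection the CQE belongs to
 * @pdpdu_cqe: default PDU completion entry being flushed
 *
 * Frees the async handles queued for the CRI and reposts buffers to the
 * hardware without delivering the PDU.
 */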
1276 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1277                                          struct beiscsi_conn *beiscsi_conn,
1278                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1279 {
1280         struct hwi_controller *phwi_ctrlr;
1281         struct hwi_async_pdu_context *pasync_ctx;
1282         struct async_pdu_handle *pasync_handle = NULL;
1283         unsigned int cq_index = -1;
1284
1285         phwi_ctrlr = phba->phwi_ctrlr;
1286         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1287
1288         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1289                                              pdpdu_cqe, &cq_index);
1290         BUG_ON(pasync_handle->is_header != 0);
1291         if (pasync_handle->consumed == 0)
1292                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1293                                            cq_index);
1294
1295         hwi_free_async_msg(phba, pasync_handle->cri);
1296         hwi_post_async_buffers(phba, pasync_handle->is_header);
1297 }
1298
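/*
 * hwi_fwd_async_msg - hand a reassembled unsolicited PDU to libiscsi.
 *
 * Walks the per-CRI wait queue: the first handle carries the PDU header,
 * any remaining handles carry data fragments that are copied back to back
 * into the first data buffer. The header and assembled data are passed to
 * beiscsi_process_async_pdu() and, on success, the handles are returned
 * to their free lists.
 */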
1299 static unsigned int
1300 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1301                   struct beiscsi_hba *phba,
1302                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1303 {
1304         struct list_head *plist;
1305         struct async_pdu_handle *pasync_handle;
1306         void *phdr = NULL;
1307         unsigned int hdr_len = 0, buf_len = 0;
1308         unsigned int status, index = 0, offset = 0;
1309         void *pfirst_buffer = NULL;
1310         unsigned int num_buf = 0;
1311
1312         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1313
1314         list_for_each_entry(pasync_handle, plist, link) {
1315                 if (index == 0) {
1316                         phdr = pasync_handle->pbuffer;
1317                         hdr_len = pasync_handle->buffer_len;
1318                 } else {
1319                         buf_len = pasync_handle->buffer_len;
1320                         if (!num_buf) {
1321                                 pfirst_buffer = pasync_handle->pbuffer;
1322                                 num_buf++;
1323                         }
1324                         memcpy(pfirst_buffer + offset,
1325                                pasync_handle->pbuffer, buf_len);
1326                         offset += buf_len;
1327                 }
1328                 index++;
1329         }
1330
1331         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1332                                            (beiscsi_conn->beiscsi_conn_cid -
1333                                             phba->fw_config.iscsi_cid_start),
1334                                             phdr, hdr_len, pfirst_buffer,
1335                                             offset);
1336
1337         if (status == 0)
1338                 hwi_free_async_msg(phba, cri);
1339         return 0;
1340 }
1341
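/*
 * hwi_gather_async_pdu - account for one default PDU ring completion.
 *
 * Moves the completed handle from its busy list onto the wait queue of
 * its CRI. A header completion records how many data bytes are still
 * expected; a data completion adds to the running byte count. Once all
 * expected bytes have arrived the PDU is forwarded via
 * hwi_fwd_async_msg().
 */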
1342 static unsigned int
1343 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1344                      struct beiscsi_hba *phba,
1345                      struct async_pdu_handle *pasync_handle)
1346 {
1347         struct hwi_async_pdu_context *pasync_ctx;
1348         struct hwi_controller *phwi_ctrlr;
1349         unsigned int bytes_needed = 0, status = 0;
1350         unsigned short cri = pasync_handle->cri;
1351         struct pdu_base *ppdu;
1352
1353         phwi_ctrlr = phba->phwi_ctrlr;
1354         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1355
1356         list_del(&pasync_handle->link);
1357         if (pasync_handle->is_header) {
1358                 pasync_ctx->async_header.busy_entries--;
1359                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1360                         hwi_free_async_msg(phba, cri);
1361                         BUG();
1362                 }
1363
1364                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1365                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1366                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1367                                 (unsigned short)pasync_handle->buffer_len;
1368                 list_add_tail(&pasync_handle->link,
1369                               &pasync_ctx->async_entry[cri].wait_queue.list);
1370
1371                 ppdu = pasync_handle->pbuffer;
1372                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1373                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1374                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1375                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1376                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1377
1378                 if (status == 0) {
1379                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1380                             bytes_needed;
1381
1382                         if (bytes_needed == 0)
1383                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1384                                                            pasync_ctx, cri);
1385                 }
1386         } else {
1387                 pasync_ctx->async_data.busy_entries--;
1388                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1389                         list_add_tail(&pasync_handle->link,
1390                                       &pasync_ctx->async_entry[cri].wait_queue.
1391                                       list);
1392                         pasync_ctx->async_entry[cri].wait_queue.
1393                                 bytes_received +=
1394                                 (unsigned short)pasync_handle->buffer_len;
1395
1396                         if (pasync_ctx->async_entry[cri].wait_queue.
1397                             bytes_received >=
1398                             pasync_ctx->async_entry[cri].wait_queue.
1399                             bytes_needed)
1400                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1401                                                            pasync_ctx, cri);
1402                 }
1403         }
1404         return status;
1405 }
1406
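/*
 * hwi_process_default_pdu_ring - handle UNSOL_HDR/UNSOL_DATA CQEs.
 *
 * Resolves the async handle for the completion, updates the writables
 * accounting if needed, gathers the PDU fragments and reposts buffers
 * to the default PDU ring.
 */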
1407 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1408                                          struct beiscsi_hba *phba,
1409                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1410 {
1411         struct hwi_controller *phwi_ctrlr;
1412         struct hwi_async_pdu_context *pasync_ctx;
1413         struct async_pdu_handle *pasync_handle = NULL;
1414         unsigned int cq_index = -1;
1415
1416         phwi_ctrlr = phba->phwi_ctrlr;
1417         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1418         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1419                                              pdpdu_cqe, &cq_index);
1420
1421         if (pasync_handle->consumed == 0)
1422                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1423                                            cq_index);
1424         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1425         hwi_post_async_buffers(phba, pasync_handle->is_header);
1426 }
1427
1428
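/*
 * beiscsi_process_cq - drain one iSCSI completion queue.
 *
 * Consumes valid CQEs from the tail of the CQ and dispatches on the
 * completion code: solicited command completions, driver messages,
 * unsolicited header/data notifications, digest errors (which flush the
 * default PDU buffer) and the connection error codes that end in
 * iscsi_conn_failure(). The CQ doorbell is rung every 32 entries and the
 * total number of processed entries is returned.
 */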
1429 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1430 {
1431         struct be_queue_info *cq;
1432         struct sol_cqe *sol;
1433         struct dmsg_cqe *dmsg;
1434         unsigned int num_processed = 0;
1435         unsigned int tot_nump = 0;
1436         struct beiscsi_conn *beiscsi_conn;
1437         struct sgl_handle *psgl_handle = NULL;
1438         struct beiscsi_endpoint *beiscsi_ep;
1439         struct iscsi_endpoint *ep;
1440         struct beiscsi_hba *phba;
1441
1442         cq = pbe_eq->cq;
1443         sol = queue_tail_node(cq);
1444         phba = pbe_eq->phba;
1445
1446         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1447                CQE_VALID_MASK) {
1448                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1449
1450                 if (ring_mode) {
1451                         psgl_handle = phba->sgl_hndl_array[((sol->
1452                                       dw[offsetof(struct amap_sol_cqe_ring,
1453                                       icd_index) / 32] & SOL_ICD_INDEX_MASK)
1454                                       >> 6)];
1455                         ep = phba->ep_array[psgl_handle->cid];
1456                 } else {
1457                         ep = phba->ep_array[(u32) ((sol->
1458                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1459                                    SOL_CID_MASK) >> 6) -
1460                                    phba->fw_config.iscsi_cid_start];
1461                 }
1462                 beiscsi_ep = ep->dd_data;
1463                 beiscsi_conn = beiscsi_ep->conn;
1464                 if (num_processed >= 32) {
1465                         hwi_ring_cq_db(phba, cq->id,
1466                                         num_processed, 0, 0);
1467                         tot_nump += num_processed;
1468                         num_processed = 0;
1469                 }
1470
1471                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1472                         32] & CQE_CODE_MASK) {
1473                 case SOL_CMD_COMPLETE:
1474                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1475                         break;
1476                 case DRIVERMSG_NOTIFY:
1477                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1478                         dmsg = (struct dmsg_cqe *)sol;
1479                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1480                         break;
1481                 case UNSOL_HDR_NOTIFY:
1482                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1483                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1484                                              (struct i_t_dpdu_cqe *)sol);
1485                         break;
1486                 case UNSOL_DATA_NOTIFY:
1487                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1488                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1489                                              (struct i_t_dpdu_cqe *)sol);
1490                         break;
1491                 case CXN_INVALIDATE_INDEX_NOTIFY:
1492                 case CMD_INVALIDATED_NOTIFY:
1493                 case CXN_INVALIDATE_NOTIFY:
1494                         SE_DEBUG(DBG_LVL_1,
1495                                  "Ignoring CQ Error notification for cmd/cxn "
1496                                  "invalidate\n");
1497                         break;
1498                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1499                 case CMD_KILLED_INVALID_STATSN_RCVD:
1500                 case CMD_KILLED_INVALID_R2T_RCVD:
1501                 case CMD_CXN_KILLED_LUN_INVALID:
1502                 case CMD_CXN_KILLED_ICD_INVALID:
1503                 case CMD_CXN_KILLED_ITT_INVALID:
1504                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1505                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1506                         if (ring_mode) {
1507                                 SE_DEBUG(DBG_LVL_1,
1508                                  "CQ Error notification for cmd.. "
1509                                  "code %d cid 0x%x\n",
1510                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1511                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1512                         } else {
1513                                 SE_DEBUG(DBG_LVL_1,
1514                                  "CQ Error notification for cmd.. "
1515                                  "code %d cid 0x%x\n",
1516                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1517                                  32] & CQE_CODE_MASK,
1518                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1519                                  32] & SOL_CID_MASK));
1520                         }
1521                         break;
1522                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1523                         SE_DEBUG(DBG_LVL_1,
1524                                  "Digest error on def pdu ring, dropping..\n");
1525                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1526                                              (struct i_t_dpdu_cqe *) sol);
1527                         break;
1528                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1529                 case CXN_KILLED_BURST_LEN_MISMATCH:
1530                 case CXN_KILLED_AHS_RCVD:
1531                 case CXN_KILLED_HDR_DIGEST_ERR:
1532                 case CXN_KILLED_UNKNOWN_HDR:
1533                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1534                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1535                 case CXN_KILLED_TIMED_OUT:
1536                 case CXN_KILLED_FIN_RCVD:
1537                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1538                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1539                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1540                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1541                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1542                         if (ring_mode) {
1543                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1544                                  "0x%x...\n",
1545                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1546                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1547                         } else {
1548                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1549                                  "0x%x...\n",
1550                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1551                                  32] & CQE_CODE_MASK,
1552                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1553                                  32] & CQE_CID_MASK));
1554                         }
1555                         iscsi_conn_failure(beiscsi_conn->conn,
1556                                            ISCSI_ERR_CONN_FAILED);
1557                         break;
1558                 case CXN_KILLED_RST_SENT:
1559                 case CXN_KILLED_RST_RCVD:
1560                         if (ring_mode) {
1561                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1562                                 "received/sent on CID 0x%x...\n",
1563                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1564                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1565                         } else {
1566                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1567                                 "received/sent on CID 0x%x...\n",
1568                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1569                                  32] & CQE_CODE_MASK,
1570                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1571                                  32] & CQE_CID_MASK));
1572                         }
1573                         iscsi_conn_failure(beiscsi_conn->conn,
1574                                            ISCSI_ERR_CONN_FAILED);
1575                         break;
1576                 default:
1577                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1578                                  "received on CID 0x%x...\n",
1579                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1580                                  32] & CQE_CODE_MASK,
1581                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1582                                  32] & CQE_CID_MASK));
1583                         break;
1584                 }
1585
1586                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1587                 queue_tail_inc(cq);
1588                 sol = queue_tail_node(cq);
1589                 num_processed++;
1590         }
1591
1592         if (num_processed > 0) {
1593                 tot_nump += num_processed;
1594                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1595         }
1596         return tot_nump;
1597 }
1598
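/*
 * beiscsi_process_all_cqs - work item for deferred CQ processing.
 *
 * Runs from the hba work_cqs work queue scheduled by the interrupt path.
 * Selects the EQ reserved for MCC when MSI-X is enabled (the last entry
 * of be_eq[]) or EQ 0 otherwise, clears the todo flags under isr_lock
 * and drains the corresponding completion queue when todo_cq was set.
 */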
1599 static void beiscsi_process_all_cqs(struct work_struct *work)
1600 {
1601         unsigned long flags;
1602         struct hwi_controller *phwi_ctrlr;
1603         struct hwi_context_memory *phwi_context;
1604         struct be_eq_obj *pbe_eq;
1605         struct beiscsi_hba *phba =
1606             container_of(work, struct beiscsi_hba, work_cqs);
1607
1608         phwi_ctrlr = phba->phwi_ctrlr;
1609         phwi_context = phwi_ctrlr->phwi_ctxt;
1610         if (phba->msix_enabled)
1611                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1612         else
1613                 pbe_eq = &phwi_context->be_eq[0];
1614
1615         if (phba->todo_mcc_cq) {
1616                 spin_lock_irqsave(&phba->isr_lock, flags);
1617                 phba->todo_mcc_cq = 0;
1618                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1619         }
1620
1621         if (phba->todo_cq) {
1622                 spin_lock_irqsave(&phba->isr_lock, flags);
1623                 phba->todo_cq = 0;
1624                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1625                 beiscsi_process_cq(pbe_eq);
1626         }
1627 }
1628
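/*
 * be_iopoll - blk_iopoll callback used instead of interrupt-driven
 * completion processing.
 *
 * Drains the CQ tied to this EQ; if fewer entries than the budget were
 * processed the poll is completed and the EQ doorbell is rung to re-arm
 * the queue so further events raise an interrupt again.
 */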
1629 static int be_iopoll(struct blk_iopoll *iop, int budget)
1630 {
1631         unsigned int ret;
1632         struct beiscsi_hba *phba;
1633         struct be_eq_obj *pbe_eq;
1634
1635         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1636         ret = beiscsi_process_cq(pbe_eq);
1637         if (ret < budget) {
1638                 phba = pbe_eq->phba;
1639                 blk_iopoll_complete(iop);
1640                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1641                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1642         }
1643         return ret;
1644 }
1645
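/*
 * hwi_write_sgl - build the WRB and SGL for a scatter-gather I/O task.
 *
 * The BHS address and the first two data fragments are written inline
 * into the WRB (sge0/sge1); the complete fragment list is then laid out
 * as iscsi_sge entries in the task's SGL, starting with the BHS and
 * ending with last_sge set on the final data entry.
 */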
1646 static void
1647 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1648               unsigned int num_sg, struct beiscsi_io_task *io_task)
1649 {
1650         struct iscsi_sge *psgl;
1651         unsigned short sg_len, index;
1652         unsigned int sge_len = 0;
1653         unsigned long long addr;
1654         struct scatterlist *l_sg;
1655         unsigned int offset;
1656
1657         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1658                                       io_task->bhs_pa.u.a32.address_lo);
1659         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1660                                       io_task->bhs_pa.u.a32.address_hi);
1661
1662         l_sg = sg;
1663         for (index = 0; (index < num_sg) && (index < 2); index++, sg = sg_next(sg)) {
1664                 if (index == 0) {
1665                         sg_len = sg_dma_len(sg);
1666                         addr = (u64) sg_dma_address(sg);
1667                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1668                                                         (addr & 0xFFFFFFFF));
1669                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1670                                                         (addr >> 32));
1671                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1672                                                         sg_len);
1673                         sge_len = sg_len;
1674                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1675                                                         1);
1676                 } else {
1677                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1678                                                         0);
1679                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1680                                                         pwrb, sge_len);
1681                         sg_len = sg_dma_len(sg);
1682                         addr = (u64) sg_dma_address(sg);
1683                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1684                                                         (addr & 0xFFFFFFFF));
1685                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1686                                                         (addr >> 32));
1687                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1688                                                         sg_len);
1689                 }
1690         }
1691         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1692         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1693
1694         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1695
1696         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1697                         io_task->bhs_pa.u.a32.address_hi);
1698         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1699                         io_task->bhs_pa.u.a32.address_lo);
1700
1701         if (num_sg == 2)
1702                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1703         sg = l_sg;
1704         psgl++;
1705         psgl++;
1706         offset = 0;
1707         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1708                 sg_len = sg_dma_len(sg);
1709                 addr = (u64) sg_dma_address(sg);
1710                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1711                                                 (addr & 0xFFFFFFFF));
1712                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1713                                                 (addr >> 32));
1714                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1715                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1716                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1717                 offset += sg_len;
1718         }
1719         psgl--;
1720         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1721 }
1722
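/*
 * hwi_write_buffer - build the WRB and SGL for a non-I/O (mgmt) task.
 *
 * Points the WRB at the task's BHS and, when immediate data is present,
 * maps task->data for DMA and records it as sge0. The SGL fragment is
 * filled with the BHS entry plus additional entries covering the mapped
 * data buffer.
 */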
1723 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1724 {
1725         struct iscsi_sge *psgl;
1726         unsigned long long addr;
1727         struct beiscsi_io_task *io_task = task->dd_data;
1728         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1729         struct beiscsi_hba *phba = beiscsi_conn->phba;
1730
1731         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1732         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1733                                 io_task->bhs_pa.u.a32.address_lo);
1734         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1735                                 io_task->bhs_pa.u.a32.address_hi);
1736
1737         if (task->data) {
1738                 if (task->data_count) {
1739                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1740                         addr = (u64) pci_map_single(phba->pcidev,
1741                                                     task->data,
1742                                                     task->data_count, PCI_DMA_TODEVICE);
1743                 } else {
1744                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1745                         addr = 0;
1746                 }
1747                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1748                                                 (addr & 0xFFFFFFFF));
1749                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1750                                                 (addr >> 32));
1751                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1752                                                 task->data_count);
1753
1754                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1755         } else {
1756                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1757                 addr = 0;
1758         }
1759
1760         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1761
1762         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1763
1764         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1765                       io_task->bhs_pa.u.a32.address_hi);
1766         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1767                       io_task->bhs_pa.u.a32.address_lo);
1768         if (task->data) {
1769                 psgl++;
1770                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1771                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1772                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1773                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1774                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1775                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1776
1777                 psgl++;
1778                 if (task->data) {
1779                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1780                                                 (addr & 0xFFFFFFFF));
1781                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1782                                                 (addr >> 32));
1783                 }
1784                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1785         }
1786         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1787 }
1788
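/*
 * beiscsi_find_mem_req - compute the size of every memory region.
 *
 * Fills phba->mem_req[] with the number of bytes needed for each region
 * (global PDU headers, WRBs and WRB handles, SGL handles and SGEs, async
 * PDU buffers, rings, handles and context) based on the controller
 * parameters; the requests are satisfied later by beiscsi_alloc_mem().
 */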
1789 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1790 {
1791         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1792         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1793         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1794
1795         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1796                                       sizeof(struct sol_cqe));
1797         num_async_pdu_buf_pages =
1798                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1799                                        phba->params.defpdu_hdr_sz);
1800         num_async_pdu_buf_sgl_pages =
1801                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1802                                        sizeof(struct phys_addr));
1803         num_async_pdu_data_pages =
1804                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1805                                        phba->params.defpdu_data_sz);
1806         num_async_pdu_data_sgl_pages =
1807                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1808                                        sizeof(struct phys_addr));
1809
1810         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1811
1812         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1813                                                  BE_ISCSI_PDU_HEADER_SIZE;
1814         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1815                                             sizeof(struct hwi_context_memory);
1816
1817
1818         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1819             * (phba->params.wrbs_per_cxn)
1820             * phba->params.cxns_per_ctrl;
1821         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1822                                  (phba->params.wrbs_per_cxn);
1823         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1824                                 phba->params.cxns_per_ctrl);
1825
1826         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1827                 phba->params.icds_per_ctrl;
1828         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1829                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1830
1831         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1832                 num_async_pdu_buf_pages * PAGE_SIZE;
1833         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1834                 num_async_pdu_data_pages * PAGE_SIZE;
1835         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1836                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1837         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1838                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1839         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1840                 phba->params.asyncpdus_per_ctrl *
1841                 sizeof(struct async_pdu_handle);
1842         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1843                 phba->params.asyncpdus_per_ctrl *
1844                 sizeof(struct async_pdu_handle);
1845         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1846                 sizeof(struct hwi_async_pdu_context) +
1847                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1848 }
1849
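/*
 * beiscsi_alloc_mem - allocate DMA memory for every region in mem_req[].
 *
 * Each region is built from one or more physically contiguous chunks of
 * at most be_max_phys_size KB. When an allocation fails the chunk size
 * is rounded down to a power of two or halved and retried, down to
 * BE_MIN_MEM_SIZE; the resulting fragments are recorded in the init_mem
 * descriptor array. On failure everything allocated so far is released
 * and -ENOMEM is returned.
 */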
1850 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1851 {
1852         struct be_mem_descriptor *mem_descr;
1853         dma_addr_t bus_add;
1854         struct mem_array *mem_arr, *mem_arr_orig;
1855         unsigned int i, j, alloc_size, curr_alloc_size;
1856
1857         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1858         if (!phba->phwi_ctrlr)
1859                 return -ENOMEM;
1860
1861         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1862                                  GFP_KERNEL);
1863         if (!phba->init_mem) {
1864                 kfree(phba->phwi_ctrlr);
1865                 return -ENOMEM;
1866         }
1867
1868         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1869                                GFP_KERNEL);
1870         if (!mem_arr_orig) {
1871                 kfree(phba->init_mem);
1872                 kfree(phba->phwi_ctrlr);
1873                 return -ENOMEM;
1874         }
1875
1876         mem_descr = phba->init_mem;
1877         for (i = 0; i < SE_MEM_MAX; i++) {
1878                 j = 0;
1879                 mem_arr = mem_arr_orig;
1880                 alloc_size = phba->mem_req[i];
1881                 memset(mem_arr, 0, sizeof(struct mem_array) *
1882                        BEISCSI_MAX_FRAGS_INIT);
1883                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1884                 do {
1885                         mem_arr->virtual_address = pci_alloc_consistent(
1886                                                         phba->pcidev,
1887                                                         curr_alloc_size,
1888                                                         &bus_add);
1889                         if (!mem_arr->virtual_address) {
1890                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1891                                         goto free_mem;
1892                                 if (curr_alloc_size -
1893                                         rounddown_pow_of_two(curr_alloc_size))
1894                                         curr_alloc_size = rounddown_pow_of_two
1895                                                              (curr_alloc_size);
1896                                 else
1897                                         curr_alloc_size = curr_alloc_size / 2;
1898                         } else {
1899                                 mem_arr->bus_address.u.
1900                                     a64.address = (__u64) bus_add;
1901                                 mem_arr->size = curr_alloc_size;
1902                                 alloc_size -= curr_alloc_size;
1903                                 curr_alloc_size = min(be_max_phys_size *
1904                                                       1024, alloc_size);
1905                                 j++;
1906                                 mem_arr++;
1907                         }
1908                 } while (alloc_size);
1909                 mem_descr->num_elements = j;
1910                 mem_descr->size_in_bytes = phba->mem_req[i];
1911                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1912                                                GFP_KERNEL);
1913                 if (!mem_descr->mem_array)
1914                         goto free_mem;
1915
1916                 memcpy(mem_descr->mem_array, mem_arr_orig,
1917                        sizeof(struct mem_array) * j);
1918                 mem_descr++;
1919         }
1920         kfree(mem_arr_orig);
1921         return 0;
1922 free_mem:
1923         mem_descr->num_elements = j;
1924         while ((i) || (j)) {
1925                 for (j = mem_descr->num_elements; j > 0; j--) {
1926                         pci_free_consistent(phba->pcidev,
1927                                             mem_descr->mem_array[j - 1].size,
1928                                             mem_descr->mem_array[j - 1].
1929                                             virtual_address,
1930                                             mem_descr->mem_array[j - 1].
1931                                             bus_address.u.a64.address);
1932                 }
1933                 if (i) {
1934                         i--;
1935                         kfree(mem_descr->mem_array);
1936                         mem_descr--;
1937                 }
1938         }
1939         kfree(mem_arr_orig);
1940         kfree(phba->init_mem);
1941         kfree(phba->phwi_ctrlr);
1942         return -ENOMEM;
1943 }
1944
1945 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1946 {
1947         beiscsi_find_mem_req(phba);
1948         return beiscsi_alloc_mem(phba);
1949 }
1950
1951 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1952 {
1953         struct pdu_data_out *pdata_out;
1954         struct pdu_nop_out *pnop_out;
1955         struct be_mem_descriptor *mem_descr;
1956
1957         mem_descr = phba->init_mem;
1958         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1959         pdata_out =
1960             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1961         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1962
1963         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1964                       IIOC_SCSI_DATA);
1965
1966         pnop_out =
1967             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1968                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1969
1970         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1971         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1972         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1973         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1974 }
1975
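/*
 * beiscsi_init_wrb_handle - carve WRB handles and WRBs per connection.
 *
 * Walks the HWI_MEM_WRBH and HWI_MEM_WRB regions, giving each
 * connection's wrb_context an array of wrbs_per_cxn handles and linking
 * every handle to its iscsi_wrb, moving on to the next memory fragment
 * when the current one is exhausted.
 */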
1976 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1977 {
1978         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1979         struct wrb_handle *pwrb_handle;
1980         struct hwi_controller *phwi_ctrlr;
1981         struct hwi_wrb_context *pwrb_context;
1982         struct iscsi_wrb *pwrb;
1983         unsigned int num_cxn_wrbh;
1984         unsigned int num_cxn_wrb, j, idx, index;
1985
1986         mem_descr_wrbh = phba->init_mem;
1987         mem_descr_wrbh += HWI_MEM_WRBH;
1988
1989         mem_descr_wrb = phba->init_mem;
1990         mem_descr_wrb += HWI_MEM_WRB;
1991
1992         idx = 0;
1993         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
1994         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
1995                         ((sizeof(struct wrb_handle)) *
1996                          phba->params.wrbs_per_cxn));
1997         phwi_ctrlr = phba->phwi_ctrlr;
1998
1999         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2000                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2001                 pwrb_context->pwrb_handle_base =
2002                                 kzalloc(sizeof(struct wrb_handle *) *
2003                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2004                 pwrb_context->pwrb_handle_basestd =
2005                                 kzalloc(sizeof(struct wrb_handle *) *
2006                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2007                 if (num_cxn_wrbh) {
2008                         pwrb_context->alloc_index = 0;
2009                         pwrb_context->wrb_handles_available = 0;
2010                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2011                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2012                                 pwrb_context->pwrb_handle_basestd[j] =
2013                                                                 pwrb_handle;
2014                                 pwrb_context->wrb_handles_available++;
2015                                 pwrb_handle->wrb_index = j;
2016                                 pwrb_handle++;
2017                         }
2018                         pwrb_context->free_index = 0;
2019                         num_cxn_wrbh--;
2020                 } else {
2021                         idx++;
2022                         pwrb_handle =
2023                             mem_descr_wrbh->mem_array[idx].virtual_address;
2024                         num_cxn_wrbh =
2025                             ((mem_descr_wrbh->mem_array[idx].size) /
2026                              ((sizeof(struct wrb_handle)) *
2027                               phba->params.wrbs_per_cxn));
2028                         pwrb_context->alloc_index = 0;
2029                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2030                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2031                                 pwrb_context->pwrb_handle_basestd[j] =
2032                                     pwrb_handle;
2033                                 pwrb_context->wrb_handles_available++;
2034                                 pwrb_handle->wrb_index = j;
2035                                 pwrb_handle++;
2036                         }
2037                         pwrb_context->free_index = 0;
2038                         num_cxn_wrbh--;
2039                 }
2040         }
2041         idx = 0;
2042         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2043         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2044                        (sizeof(struct iscsi_wrb) *
2045                         phba->params.wrbs_per_cxn));
2046
2047         for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
2048                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2049                 if (num_cxn_wrb) {
2050                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2051                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2052                                 pwrb_handle->pwrb = pwrb;
2053                                 pwrb++;
2054                         }
2055                         num_cxn_wrb--;
2056                 } else {
2057                         idx++;
2058                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2059                         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2060                                         (sizeof(struct iscsi_wrb) *
2061                                          phba->params.wrbs_per_cxn));
2062                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2063                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2064                                 pwrb_handle->pwrb = pwrb;
2065                                 pwrb++;
2066                         }
2067                         num_cxn_wrb--;
2068                 }
2069         }
2070 }
2071
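/*
 * hwi_init_async_pdu_ctx - initialize the default (async) PDU context.
 *
 * Wires the context to the header/data buffer, ring and handle regions
 * allocated earlier, then builds one async_pdu_handle per entry for both
 * the header and data sides, placing them on the free lists and setting
 * up the per-CRI wait queues and busy lists.
 */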
2072 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2073 {
2074         struct hwi_controller *phwi_ctrlr;
2075         struct hba_parameters *p = &phba->params;
2076         struct hwi_async_pdu_context *pasync_ctx;
2077         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2078         unsigned int index;
2079         struct be_mem_descriptor *mem_descr;
2080
2081         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2082         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2083
2084         phwi_ctrlr = phba->phwi_ctrlr;
2085         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2086                                 mem_descr->mem_array[0].virtual_address;
2087         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2088         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2089
2090         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2091         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2092         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2093         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2094
2095         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2096         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2097         if (mem_descr->mem_array[0].virtual_address) {
2098                 SE_DEBUG(DBG_LVL_8,
2099                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2100                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2101         } else
2102                 shost_printk(KERN_WARNING, phba->shost,
2103                              "No Virtual address \n");
2104
2105         pasync_ctx->async_header.va_base =
2106                         mem_descr->mem_array[0].virtual_address;
2107
2108         pasync_ctx->async_header.pa_base.u.a64.address =
2109                         mem_descr->mem_array[0].bus_address.u.a64.address;
2110
2111         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2112         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2113         if (mem_descr->mem_array[0].virtual_address) {
2114                 SE_DEBUG(DBG_LVL_8,
2115                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2116                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2117         } else
2118                 shost_printk(KERN_WARNING, phba->shost,
2119                             "No Virtual address \n");
2120         pasync_ctx->async_header.ring_base =
2121                         mem_descr->mem_array[0].virtual_address;
2122
2123         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2124         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2125         if (mem_descr->mem_array[0].virtual_address) {
2126                 SE_DEBUG(DBG_LVL_8,
2127                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2128                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2129         } else
2130                 shost_printk(KERN_WARNING, phba->shost,
2131                             "No Virtual address \n");
2132
2133         pasync_ctx->async_header.handle_base =
2134                         mem_descr->mem_array[0].virtual_address;
2135         pasync_ctx->async_header.writables = 0;
2136         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2137
2138         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2139         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2140         if (mem_descr->mem_array[0].virtual_address) {
2141                 SE_DEBUG(DBG_LVL_8,
2142                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2143                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2144         } else
2145                 shost_printk(KERN_WARNING, phba->shost,
2146                             "No Virtual address \n");
2147         pasync_ctx->async_data.va_base =
2148                         mem_descr->mem_array[0].virtual_address;
2149         pasync_ctx->async_data.pa_base.u.a64.address =
2150                         mem_descr->mem_array[0].bus_address.u.a64.address;
2151
2152         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2153         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2154         if (mem_descr->mem_array[0].virtual_address) {
2155                 SE_DEBUG(DBG_LVL_8,
2156                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2157                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2158         } else
2159                 shost_printk(KERN_WARNING, phba->shost,
2160                              "No Virtual address \n");
2161
2162         pasync_ctx->async_data.ring_base =
2163                         mem_descr->mem_array[0].virtual_address;
2164
2165         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2166         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2167         if (!mem_descr->mem_array[0].virtual_address)
2168                 shost_printk(KERN_WARNING, phba->shost,
2169                             "No Virtual address \n");
2170
2171         pasync_ctx->async_data.handle_base =
2172                         mem_descr->mem_array[0].virtual_address;
2173         pasync_ctx->async_data.writables = 0;
2174         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2175
2176         pasync_header_h =
2177                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2178         pasync_data_h =
2179                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2180
2181         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2182                 pasync_header_h->cri = -1;
2183                 pasync_header_h->index = (char)index;
2184                 INIT_LIST_HEAD(&pasync_header_h->link);
2185                 pasync_header_h->pbuffer =
2186                         (void *)((unsigned long)
2187                         (pasync_ctx->async_header.va_base) +
2188                         (p->defpdu_hdr_sz * index));
2189
2190                 pasync_header_h->pa.u.a64.address =
2191                         pasync_ctx->async_header.pa_base.u.a64.address +
2192                         (p->defpdu_hdr_sz * index);
2193
2194                 list_add_tail(&pasync_header_h->link,
2195                                 &pasync_ctx->async_header.free_list);
2196                 pasync_header_h++;
2197                 pasync_ctx->async_header.free_entries++;
2198                 pasync_ctx->async_header.writables++;
2199
2200                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2201                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2202                                header_busy_list);
2203                 pasync_data_h->cri = -1;
2204                 pasync_data_h->index = (char)index;
2205                 INIT_LIST_HEAD(&pasync_data_h->link);
2206                 pasync_data_h->pbuffer =
2207                         (void *)((unsigned long)
2208                         (pasync_ctx->async_data.va_base) +
2209                         (p->defpdu_data_sz * index));
2210
2211                 pasync_data_h->pa.u.a64.address =
2212                     pasync_ctx->async_data.pa_base.u.a64.address +
2213                     (p->defpdu_data_sz * index);
2214
2215                 list_add_tail(&pasync_data_h->link,
2216                               &pasync_ctx->async_data.free_list);
2217                 pasync_data_h++;
2218                 pasync_ctx->async_data.free_entries++;
2219                 pasync_ctx->async_data.writables++;
2220
2221                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2222         }
2223
2224         pasync_ctx->async_header.host_write_ptr = 0;
2225         pasync_ctx->async_header.ep_read_ptr = -1;
2226         pasync_ctx->async_data.host_write_ptr = 0;
2227         pasync_ctx->async_data.ep_read_ptr = -1;
2228 }
2229
2230 static int
2231 be_sgl_create_contiguous(void *virtual_address,
2232                          u64 physical_address, u32 length,
2233                          struct be_dma_mem *sgl)
2234 {
2235         WARN_ON(!virtual_address);
2236         WARN_ON(!physical_address);
2237         WARN_ON(length == 0);
2238         WARN_ON(!sgl);
2239
2240         sgl->va = virtual_address;
2241         sgl->dma = physical_address;
2242         sgl->size = length;
2243
2244         return 0;
2245 }
2246
2247 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2248 {
2249         memset(sgl, 0, sizeof(*sgl));
2250 }
2251
2252 static void
2253 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2254                      struct mem_array *pmem, struct be_dma_mem *sgl)
2255 {
2256         if (sgl->va)
2257                 be_sgl_destroy_contiguous(sgl);
2258
2259         be_sgl_create_contiguous(pmem->virtual_address,
2260                                  pmem->bus_address.u.a64.address,
2261                                  pmem->size, sgl);
2262 }
2263
2264 static void
2265 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2266                            struct mem_array *pmem, struct be_dma_mem *sgl)
2267 {
2268         if (sgl->va)
2269                 be_sgl_destroy_contiguous(sgl);
2270
2271         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2272                                  pmem->bus_address.u.a64.address,
2273                                  pmem->size, sgl);
2274 }
2275
2276 static int be_fill_queue(struct be_queue_info *q,
2277                 u16 len, u16 entry_size, void *vaddress)
2278 {
2279         struct be_dma_mem *mem = &q->dma_mem;
2280
2281         memset(q, 0, sizeof(*q));
2282         q->len = len;
2283         q->entry_size = entry_size;
2284         mem->size = len * entry_size;
2285         mem->va = vaddress;
2286         if (!mem->va)
2287                 return -ENOMEM;
2288         memset(mem->va, 0, mem->size);
2289         return 0;
2290 }
2291
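/*
 * beiscsi_create_eqs - allocate and create the event queues.
 *
 * One EQ per CPU, plus an extra EQ for MCC when MSI-X is enabled. Each
 * EQ ring is allocated with pci_alloc_consistent() and created in the
 * adapter with beiscsi_cmd_eq_create(); on error the rings allocated so
 * far are freed.
 */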
2292 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2293                              struct hwi_context_memory *phwi_context)
2294 {
2295         unsigned int i, num_eq_pages;
2296         int ret, eq_for_mcc;
2297         struct be_queue_info *eq;
2298         struct be_dma_mem *mem;
2299         void *eq_vaddress;
2300         dma_addr_t paddr;
2301
2302         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2303                                       sizeof(struct be_eq_entry));
2304
2305         if (phba->msix_enabled)
2306                 eq_for_mcc = 1;
2307         else
2308                 eq_for_mcc = 0;
2309         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2310                 eq = &phwi_context->be_eq[i].q;
2311                 mem = &eq->dma_mem;
2312                 phwi_context->be_eq[i].phba = phba;
2313                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2314                                                      num_eq_pages * PAGE_SIZE,
2315                                                      &paddr);
2316                 if (!eq_vaddress)
2317                         goto create_eq_error;
2318
2319                 mem->va = eq_vaddress;
2320                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2321                                     sizeof(struct be_eq_entry), eq_vaddress);
2322                 if (ret) {
2323                         shost_printk(KERN_ERR, phba->shost,
2324                                      "be_fill_queue Failed for EQ \n");
2325                         goto create_eq_error;
2326                 }
2327
2328                 mem->dma = paddr;
2329                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2330                                             phwi_context->cur_eqd);
2331                 if (ret) {
2332                         shost_printk(KERN_ERR, phba->shost,
2333                                      "beiscsi_cmd_eq_create "
2334                                      "Failed for EQ\n");
2335                         goto create_eq_error;
2336                 }
2337                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2338         }
2339         return 0;
2340 create_eq_error:
2341         for (i = 0; i < (phba->num_cpus + 1); i++) {
2342                 eq = &phwi_context->be_eq[i].q;
2343                 mem = &eq->dma_mem;
2344                 if (mem->va)
2345                         pci_free_consistent(phba->pcidev, num_eq_pages
2346                                             * PAGE_SIZE,
2347                                             mem->va, mem->dma);
2348         }
2349         return ret;
2350 }
2351
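/*
 * beiscsi_create_cqs - allocate and create one completion queue per CPU.
 *
 * Each CQ is bound to the EQ with the same index, its ring memory is
 * allocated and initialized, and the queue is created in the adapter
 * with beiscsi_cmd_cq_create(); on error all CQ rings are freed.
 */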
2352 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2353                              struct hwi_context_memory *phwi_context)
2354 {
2355         unsigned int i, num_cq_pages;
2356         int ret;
2357         struct be_queue_info *cq, *eq;
2358         struct be_dma_mem *mem;
2359         struct be_eq_obj *pbe_eq;
2360         void *cq_vaddress;
2361         dma_addr_t paddr;
2362
2363         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2364                                       sizeof(struct sol_cqe));
2365
2366         for (i = 0; i < phba->num_cpus; i++) {
2367                 cq = &phwi_context->be_cq[i];
2368                 eq = &phwi_context->be_eq[i].q;
2369                 pbe_eq = &phwi_context->be_eq[i];
2370                 pbe_eq->cq = cq;
2371                 pbe_eq->phba = phba;
2372                 mem = &cq->dma_mem;
2373                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2374                                                      num_cq_pages * PAGE_SIZE,
2375                                                      &paddr);
2376                 if (!cq_vaddress)
2377                         goto create_cq_error;
2378                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2379                                     sizeof(struct sol_cqe), cq_vaddress);
2380                 if (ret) {
2381                         shost_printk(KERN_ERR, phba->shost,
2382                                      "be_fill_queue Failed for ISCSI CQ \n");
2383                         goto create_cq_error;
2384                 }
2385
2386                 mem->dma = paddr;
2387                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2388                                             false, 0);
2389                 if (ret) {
2390                         shost_printk(KERN_ERR, phba->shost,
2391                                      "beiscsi_cmd_cq_create "
2392                                      "Failed for ISCSI CQ\n");
2393                         goto create_cq_error;
2394                 }
2395                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2396                                                  cq->id, eq->id);
2397                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2398         }
2399         return 0;
2400
2401 create_cq_error:
2402         for (i = 0; i < phba->num_cpus; i++) {
2403                 cq = &phwi_context->be_cq[i];
2404                 mem = &cq->dma_mem;
2405                 if (mem->va)
2406                         pci_free_consistent(phba->pcidev, num_cq_pages
2407                                             * PAGE_SIZE,
2408                                             mem->va, mem->dma);
2409         }
2410         return ret;
2411
2412 }
2413
2414 static int
2415 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2416                        struct hwi_context_memory *phwi_context,
2417                        struct hwi_controller *phwi_ctrlr,
2418                        unsigned int def_pdu_ring_sz)
2419 {
2420         unsigned int idx;
2421         int ret;
2422         struct be_queue_info *dq, *cq;
2423         struct be_dma_mem *mem;
2424         struct be_mem_descriptor *mem_descr;
2425         void *dq_vaddress;
2426
2427         idx = 0;
2428         dq = &phwi_context->be_def_hdrq;
2429         cq = &phwi_context->be_cq[0];
2430         mem = &dq->dma_mem;
2431         mem_descr = phba->init_mem;
2432         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2433         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2434         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2435                             sizeof(struct phys_addr),
2436                             sizeof(struct phys_addr), dq_vaddress);
2437         if (ret) {
2438                 shost_printk(KERN_ERR, phba->shost,
2439                              "be_fill_queue Failed for DEF PDU HDR\n");
2440                 return ret;
2441         }
2442         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2443         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2444                                               def_pdu_ring_sz,
2445                                               phba->params.defpdu_hdr_sz);
2446         if (ret) {
2447                 shost_printk(KERN_ERR, phba->shost,
2448                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2449                 return ret;
2450         }
2451         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2452         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2453                  phwi_context->be_def_hdrq.id);
2454         hwi_post_async_buffers(phba, 1);
2455         return 0;
2456 }
2457
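/*
 * beiscsi_create_def_data - create the default PDU data ring.
 * Fills the ring from the HWI_MEM_ASYNC_DATA_RING region, asks the
 * adapter to create the queue against CQ 0 and posts the initial
 * async data buffers.
 */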
2458 static int
2459 beiscsi_create_def_data(struct beiscsi_hba *phba,
2460                         struct hwi_context_memory *phwi_context,
2461                         struct hwi_controller *phwi_ctrlr,
2462                         unsigned int def_pdu_ring_sz)
2463 {
2464         unsigned int idx;
2465         int ret;
2466         struct be_queue_info *dataq, *cq;
2467         struct be_dma_mem *mem;
2468         struct be_mem_descriptor *mem_descr;
2469         void *dq_vaddress;
2470
2471         idx = 0;
2472         dataq = &phwi_context->be_def_dataq;
2473         cq = &phwi_context->be_cq[0];
2474         mem = &dataq->dma_mem;
2475         mem_descr = phba->init_mem;
2476         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2477         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2478         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2479                             sizeof(struct phys_addr),
2480                             sizeof(struct phys_addr), dq_vaddress);
2481         if (ret) {
2482                 shost_printk(KERN_ERR, phba->shost,
2483                              "be_fill_queue Failed for DEF PDU DATA\n");
2484                 return ret;
2485         }
2486         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2487         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2488                                               def_pdu_ring_sz,
2489                                               phba->params.defpdu_data_sz);
2490         if (ret) {
2491                 shost_printk(KERN_ERR, phba->shost,
2492                              "be_cmd_create_default_pdu_queue Failed"
2493                              " for DEF PDU DATA\n");
2494                 return ret;
2495         }
2496         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2497         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2498                  phwi_context->be_def_dataq.id);
2499         hwi_post_async_buffers(phba, 0);
2500         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2501         return 0;
2502 }
2503
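/*
 * beiscsi_post_pages - post the ICD SGE pages to the adapter.
 * Walks the HWI_MEM_SGE descriptor and posts each chunk with
 * be_cmd_iscsi_post_sgl_pages(), starting at the page offset that
 * corresponds to the firmware's first ICD index.
 */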
2504 static int
2505 beiscsi_post_pages(struct beiscsi_hba *phba)
2506 {
2507         struct be_mem_descriptor *mem_descr;
2508         struct mem_array *pm_arr;
2509         unsigned int page_offset, i;
2510         struct be_dma_mem sgl;
2511         int status;
2512
2513         mem_descr = phba->init_mem;
2514         mem_descr += HWI_MEM_SGE;
2515         pm_arr = mem_descr->mem_array;
2516
2517         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2518                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2519         for (i = 0; i < mem_descr->num_elements; i++) {
2520                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2521                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2522                                                 page_offset,
2523                                                 (pm_arr->size / PAGE_SIZE));
2524                 page_offset += pm_arr->size / PAGE_SIZE;
2525                 if (status != 0) {
2526                         shost_printk(KERN_ERR, phba->shost,
2527                                      "post sgl failed.\n");
2528                         return status;
2529                 }
2530                 pm_arr++;
2531         }
2532         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2533         return 0;
2534 }
2535
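/* Free the DMA memory backing a queue allocated by be_queue_alloc(). */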
2536 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2537 {
2538         struct be_dma_mem *mem = &q->dma_mem;
2539         if (mem->va)
2540                 pci_free_consistent(phba->pcidev, mem->size,
2541                         mem->va, mem->dma);
2542 }
2543
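/*
 * be_queue_alloc - allocate and zero the DMA backing for a queue of
 * @len entries of @entry_size bytes each; returns a negative value if
 * the allocation fails.
 */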
2544 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2545                 u16 len, u16 entry_size)
2546 {
2547         struct be_dma_mem *mem = &q->dma_mem;
2548
2549         memset(q, 0, sizeof(*q));
2550         q->len = len;
2551         q->entry_size = entry_size;
2552         mem->size = len * entry_size;
2553         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2554         if (!mem->va)
2555                 return -ENOMEM;
2556         memset(mem->va, 0, mem->size);
2557         return 0;
2558 }
2559
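/*
 * beiscsi_create_wrb_rings - carve the HWI_MEM_WRB region into one
 * ring of wrbs_per_cxn WRBs per connection, create each WRB queue in
 * the adapter and record the returned queue id as the connection CID.
 */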
2560 static int
2561 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2562                          struct hwi_context_memory *phwi_context,
2563                          struct hwi_controller *phwi_ctrlr)
2564 {
2565         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2566         u64 pa_addr_lo;
2567         unsigned int idx, num, i;
2568         struct mem_array *pwrb_arr;
2569         void *wrb_vaddr;
2570         struct be_dma_mem sgl;
2571         struct be_mem_descriptor *mem_descr;
2572         int status;
2573
2574         idx = 0;
2575         mem_descr = phba->init_mem;
2576         mem_descr += HWI_MEM_WRB;
2577         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2578                            GFP_KERNEL);
2579         if (!pwrb_arr) {
2580                 shost_printk(KERN_ERR, phba->shost,
2581                              "Memory alloc failed in create wrb ring.\n");
2582                 return -ENOMEM;
2583         }
2584         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2585         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2586         num_wrb_rings = mem_descr->mem_array[idx].size /
2587                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2588
2589         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2590                 if (num_wrb_rings) {
2591                         pwrb_arr[num].virtual_address = wrb_vaddr;
2592                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2593                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2594                                             sizeof(struct iscsi_wrb);
2595                         wrb_vaddr += pwrb_arr[num].size;
2596                         pa_addr_lo += pwrb_arr[num].size;
2597                         num_wrb_rings--;
2598                 } else {
2599                         idx++;
2600                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2601                         pa_addr_lo = mem_descr->mem_array[idx].
2602                                         bus_address.u.a64.address;
2603                         num_wrb_rings = mem_descr->mem_array[idx].size /
2604                                         (phba->params.wrbs_per_cxn *
2605                                         sizeof(struct iscsi_wrb));
2606                         pwrb_arr[num].virtual_address = wrb_vaddr;
2607                         pwrb_arr[num].bus_address.u.a64.address =
2608                                                 pa_addr_lo;
2609                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2610                                              sizeof(struct iscsi_wrb);
2611                         wrb_vaddr += pwrb_arr[num].size;
2612                         pa_addr_lo += pwrb_arr[num].size;
2613                         num_wrb_rings--;
2614                 }
2615         }
2616         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2617                 wrb_mem_index = 0;
2618                 offset = 0;
2619                 size = 0;
2620
2621                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2622                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2623                                             &phwi_context->be_wrbq[i]);
2624                 if (status != 0) {
2625                         shost_printk(KERN_ERR, phba->shost,
2626                                      "wrbq create failed.\n");
                             kfree(pwrb_arr);
2627                         return status;
2628                 }
2629                 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2630                                                                    id;
2631         }
2632         kfree(pwrb_arr);
2633         return 0;
2634 }
2635
2636 static void free_wrb_handles(struct beiscsi_hba *phba)
2637 {
2638         unsigned int index;
2639         struct hwi_controller *phwi_ctrlr;
2640         struct hwi_wrb_context *pwrb_context;
2641
2642         phwi_ctrlr = phba->phwi_ctrlr;
2643         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2644                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2645                 kfree(pwrb_context->pwrb_handle_base);
2646                 kfree(pwrb_context->pwrb_handle_basestd);
2647         }
2648 }
2649
2650 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2651 {
2652         struct be_queue_info *q;
2653         struct be_ctrl_info *ctrl = &phba->ctrl;
2654
2655         q = &phba->ctrl.mcc_obj.q;
2656         if (q->created)
2657                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2658         be_queue_free(phba, q);
2659
2660         q = &phba->ctrl.mcc_obj.cq;
2661         if (q->created)
2662                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2663         be_queue_free(phba, q);
2664 }
2665
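/*
 * hwi_cleanup - destroy every queue created during port init: the
 * per-connection WRB queues, the default PDU header and data queues,
 * the posted SGL pages, the completion and event queues and finally
 * the MCC queues.
 */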
2666 static void hwi_cleanup(struct beiscsi_hba *phba)
2667 {
2668         struct be_queue_info *q;
2669         struct be_ctrl_info *ctrl = &phba->ctrl;
2670         struct hwi_controller *phwi_ctrlr;
2671         struct hwi_context_memory *phwi_context;
2672         int i, eq_num;
2673
2674         phwi_ctrlr = phba->phwi_ctrlr;
2675         phwi_context = phwi_ctrlr->phwi_ctxt;
2676         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2677                 q = &phwi_context->be_wrbq[i];
2678                 if (q->created)
2679                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2680         }
2681         free_wrb_handles(phba);
2682
2683         q = &phwi_context->be_def_hdrq;
2684         if (q->created)
2685                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2686
2687         q = &phwi_context->be_def_dataq;
2688         if (q->created)
2689                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2690
2691         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2692
2693         for (i = 0; i < (phba->num_cpus); i++) {
2694                 q = &phwi_context->be_cq[i];
2695                 if (q->created)
2696                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2697         }
2698         if (phba->msix_enabled)
2699                 eq_num = 1;
2700         else
2701                 eq_num = 0;
2702         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2703                 q = &phwi_context->be_eq[i].q;
2704                 if (q->created)
2705                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2706         }
2707         be_mcc_queues_destroy(phba);
2708 }
2709
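/*
 * be_mcc_queues_create - allocate and create the MCC queue and its
 * completion queue.  The MCC CQ is bound to the extra event queue when
 * MSI-X is enabled, otherwise to EQ 0.
 */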
2710 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2711                                 struct hwi_context_memory *phwi_context)
2712 {
2713         struct be_queue_info *q, *cq;
2714         struct be_ctrl_info *ctrl = &phba->ctrl;
2715
2716         /* Alloc MCC compl queue */
2717         cq = &phba->ctrl.mcc_obj.cq;
2718         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2719                         sizeof(struct be_mcc_compl)))
2720                 goto err;
2721         /* Ask BE to create MCC compl queue; */
2722         if (phba->msix_enabled) {
2723                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2724                                          [phba->num_cpus].q, false, true, 0))
2725                         goto mcc_cq_free;
2726         } else {
2727                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2728                                           false, true, 0))
2729                         goto mcc_cq_free;
2730         }
2731
2732         /* Alloc MCC queue */
2733         q = &phba->ctrl.mcc_obj.q;
2734         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2735                 goto mcc_cq_destroy;
2736
2737         /* Ask BE to create MCC queue */
2738         if (beiscsi_cmd_mccq_create(phba, q, cq))
2739                 goto mcc_q_free;
2740
2741         return 0;
2742
2743 mcc_q_free:
2744         be_queue_free(phba, q);
2745 mcc_cq_destroy:
2746         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2747 mcc_cq_free:
2748         be_queue_free(phba, cq);
2749 err:
2750         return -1;
2751 }
2752
2753 static int find_num_cpus(void)
2754 {
2755         int  num_cpus = 0;
2756
2757         num_cpus = num_online_cpus();
2758         if (num_cpus >= MAX_CPUS)
2759                 num_cpus = MAX_CPUS - 1;
2760
2761         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2762         return num_cpus;
2763 }
2764
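/*
 * hwi_init_port - bring up the adapter queues: initialize the firmware,
 * create the event, MCC and completion queues and the default PDU
 * rings, post the SGL pages and create the per-connection WRB rings.
 * Any failure tears everything down again through hwi_cleanup().
 */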
2765 static int hwi_init_port(struct beiscsi_hba *phba)
2766 {
2767         struct hwi_controller *phwi_ctrlr;
2768         struct hwi_context_memory *phwi_context;
2769         unsigned int def_pdu_ring_sz;
2770         struct be_ctrl_info *ctrl = &phba->ctrl;
2771         int status;
2772
2773         def_pdu_ring_sz =
2774                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2775         phwi_ctrlr = phba->phwi_ctrlr;
2776         phwi_context = phwi_ctrlr->phwi_ctxt;
2777         phwi_context->max_eqd = 0;
2778         phwi_context->min_eqd = 0;
2779         phwi_context->cur_eqd = 64;
2780         be_cmd_fw_initialize(&phba->ctrl);
2781
2782         status = beiscsi_create_eqs(phba, phwi_context);
2783         if (status != 0) {
2784                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2785                 goto error;
2786         }
2787
2788         status = be_mcc_queues_create(phba, phwi_context);
2789         if (status != 0)
2790                 goto error;
2791
2792         status = mgmt_check_supported_fw(ctrl, phba);
2793         if (status != 0) {
2794                 shost_printk(KERN_ERR, phba->shost,
2795                              "Unsupported fw version \n");
2796                 goto error;
2797         }
2798
2799         if (phba->fw_config.iscsi_features == 0x1)
2800                 ring_mode = 1;
2801         else
2802                 ring_mode = 0;
2803
2804         status = beiscsi_create_cqs(phba, phwi_context);
2805         if (status != 0) {
2806                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2807                 goto error;
2808         }
2809
2810         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2811                                         def_pdu_ring_sz);
2812         if (status != 0) {
2813                 shost_printk(KERN_ERR, phba->shost,
2814                              "Default Header not created\n");
2815                 goto error;
2816         }
2817
2818         status = beiscsi_create_def_data(phba, phwi_context,
2819                                          phwi_ctrlr, def_pdu_ring_sz);
2820         if (status != 0) {
2821                 shost_printk(KERN_ERR, phba->shost,
2822                              "Default Data not created\n");
2823                 goto error;
2824         }
2825
2826         status = beiscsi_post_pages(phba);
2827         if (status != 0) {
2828                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2829                 goto error;
2830         }
2831
2832         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2833         if (status != 0) {
2834                 shost_printk(KERN_ERR, phba->shost,
2835                              "WRB Rings not created\n");
2836                 goto error;
2837         }
2838
2839         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2840         return 0;
2841
2842 error:
2843         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2844         hwi_cleanup(phba);
2845         return -ENOMEM;
2846 }
2847
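/*
 * hwi_init_controller - locate the hwi_context_memory carved out of
 * HWI_MEM_ADDN_CONTEXT, initialize the global templates, WRB handles
 * and async PDU context, then create the adapter queues through
 * hwi_init_port().
 */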
2848 static int hwi_init_controller(struct beiscsi_hba *phba)
2849 {
2850         struct hwi_controller *phwi_ctrlr;
2851
2852         phwi_ctrlr = phba->phwi_ctrlr;
2853         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2854                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2855                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2856                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2857                          phwi_ctrlr->phwi_ctxt);
2858         } else {
2859                 shost_printk(KERN_ERR, phba->shost,
2860                              "HWI_MEM_ADDN_CONTEXT has more than one element. "
2861                              "Failing to load\n");
2862                 return -ENOMEM;
2863         }
2864
2865         iscsi_init_global_templates(phba);
2866         beiscsi_init_wrb_handle(phba);
2867         hwi_init_async_pdu_ctx(phba);
2868         if (hwi_init_port(phba) != 0) {
2869                 shost_printk(KERN_ERR, phba->shost,
2870                              "hwi_init_controller failed\n");
2871                 return -ENOMEM;
2872         }
2873         return 0;
2874 }
2875
2876 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2877 {
2878         struct be_mem_descriptor *mem_descr;
2879         int i, j;
2880
2881         mem_descr = phba->init_mem;
2882         i = 0;
2883         j = 0;
2884         for (i = 0; i < SE_MEM_MAX; i++) {
2885                 for (j = mem_descr->num_elements; j > 0; j--) {
2886                         pci_free_consistent(phba->pcidev,
2887                           mem_descr->mem_array[j - 1].size,
2888                           mem_descr->mem_array[j - 1].virtual_address,
2889                           mem_descr->mem_array[j - 1].bus_address.
2890                                 u.a64.address);
2891                 }
2892                 kfree(mem_descr->mem_array);
2893                 mem_descr++;
2894         }
2895         kfree(phba->init_mem);
2896         kfree(phba->phwi_ctrlr);
2897 }
2898
2899 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2900 {
2901         int ret = -ENOMEM;
2902
2903         ret = beiscsi_get_memory(phba);
2904         if (ret < 0) {
2905                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2906                              "Failed in beiscsi_get_memory\n");
2907                 return ret;
2908         }
2909
2910         ret = hwi_init_controller(phba);
2911         if (ret)
2912                 goto free_init;
2913         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2914         return 0;
2915
2916 free_init:
2917         beiscsi_free_mem(phba);
2918         return -ENOMEM;
2919 }
2920
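/*
 * beiscsi_init_sgl_handle - build the SGL handle tables.  The first
 * ios_per_ctrl handles are reserved for I/O, the rest for management
 * and error-handling tasks; each handle is bound to its iscsi_sge
 * fragment and given an sgl_index starting at the firmware's
 * iscsi_icd_start.
 */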
2921 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2922 {
2923         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2924         struct sgl_handle *psgl_handle;
2925         struct iscsi_sge *pfrag;
2926         unsigned int arr_index, i, idx;
2927
2928         phba->io_sgl_hndl_avbl = 0;
2929         phba->eh_sgl_hndl_avbl = 0;
2930
2931         if (ring_mode) {
2932                 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2933                                                phba->params.icds_per_ctrl,
2934                                                GFP_KERNEL);
2935                 if (!phba->sgl_hndl_array) {
2936                         shost_printk(KERN_ERR, phba->shost,
2937                                      "Mem Alloc Failed. Failing to load\n");
2938                         return -ENOMEM;
2939                 }
2940         }
2941
2942         mem_descr_sglh = phba->init_mem;
2943         mem_descr_sglh += HWI_MEM_SGLH;
2944         if (1 == mem_descr_sglh->num_elements) {
2945                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2946                                                  phba->params.ios_per_ctrl,
2947                                                  GFP_KERNEL);
2948                 if (!phba->io_sgl_hndl_base) {
2949                         if (ring_mode)
2950                                 kfree(phba->sgl_hndl_array);
2951                         shost_printk(KERN_ERR, phba->shost,
2952                                      "Mem Alloc Failed. Failing to load\n");
2953                         return -ENOMEM;
2954                 }
2955                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2956                                                  (phba->params.icds_per_ctrl -
2957                                                  phba->params.ios_per_ctrl),
2958                                                  GFP_KERNEL);
2959                 if (!phba->eh_sgl_hndl_base) {
2960                         kfree(phba->io_sgl_hndl_base);
2961                         shost_printk(KERN_ERR, phba->shost,
2962                                      "Mem Alloc Failed. Failing to load\n");
2963                         return -ENOMEM;
2964                 }
2965         } else {
2966                 shost_printk(KERN_ERR, phba->shost,
2967                              "HWI_MEM_SGLH has more than one element. "
2968                              "Failing to load\n");
2969                 return -ENOMEM;
2970         }
2971
2972         arr_index = 0;
2973         idx = 0;
2974         while (idx < mem_descr_sglh->num_elements) {
2975                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2976
2977                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2978                       sizeof(struct sgl_handle)); i++) {
2979                         if (arr_index < phba->params.ios_per_ctrl) {
2980                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2981                                 phba->io_sgl_hndl_avbl++;
2982                                 arr_index++;
2983                         } else {
2984                                 phba->eh_sgl_hndl_base[arr_index -
2985                                         phba->params.ios_per_ctrl] =
2986                                                                 psgl_handle;
2987                                 arr_index++;
2988                                 phba->eh_sgl_hndl_avbl++;
2989                         }
2990                         psgl_handle++;
2991                 }
2992                 idx++;
2993         }
2994         SE_DEBUG(DBG_LVL_8,
2995                  "phba->io_sgl_hndl_avbl=%d "
2996                  "phba->eh_sgl_hndl_avbl=%d\n",
2997                  phba->io_sgl_hndl_avbl,
2998                  phba->eh_sgl_hndl_avbl);
2999         mem_descr_sg = phba->init_mem;
3000         mem_descr_sg += HWI_MEM_SGE;
3001         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3002                  mem_descr_sg->num_elements);
3003         arr_index = 0;
3004         idx = 0;
3005         while (idx < mem_descr_sg->num_elements) {
3006                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3007
3008                 for (i = 0;
3009                      i < (mem_descr_sg->mem_array[idx].size) /
3010                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3011                      i++) {
3012                         if (arr_index < phba->params.ios_per_ctrl)
3013                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3014                         else
3015                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3016                                                 phba->params.ios_per_ctrl];
3017                         psgl_handle->pfrag = pfrag;
3018                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3019                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3020                         pfrag += phba->params.num_sge_per_io;
3021                         psgl_handle->sgl_index =
3022                                 phba->fw_config.iscsi_icd_start + arr_index++;
3023                 }
3024                 idx++;
3025         }
3026         phba->io_sgl_free_index = 0;
3027         phba->io_sgl_alloc_index = 0;
3028         phba->eh_sgl_free_index = 0;
3029         phba->eh_sgl_alloc_index = 0;
3030         return 0;
3031 }
3032
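/*
 * hba_setup_cid_tbls - allocate the CID and endpoint lookup tables and
 * fill the CID array with every other connection id starting at the
 * firmware's iscsi_cid_start.
 */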
3033 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3034 {
3035         int i, new_cid;
3036
3037         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3038                                   GFP_KERNEL);
3039         if (!phba->cid_array) {
3040                 shost_printk(KERN_ERR, phba->shost,
3041                              "Failed to allocate memory in "
3042                              "hba_setup_cid_tbls\n");
3043                 return -ENOMEM;
3044         }
3045         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3046                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3047         if (!phba->ep_array) {
3048                 shost_printk(KERN_ERR, phba->shost,
3049                              "Failed to allocate memory in "
3050                              "hba_setup_cid_tbls \n");
3051                 kfree(phba->cid_array);
3052                 return -ENOMEM;
3053         }
3054         new_cid = phba->fw_config.iscsi_cid_start;
3055         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3056                 phba->cid_array[i] = new_cid;
3057                 new_cid += 2;
3058         }
3059         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3060         return 0;
3061 }
3062
3063 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3064 {
3065         struct be_ctrl_info *ctrl = &phba->ctrl;
3066         struct hwi_controller *phwi_ctrlr;
3067         struct hwi_context_memory *phwi_context;
3068         struct be_queue_info *eq;
3069         u8 __iomem *addr;
3070         u32 reg, i;
3071         u32 enabled;
3072
3073         phwi_ctrlr = phba->phwi_ctrlr;
3074         phwi_context = phwi_ctrlr->phwi_ctxt;
3075
3076         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3077                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3078         reg = ioread32(addr);
3079         SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3080
3081         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3082         if (!enabled) {
3083                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3084                 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3085                 iowrite32(reg, addr);
3086                 for (i = 0; i <= phba->num_cpus; i++) {
3087                         eq = &phwi_context->be_eq[i].q;
3088                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3089                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3090                 }
3091         } else
3092                 shost_printk(KERN_WARNING, phba->shost,
3093                              "In hwi_enable_intr, Not Enabled \n");
3094         return true;
3095 }
3096
3097 static void hwi_disable_intr(struct beiscsi_hba *phba)
3098 {
3099         struct be_ctrl_info *ctrl = &phba->ctrl;
3100
3101         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3102         u32 reg = ioread32(addr);
3103
3104         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3105         if (enabled) {
3106                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3107                 iowrite32(reg, addr);
3108         } else
3109                 shost_printk(KERN_WARNING, phba->shost,
3110                              "In hwi_disable_intr, Already Disabled \n");
3111 }
3112
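/*
 * beiscsi_init_port - initialize the controller, the SGL handle tables
 * and the CID tables; failures after controller init tear the adapter
 * queues down again through hwi_cleanup().
 */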
3113 static int beiscsi_init_port(struct beiscsi_hba *phba)
3114 {
3115         int ret;
3116
3117         ret = beiscsi_init_controller(phba);
3118         if (ret < 0) {
3119                 shost_printk(KERN_ERR, phba->shost,
3120                              "beiscsi_dev_probe - Failed in "
3121                              "beiscsi_init_controller\n");
3122                 return ret;
3123         }
3124         ret = beiscsi_init_sgl_handle(phba);
3125         if (ret < 0) {
3126                 shost_printk(KERN_ERR, phba->shost,
3127                              "beiscsi_dev_probe - Failed in "
3128                              "beiscsi_init_sgl_handle\n");
3129                 goto do_cleanup_ctrlr;
3130         }
3131
3132         if (hba_setup_cid_tbls(phba)) {
3133                 shost_printk(KERN_ERR, phba->shost,
3134                              "Failed in hba_setup_cid_tbls\n");
3135                 if (ring_mode)
3136                         kfree(phba->sgl_hndl_array);
3137                 kfree(phba->io_sgl_hndl_base);
3138                 kfree(phba->eh_sgl_hndl_base);
3139                 goto do_cleanup_ctrlr;
3140         }
3141
3142         return ret;
3143
3144 do_cleanup_ctrlr:
3145         hwi_cleanup(phba);
3146         return ret;
3147 }
3148
3149 static void hwi_purge_eq(struct beiscsi_hba *phba)
3150 {
3151         struct hwi_controller *phwi_ctrlr;
3152         struct hwi_context_memory *phwi_context;
3153         struct be_queue_info *eq;
3154         struct be_eq_entry *eqe = NULL;
3155         int i, eq_msix;
3156
3157         phwi_ctrlr = phba->phwi_ctrlr;
3158         phwi_context = phwi_ctrlr->phwi_ctxt;
3159         if (phba->msix_enabled)
3160                 eq_msix = 1;
3161         else
3162                 eq_msix = 0;
3163
3164         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3165                 eq = &phwi_context->be_eq[i].q;
3166                 eqe = queue_tail_node(eq);
3167
3168                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3169                                         & EQE_VALID_MASK) {
3170                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3171                         queue_tail_inc(eq);
3172                         eqe = queue_tail_node(eq);
3173                 }
3174         }
3175 }
3176
3177 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3178 {
3179         unsigned char mgmt_status;
3180
3181         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3182         if (mgmt_status)
3183                 shost_printk(KERN_WARNING, phba->shost,
3184                              "mgmt_epfw_cleanup FAILED \n");
3185         hwi_cleanup(phba);
3186         hwi_purge_eq(phba);
3187         if (ring_mode)
3188                 kfree(phba->sgl_hndl_array);
3189         kfree(phba->io_sgl_hndl_base);
3190         kfree(phba->eh_sgl_hndl_base);
3191         kfree(phba->cid_array);
3192         kfree(phba->ep_array);
3193 }
3194
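/*
 * beiscsi_offload_connection - build an iscsi_target_context_update_wrb
 * from the negotiated login parameters (burst lengths, digests, ERL,
 * initial R2T, exp_statsn and the pad buffer address), link it to the
 * next WRB index and ring the TXULP doorbell so the adapter takes over
 * the connection.
 */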
3195 void
3196 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3197                            struct beiscsi_offload_params *params)
3198 {
3199         struct wrb_handle *pwrb_handle;
3200         struct iscsi_target_context_update_wrb *pwrb = NULL;
3201         struct be_mem_descriptor *mem_descr;
3202         struct beiscsi_hba *phba = beiscsi_conn->phba;
3203         u32 doorbell = 0;
3204
3205         /*
3206          * We can always use 0 here because it is reserved by libiscsi for
3207          * login/startup related tasks.
3208          */
3209         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3210                                        phba->fw_config.iscsi_cid_start));
3211         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3212         memset(pwrb, 0, sizeof(*pwrb));
3213         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3214                       max_burst_length, pwrb, params->dw[offsetof
3215                       (struct amap_beiscsi_offload_params,
3216                       max_burst_length) / 32]);
3217         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3218                       max_send_data_segment_length, pwrb,
3219                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3220                       max_send_data_segment_length) / 32]);
3221         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3222                       first_burst_length,
3223                       pwrb,
3224                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3225                       first_burst_length) / 32]);
3226
3227         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3228                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3229                       erl) / 32] & OFFLD_PARAMS_ERL));
3230         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3231                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3232                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3233         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3234                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3235                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3236         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3237                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3238                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3239         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3240                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3241                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3242         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3243                       pwrb,
3244                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3245                       exp_statsn) / 32] + 1));
3246         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3247                       0x7);
3248         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3249                       pwrb, pwrb_handle->wrb_index);
3250         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3251                       pwrb, pwrb_handle->nxt_wrb_index);
3252         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3253                         session_state, pwrb, 0);
3254         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3255                       pwrb, 1);
3256         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3257                       pwrb, 0);
3258         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3259                       0);
3260
3261         mem_descr = phba->init_mem;
3262         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3263
3264         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3265                         pad_buffer_addr_hi, pwrb,
3266                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3267         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3268                         pad_buffer_addr_lo, pwrb,
3269                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3270
3271         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3272
3273         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3274         if (!ring_mode)
3275                 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3276                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3277         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3278
3279         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3280 }
3281
3282 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3283                               int *index, int *age)
3284 {
3285         *index = (int)itt;
3286         if (age)
3287                 *age = conn->session->age;
3288 }
3289
3290 /**
3291  * beiscsi_alloc_pdu - allocates pdu and related resources
3292  * @task: libiscsi task
3293  * @opcode: opcode of pdu for task
3294  *
3295  * This is called with the session lock held. It will allocate
3296  * the wrb and sgl if needed for the command. And it will prep
3297  * the pdu's itt. beiscsi_parse_pdu will later translate
3298  * the pdu itt to the libiscsi task itt.
3299  */
3300 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3301 {
3302         struct beiscsi_io_task *io_task = task->dd_data;
3303         struct iscsi_conn *conn = task->conn;
3304         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3305         struct beiscsi_hba *phba = beiscsi_conn->phba;
3306         struct hwi_wrb_context *pwrb_context;
3307         struct hwi_controller *phwi_ctrlr;
3308         itt_t itt;
3309         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3310         dma_addr_t paddr;
3311
3312         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3313                                           GFP_KERNEL, &paddr);
3314         if (!io_task->cmd_bhs)
3315                 return -ENOMEM;
3316         io_task->bhs_pa.u.a64.address = paddr;
3317         io_task->libiscsi_itt = (itt_t)task->itt;
3318         io_task->pwrb_handle = alloc_wrb_handle(phba,
3319                                                 beiscsi_conn->beiscsi_conn_cid -
3320                                                 phba->fw_config.iscsi_cid_start
3321                                                 );
3322         io_task->conn = beiscsi_conn;
3323
3324         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3325         task->hdr_max = sizeof(struct be_cmd_bhs);
3326
3327         if (task->sc) {
3328                 spin_lock(&phba->io_sgl_lock);
3329                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3330                 spin_unlock(&phba->io_sgl_lock);
3331                 if (!io_task->psgl_handle)
3332                         goto free_hndls;
3333         } else {
3334                 io_task->scsi_cmnd = NULL;
3335                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3336                         if (!beiscsi_conn->login_in_progress) {
3337                                 spin_lock(&phba->mgmt_sgl_lock);
3338                                 io_task->psgl_handle = (struct sgl_handle *)
3339                                                 alloc_mgmt_sgl_handle(phba);
3340                                 spin_unlock(&phba->mgmt_sgl_lock);
3341                                 if (!io_task->psgl_handle)
3342                                         goto free_hndls;
3343
3344                                 beiscsi_conn->login_in_progress = 1;
3345                                 beiscsi_conn->plogin_sgl_handle =
3346                                                         io_task->psgl_handle;
3347                         } else {
3348                                 io_task->psgl_handle =
3349                                                 beiscsi_conn->plogin_sgl_handle;
3350                         }
3351                 } else {
3352                         spin_lock(&phba->mgmt_sgl_lock);
3353                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3354                         spin_unlock(&phba->mgmt_sgl_lock);
3355                         if (!io_task->psgl_handle)
3356                                 goto free_hndls;
3357                 }
3358         }
3359         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3360                                  wrb_index << 16) | (unsigned int)
3361                                 (io_task->psgl_handle->sgl_index));
3362         if (ring_mode) {
3363                 phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
3364                                      phba->fw_config.iscsi_icd_start] =
3365                                      io_task->psgl_handle;
3366                 io_task->psgl_handle->task = task;
3367                 io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid  -
3368                                             phba->fw_config.iscsi_cid_start;
3369         } else
3370                 io_task->pwrb_handle->pio_handle = task;
3371
3372         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3373         return 0;
3374
3375 free_hndls:
3376         phwi_ctrlr = phba->phwi_ctrlr;
3377         pwrb_context = &phwi_ctrlr->wrb_context[
3378                         beiscsi_conn->beiscsi_conn_cid -
3379                         phba->fw_config.iscsi_cid_start];
3380         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3381         io_task->pwrb_handle = NULL;
3382         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3383                       io_task->bhs_pa.u.a64.address);
3384         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3385         return -ENOMEM;
3386 }
3387
3388 static void beiscsi_cleanup_task(struct iscsi_task *task)
3389 {
3390         struct beiscsi_io_task *io_task = task->dd_data;
3391         struct iscsi_conn *conn = task->conn;
3392         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3393         struct beiscsi_hba *phba = beiscsi_conn->phba;
3394         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3395         struct hwi_wrb_context *pwrb_context;
3396         struct hwi_controller *phwi_ctrlr;
3397
3398         phwi_ctrlr = phba->phwi_ctrlr;
3399         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3400                         - phba->fw_config.iscsi_cid_start];
3401         if (io_task->pwrb_handle) {
3402                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3403                 io_task->pwrb_handle = NULL;
3404         }
3405
3406         if (io_task->cmd_bhs) {
3407                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3408                               io_task->bhs_pa.u.a64.address);
3409         }
3410
3411         if (task->sc) {
3412                 if (io_task->psgl_handle) {
3413                         spin_lock(&phba->io_sgl_lock);
3414                         free_io_sgl_handle(phba, io_task->psgl_handle);
3415                         spin_unlock(&phba->io_sgl_lock);
3416                         io_task->psgl_handle = NULL;
3417                 }
3418         } else {
3419                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3420                         return;
3421                 if (io_task->psgl_handle) {
3422                         spin_lock(&phba->mgmt_sgl_lock);
3423                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3424                         spin_unlock(&phba->mgmt_sgl_lock);
3425                         io_task->psgl_handle = NULL;
3426                 }
3427         }
3428 }
3429
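/*
 * beiscsi_iotask - post a SCSI I/O: fill the WRB with the direction,
 * LUN, transfer length, CmdSN and SGL index, write the scatter-gather
 * list, link the WRB to the next WRB index and ring the TXULP doorbell.
 */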
3430 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3431                           unsigned int num_sg, unsigned int xferlen,
3432                           unsigned int writedir)
3433 {
3434
3435         struct beiscsi_io_task *io_task = task->dd_data;
3436         struct iscsi_conn *conn = task->conn;
3437         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3438         struct beiscsi_hba *phba = beiscsi_conn->phba;
3439         struct iscsi_wrb *pwrb = NULL;
3440         unsigned int doorbell = 0;
3441
3442         pwrb = io_task->pwrb_handle->pwrb;
3443         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3444         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3445
3446         if (writedir) {
3447                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3448                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3449                               &io_task->cmd_bhs->iscsi_data_pdu,
3450                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3451                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3452                               &io_task->cmd_bhs->iscsi_data_pdu,
3453                               ISCSI_OPCODE_SCSI_DATA_OUT);
3454                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3455                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3456                 if (ring_mode)
3457                         io_task->psgl_handle->type = INI_WR_CMD;
3458                 else
3459                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3460                                       INI_WR_CMD);
3461                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3462         } else {
3463                 if (ring_mode)
3464                         io_task->psgl_handle->type = INI_RD_CMD;
3465                 else
3466                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3467                                       INI_RD_CMD);
3468                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3469         }
3470         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3471                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3472                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3473
3474         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3475                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3476                                   lun[0]));
3477         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3478         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3479                       io_task->pwrb_handle->wrb_index);
3480         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3481                       be32_to_cpu(task->cmdsn));
3482         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3483                       io_task->psgl_handle->sgl_index);
3484
3485         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3486
3487         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3488                       io_task->pwrb_handle->nxt_wrb_index);
3489         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3490
3491         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3492         if (!ring_mode)
3493                 doorbell |= (io_task->pwrb_handle->wrb_index &
3494                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3495         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3496
3497         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3498         return 0;
3499 }
3500
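/*
 * beiscsi_mtask - post a management task (login, nop-out, text, TMF or
 * logout).  For a TMF the WRB of the task being aborted is looked up
 * from its RTT and its ICDs are invalidated before the TMF is posted.
 */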
3501 static int beiscsi_mtask(struct iscsi_task *task)
3502 {
3503         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3504         struct iscsi_conn *conn = task->conn;
3505         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3506         struct beiscsi_hba *phba = beiscsi_conn->phba;
3507         struct iscsi_session *session;
3508         struct iscsi_wrb *pwrb = NULL;
3509         struct hwi_controller *phwi_ctrlr;
3510         struct hwi_wrb_context *pwrb_context;
3511         struct wrb_handle *pwrb_handle;
3512         unsigned int doorbell = 0;
3513         unsigned int i, cid;
3514         struct iscsi_task *aborted_task;
3515
3516         cid = beiscsi_conn->beiscsi_conn_cid;
3517         pwrb = io_task->pwrb_handle->pwrb;
3518         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3519                       be32_to_cpu(task->cmdsn));
3520         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3521                       io_task->pwrb_handle->wrb_index);
3522         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3523                       io_task->psgl_handle->sgl_index);
3524
3525         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3526         case ISCSI_OP_LOGIN:
3527                 if (ring_mode)
3528                         io_task->psgl_handle->type = TGT_DM_CMD;
3529                 else
3530                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3531                                       TGT_DM_CMD);
3532                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3533                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3534                 hwi_write_buffer(pwrb, task);
3535                 break;
3536         case ISCSI_OP_NOOP_OUT:
3537                 if (ring_mode)
3538                         io_task->psgl_handle->type = INI_RD_CMD;
3539                 else
3540                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3541                                       INI_RD_CMD);
3542                 hwi_write_buffer(pwrb, task);
3543                 break;
3544         case ISCSI_OP_TEXT:
3545                 if (ring_mode)
3546                         io_task->psgl_handle->type = INI_WR_CMD;
3547                 else
3548                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3549                                       INI_WR_CMD);
3550                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3551                 hwi_write_buffer(pwrb, task);
3552                 break;
3553         case ISCSI_OP_SCSI_TMFUNC:
3554                 session = conn->session;
3555                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3556                 phwi_ctrlr = phba->phwi_ctrlr;
3557                 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3558                                             phba->fw_config.iscsi_cid_start];
3559                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3560                                                                 >> 16];
3561                 aborted_task = pwrb_handle->pio_handle;
3562                 if (!aborted_task)
3563                         return 0;
3564
3565                 aborted_io_task = aborted_task->dd_data;
3566                 if (!aborted_io_task->scsi_cmnd)
3567                         return 0;
3568
3569                 mgmt_invalidate_icds(phba,
3570                                      aborted_io_task->psgl_handle->sgl_index,
3571                                      cid);
3572                 if (ring_mode)
3573                         io_task->psgl_handle->type = INI_TMF_CMD;
3574                 else
3575                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3576                                       INI_TMF_CMD);
3577                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3578                 hwi_write_buffer(pwrb, task);
3579                 break;
3580         case ISCSI_OP_LOGOUT:
3581                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3582                 if (ring_mode)
3583                         io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
3584                 else
3585                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3586                                       HWH_TYPE_LOGOUT);
3587                 hwi_write_buffer(pwrb, task);
3588                 break;
3589
3590         default:
3591                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3592                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3593                 return -EINVAL;
3594         }
3595
3596         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3597                       be32_to_cpu(task->data_count));
3598         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3599                       io_task->pwrb_handle->nxt_wrb_index);
3600         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3601
3602         doorbell |= cid & DB_WRB_POST_CID_MASK;
3603         if (!ring_mode)
3604                 doorbell |= (io_task->pwrb_handle->wrb_index &
3605                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3606         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3607         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3608         return 0;
3609 }
3610
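/*
 * beiscsi_task_xmit - libiscsi transmit hook: management PDUs are sent
 * through beiscsi_mtask(); SCSI commands are DMA-mapped and handed to
 * beiscsi_iotask() with their direction and transfer length.
 */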
3611 static int beiscsi_task_xmit(struct iscsi_task *task)
3612 {
3613         struct iscsi_conn *conn = task->conn;
3614         struct beiscsi_io_task *io_task = task->dd_data;
3615         struct scsi_cmnd *sc = task->sc;
3616         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3617         struct scatterlist *sg;
3618         int num_sg;
3619         unsigned int  writedir = 0, xferlen = 0;
3620
3621         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3622                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3623                  task, conn, beiscsi_conn);
3624         if (!sc)
3625                 return beiscsi_mtask(task);
3626
3627         io_task->scsi_cmnd = sc;
3628         num_sg = scsi_dma_map(sc);
3629         if (num_sg < 0) {
3630                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3631                 return num_sg;
3632         }
3633         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3634                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3635         xferlen = scsi_bufflen(sc);
3636         sg = scsi_sglist(sc);
3637         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3638                 writedir = 1;
3639                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3640                          task->imm_count);
3641         } else
3642                 writedir = 0;
3643         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3644 }
3645
3646
3647 static void beiscsi_remove(struct pci_dev *pcidev)
3648 {
3649         struct beiscsi_hba *phba = NULL;
3650         struct hwi_controller *phwi_ctrlr;
3651         struct hwi_context_memory *phwi_context;
3652         struct be_eq_obj *pbe_eq;
3653         unsigned int i, msix_vec;
3654
3655         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3656         if (!phba) {
3657                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3658                 return;
3659         }
3660
3661         phwi_ctrlr = phba->phwi_ctrlr;
3662         phwi_context = phwi_ctrlr->phwi_ctxt;
3663         hwi_disable_intr(phba);
3664         if (phba->msix_enabled) {
3665                 for (i = 0; i <= phba->num_cpus; i++) {
3666                         msix_vec = phba->msix_entries[i].vector;
3667                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3668                 }
3669         } else if (phba->pcidev->irq) {
3670                 free_irq(phba->pcidev->irq, phba);
3671         }
3672         pci_disable_msix(phba->pcidev);
3673         destroy_workqueue(phba->wq);
3674         if (blk_iopoll_enabled)
3675                 for (i = 0; i < phba->num_cpus; i++) {
3676                         pbe_eq = &phwi_context->be_eq[i];
3677                         blk_iopoll_disable(&pbe_eq->iopoll);
3678                 }
3679
3680         beiscsi_clean_port(phba);
3681         beiscsi_free_mem(phba);
3682         beiscsi_unmap_pci_function(phba);
3683         pci_free_consistent(phba->pcidev,
3684                             phba->ctrl.mbox_mem_alloced.size,
3685                             phba->ctrl.mbox_mem_alloced.va,
3686                             phba->ctrl.mbox_mem_alloced.dma);
3687         iscsi_host_remove(phba->shost);
3688         pci_dev_put(phba->pcidev);
3689         iscsi_host_free(phba->shost);
3690 }
3691
3692 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3693 {
3694         int i, status;
3695
3696         for (i = 0; i <= phba->num_cpus; i++)
3697                 phba->msix_entries[i].entry = i;
3698
3699         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3700                                  (phba->num_cpus + 1));
3701         if (!status)
3702                 phba->msix_enabled = true;
3703
3704         return;
3705 }
3706
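/*
 * beiscsi_dev_probe - PCI probe: enable the device, allocate the host,
 * set up MSI-X (one EQ per CPU plus one extra used for the MCC CQ),
 * initialize the mailbox and control structures, query the firmware
 * configuration, bring up the port, start the per-CPU iopoll handlers
 * and enable interrupts.
 */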
3707 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3708                                 const struct pci_device_id *id)
3709 {
3710         struct beiscsi_hba *phba = NULL;
3711         struct hwi_controller *phwi_ctrlr;
3712         struct hwi_context_memory *phwi_context;
3713         struct be_eq_obj *pbe_eq;
3714         int ret, msix_vec, num_cpus, i;
3715
3716         ret = beiscsi_enable_pci(pcidev);
3717         if (ret < 0) {
3718                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3719                         "Failed to enable pci device\n");
3720                 return ret;
3721         }
3722
3723         phba = beiscsi_hba_alloc(pcidev);
3724         if (!phba) {
3725                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3726                         "Failed in beiscsi_hba_alloc\n");
3727                 goto disable_pci;
3728         }
3729         SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3730
3731         pci_set_drvdata(pcidev, phba);
3732         if (enable_msix)
3733                 num_cpus = find_num_cpus();
3734         else
3735                 num_cpus = 1;
3736         phba->num_cpus = num_cpus;
3737         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3738
3739         if (enable_msix)
3740                 beiscsi_msix_enable(phba);
3741         ret = be_ctrl_init(phba, pcidev);
3742         if (ret) {
3743                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3744                                 "Failed in be_ctrl_init\n");
3745                 goto hba_free;
3746         }
3747
3748         spin_lock_init(&phba->io_sgl_lock);
3749         spin_lock_init(&phba->mgmt_sgl_lock);
3750         spin_lock_init(&phba->isr_lock);
3751         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3752         if (ret != 0) {
3753                 shost_printk(KERN_ERR, phba->shost,
3754                              "Error getting fw config\n");
3755                 goto free_port;
3756         }
3757         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3758         phba->shost->can_queue = phba->params.ios_per_ctrl;
3759         beiscsi_get_params(phba);
3760         ret = beiscsi_init_port(phba);
3761         if (ret < 0) {
3762                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3763                              "Failed in beiscsi_init_port\n");
3764                 goto free_port;
3765         }
3766
3767         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3768                  phba->shost->host_no);
3769         phba->wq = create_workqueue(phba->wq_name);
3770         if (!phba->wq) {
3771                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3772                                 "Failed to allocate work queue\n");
3773                 goto free_twq;
3774         }
3775
3776         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3777
3778         phwi_ctrlr = phba->phwi_ctrlr;
3779         phwi_context = phwi_ctrlr->phwi_ctxt;
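        /*
         * One blk_iopoll instance per event queue; be_iopoll() processes
         * at most be_iopoll_budget completions per poll pass.
         */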
3780         if (blk_iopoll_enabled) {
3781                 for (i = 0; i < phba->num_cpus; i++) {
3782                         pbe_eq = &phwi_context->be_eq[i];
3783                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3784                                         be_iopoll);
3785                         blk_iopoll_enable(&pbe_eq->iopoll);
3786                 }
3787         }
3788         ret = beiscsi_init_irqs(phba);
3789         if (ret < 0) {
3790                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3791                              "Failed in beiscsi_init_irqs\n");
3792                 goto free_blkenbld;
3793         }
3794         ret = hwi_enable_intr(phba);
3795         if (ret < 0) {
3796                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3797                              "Failed in hwi_enable_intr\n");
3798                 goto free_ctrlr;
3799         }
3800         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3801         return 0;
3802
3803 free_ctrlr:
3804         if (phba->msix_enabled) {
3805                 for (i = 0; i <= phba->num_cpus; i++) {
3806                         msix_vec = phba->msix_entries[i].vector;
3807                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3808                 }
3809         } else
3810                 if (phba->pcidev->irq)
3811                         free_irq(phba->pcidev->irq, phba);
3812         pci_disable_msix(phba->pcidev);
3813 free_blkenbld:
3814         destroy_workqueue(phba->wq);
3815         if (blk_iopoll_enabled)
3816                 for (i = 0; i < phba->num_cpus; i++) {
3817                         pbe_eq = &phwi_context->be_eq[i];
3818                         blk_iopoll_disable(&pbe_eq->iopoll);
3819                 }
3820 free_twq:
3821         beiscsi_clean_port(phba);
3822         beiscsi_free_mem(phba);
3823 free_port:
3824         pci_free_consistent(phba->pcidev,
3825                             phba->ctrl.mbox_mem_alloced.size,
3826                             phba->ctrl.mbox_mem_alloced.va,
3827                             phba->ctrl.mbox_mem_alloced.dma);
3828         beiscsi_unmap_pci_function(phba);
3829 hba_free:
3830         iscsi_host_remove(phba->shost);
3831         pci_dev_put(phba->pcidev);
3832         iscsi_host_free(phba->shost);
3833 disable_pci:
3834         pci_disable_device(pcidev);
3835         return ret;
3836 }
3837
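/*
 * iSCSI transport template registered with the open-iscsi transport
 * class. CAP_DATA_PATH_OFFLOAD advertises that the data path is handled
 * by the adapter; the callbacks mix driver entry points with libiscsi
 * helpers for generic session and PDU handling.
 */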
3838 struct iscsi_transport beiscsi_iscsi_transport = {
3839         .owner = THIS_MODULE,
3840         .name = DRV_NAME,
3841         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
3842                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3843         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3844                 ISCSI_MAX_XMIT_DLENGTH |
3845                 ISCSI_HDRDGST_EN |
3846                 ISCSI_DATADGST_EN |
3847                 ISCSI_INITIAL_R2T_EN |
3848                 ISCSI_MAX_R2T |
3849                 ISCSI_IMM_DATA_EN |
3850                 ISCSI_FIRST_BURST |
3851                 ISCSI_MAX_BURST |
3852                 ISCSI_PDU_INORDER_EN |
3853                 ISCSI_DATASEQ_INORDER_EN |
3854                 ISCSI_ERL |
3855                 ISCSI_CONN_PORT |
3856                 ISCSI_CONN_ADDRESS |
3857                 ISCSI_EXP_STATSN |
3858                 ISCSI_PERSISTENT_PORT |
3859                 ISCSI_PERSISTENT_ADDRESS |
3860                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3861                 ISCSI_USERNAME | ISCSI_PASSWORD |
3862                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3863                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3864                 ISCSI_LU_RESET_TMO |
3865                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3866                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3867         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3868                                 ISCSI_HOST_INITIATOR_NAME,
3869         .create_session = beiscsi_session_create,
3870         .destroy_session = beiscsi_session_destroy,
3871         .create_conn = beiscsi_conn_create,
3872         .bind_conn = beiscsi_conn_bind,
3873         .destroy_conn = iscsi_conn_teardown,
3874         .set_param = beiscsi_set_param,
3875         .get_conn_param = beiscsi_conn_get_param,
3876         .get_session_param = iscsi_session_get_param,
3877         .get_host_param = beiscsi_get_host_param,
3878         .start_conn = beiscsi_conn_start,
3879         .stop_conn = beiscsi_conn_stop,
3880         .send_pdu = iscsi_conn_send_pdu,
3881         .xmit_task = beiscsi_task_xmit,
3882         .cleanup_task = beiscsi_cleanup_task,
3883         .alloc_pdu = beiscsi_alloc_pdu,
3884         .parse_pdu_itt = beiscsi_parse_pdu,
3885         .get_stats = beiscsi_conn_get_stats,
3886         .ep_connect = beiscsi_ep_connect,
3887         .ep_poll = beiscsi_ep_poll,
3888         .ep_disconnect = beiscsi_ep_disconnect,
3889         .session_recovery_timedout = iscsi_session_recovery_timedout,
3890 };
3891
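/* PCI driver glue: binds beiscsi_dev_probe()/beiscsi_remove() to beiscsi_pci_id_table. */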
3892 static struct pci_driver beiscsi_pci_driver = {
3893         .name = DRV_NAME,
3894         .probe = beiscsi_dev_probe,
3895         .remove = beiscsi_remove,
3896         .id_table = beiscsi_pci_id_table
3897 };
3898
3899
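/*
 * Module init: register the iSCSI transport first, then the PCI driver;
 * if PCI registration fails the transport is unregistered again.
 */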
3900 static int __init beiscsi_module_init(void)
3901 {
3902         int ret;
3903
3904         beiscsi_scsi_transport =
3905                         iscsi_register_transport(&beiscsi_iscsi_transport);
3906         if (!beiscsi_scsi_transport) {
3907                 SE_DEBUG(DBG_LVL_1,
3908                          "beiscsi_module_init - Unable to register beiscsi "
3909                          "transport.\n");
3910                 return -ENOMEM;
3911         }
3912         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3913                  &beiscsi_iscsi_transport);
3914
3915         ret = pci_register_driver(&beiscsi_pci_driver);
3916         if (ret) {
3917                 SE_DEBUG(DBG_LVL_1,
3918                          "beiscsi_module_init - Unable to register "
3919                          "beiscsi pci driver.\n");
3920                 goto unregister_iscsi_transport;
3921         }
3922         ring_mode = 0;
3923         return 0;
3924
3925 unregister_iscsi_transport:
3926         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3927         return ret;
3928 }
3929
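/* Module exit: tear down in the reverse order of beiscsi_module_init(). */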
3930 static void __exit beiscsi_module_exit(void)
3931 {
3932         pci_unregister_driver(&beiscsi_pci_driver);
3933         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3934 }
3935
3936 module_init(beiscsi_module_init);
3937 module_exit(beiscsi_module_exit);