[SCSI] be2iscsi: Move freeing of resources to stop_conn
drivers/scsi/be2iscsi/be_main.c  [linux-2.6.git]
1 /**
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43 static unsigned int ring_mode;
44
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
49 module_param(be_iopoll_budget, int, 0);
50 module_param(enable_msix, int, 0);
51 module_param(be_max_phys_size, uint, S_IRUGO);
52 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
53                                    "contiguous memory that can be allocated. "
54                                    "Range is 16 - 128");
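/*
 * Illustrative usage note (not part of the original source): these module
 * parameters would normally be supplied at load time, for example
 *
 *     modprobe be2iscsi enable_msix=0 be_max_phys_size=64
 *
 * The values above are only examples; be_max_phys_size is documented to
 * accept 16 - 128 (kilobytes), and be_iopoll_budget defaults to 10.
 */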
55
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58         blk_queue_max_segment_size(sdev->request_queue, 65536);
59         return 0;
60 }
61
62 /*------------------- PCI Driver operations and data ----------------- */
63 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69         { 0 }
70 };
71 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
73 static struct scsi_host_template beiscsi_sht = {
74         .module = THIS_MODULE,
75         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
76         .proc_name = DRV_NAME,
77         .queuecommand = iscsi_queuecommand,
78         .eh_abort_handler = iscsi_eh_abort,
79         .change_queue_depth = iscsi_change_queue_depth,
80         .slave_configure = beiscsi_slave_configure,
81         .target_alloc = iscsi_target_alloc,
82         .eh_device_reset_handler = iscsi_eh_device_reset,
83         .eh_target_reset_handler = iscsi_eh_target_reset,
84         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
85         .can_queue = BE2_IO_DEPTH,
86         .this_id = -1,
87         .max_sectors = BEISCSI_MAX_SECTORS,
88         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
89         .use_clustering = ENABLE_CLUSTERING,
90 };
91
92 static struct scsi_transport_template *beiscsi_scsi_transport;
93
94 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
95 {
96         struct beiscsi_hba *phba;
97         struct Scsi_Host *shost;
98
99         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
100         if (!shost) {
101                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
102                         "iscsi_host_alloc failed\n");
103                 return NULL;
104         }
105         shost->dma_boundary = pcidev->dma_mask;
106         shost->max_id = BE2_MAX_SESSIONS;
107         shost->max_channel = 0;
108         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
109         shost->max_lun = BEISCSI_NUM_MAX_LUN;
110         shost->transportt = beiscsi_scsi_transport;
111         phba = iscsi_host_priv(shost);
112         memset(phba, 0, sizeof(*phba));
113         phba->shost = shost;
114         phba->pcidev = pci_dev_get(pcidev);
115
116         if (iscsi_host_add(shost, &phba->pcidev->dev))
117                 goto free_devices;
118         return phba;
119
120 free_devices:
121         pci_dev_put(phba->pcidev);
122         iscsi_host_free(phba->shost);
123         return NULL;
124 }
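/*
 * Illustrative sketch of how this allocator is expected to be used by the
 * PCI probe path.  The function name beiscsi_dev_probe and the exact error
 * handling are assumptions (the probe routine is not part of this hunk);
 * only beiscsi_enable_pci(), beiscsi_hba_alloc() and be_ctrl_init() are
 * helpers actually defined in this file.
 *
 *     static int beiscsi_dev_probe(struct pci_dev *pcidev,
 *                                  const struct pci_device_id *id)
 *     {
 *             struct beiscsi_hba *phba;
 *             int ret;
 *
 *             ret = beiscsi_enable_pci(pcidev);
 *             if (ret)
 *                     return ret;
 *
 *             phba = beiscsi_hba_alloc(pcidev);
 *             if (!phba) {
 *                     pci_disable_device(pcidev);
 *                     return -ENOMEM;
 *             }
 *
 *             ret = be_ctrl_init(phba, pcidev);
 *             ...
 *     }
 */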
125
126 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127 {
128         if (phba->csr_va) {
129                 iounmap(phba->csr_va);
130                 phba->csr_va = NULL;
131         }
132         if (phba->db_va) {
133                 iounmap(phba->db_va);
134                 phba->db_va = NULL;
135         }
136         if (phba->pci_va) {
137                 iounmap(phba->pci_va);
138                 phba->pci_va = NULL;
139         }
140 }
141
142 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143                                 struct pci_dev *pcidev)
144 {
145         u8 __iomem *addr;
146
147         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
148                                pci_resource_len(pcidev, 2));
149         if (addr == NULL)
150                 return -ENOMEM;
151         phba->ctrl.csr = addr;
152         phba->csr_va = addr;
153         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
154
155         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
156         if (addr == NULL)
157                 goto pci_map_err;
158         phba->ctrl.db = addr;
159         phba->db_va = addr;
160         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
161
162         addr = ioremap_nocache(pci_resource_start(pcidev, 1),
163                                pci_resource_len(pcidev, 1));
164         if (addr == NULL)
165                 goto pci_map_err;
166         phba->ctrl.pcicfg = addr;
167         phba->pci_va = addr;
168         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
169         return 0;
170
171 pci_map_err:
172         beiscsi_unmap_pci_function(phba);
173         return -ENOMEM;
174 }
175
176 static int beiscsi_enable_pci(struct pci_dev *pcidev)
177 {
178         int ret;
179
180         ret = pci_enable_device(pcidev);
181         if (ret) {
182                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
183                         "failed. Returning -ENODEV\n");
184                 return ret;
185         }
186
187         pci_set_master(pcidev);
188         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
189                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
190                 if (ret) {
191                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
192                         pci_disable_device(pcidev);
193                         return ret;
194                 }
195         }
196         return 0;
197 }
198
199 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
200 {
201         struct be_ctrl_info *ctrl = &phba->ctrl;
202         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
203         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
204         int status = 0;
205
206         ctrl->pdev = pdev;
207         status = beiscsi_map_pci_bars(phba, pdev);
208         if (status)
209                 return status;
210         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
211         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
212                                                   mbox_mem_alloc->size,
213                                                   &mbox_mem_alloc->dma);
214         if (!mbox_mem_alloc->va) {
215                 beiscsi_unmap_pci_function(phba);
216                 status = -ENOMEM;
217                 return status;
218         }
219
220         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
221         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
222         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
223         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
224         spin_lock_init(&ctrl->mbox_lock);
225         spin_lock_init(&phba->ctrl.mcc_lock);
226         spin_lock_init(&phba->ctrl.mcc_cq_lock);
227
228         return status;
229 }
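/*
 * Worked example (illustrative only): the mailbox buffer is over-allocated
 * by 16 bytes so that a 16-byte-aligned window can be carved out of it with
 * PTR_ALIGN.  Assuming pci_alloc_consistent() returned va ending in ...1008
 * (hex), then
 *
 *     mbox_mem_align->va = PTR_ALIGN(...1008, 16) = ...1010
 *
 * i.e. at most 15 bytes are skipped at the front, which the extra 16 bytes
 * of the allocation always cover.
 */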
230
231 static void beiscsi_get_params(struct beiscsi_hba *phba)
232 {
233         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
234                                     - (phba->fw_config.iscsi_cid_count
235                                     + BE2_TMFS
236                                     + BE2_NOPOUT_REQ));
237         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
238         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
239         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
240         phba->params.num_sge_per_io = BE2_SGE;
241         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
242         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
243         phba->params.eq_timer = 64;
244         phba->params.num_eq_entries =
245             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
246                                     + BE2_TMFS) / 512) + 1) * 512;
247         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
248                                 ? 1024 : phba->params.num_eq_entries;
249         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
250                              phba->params.num_eq_entries);
251         phba->params.num_cq_entries =
252             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
253                                     + BE2_TMFS) / 512) + 1) * 512;
254         phba->params.wrbs_per_cxn = 256;
255 }
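/*
 * Worked example (illustrative; BE2_CMDS_PER_CXN and BE2_TMFS live in
 * be_main.h, the values below are assumed): with BE2_CMDS_PER_CXN = 64,
 * iscsi_cid_count = 64 and BE2_TMFS = 16,
 *
 *     num_eq_entries = (((64 * 2 + 64 * 2 + 16) / 512) + 1) * 512
 *                    = ((272 / 512) + 1) * 512
 *                    = 512
 *
 * which the clamp above then raises to the 1024-entry minimum.
 */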
256
257 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
258                            unsigned int id, unsigned int clr_interrupt,
259                            unsigned int num_processed,
260                            unsigned char rearm, unsigned char event)
261 {
262         u32 val = 0;
263         val |= id & DB_EQ_RING_ID_MASK;
264         if (rearm)
265                 val |= 1 << DB_EQ_REARM_SHIFT;
266         if (clr_interrupt)
267                 val |= 1 << DB_EQ_CLR_SHIFT;
268         if (event)
269                 val |= 1 << DB_EQ_EVNT_SHIFT;
270         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
271         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
272 }
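/*
 * Illustrative example: the EQ doorbell value is just independent bit-fields
 * OR-ed together.  Ringing EQ id 5 after consuming 3 entries, with
 * clear-interrupt, rearm and event all set, would compose (using the
 * DB_EQ_* shift/mask definitions from be_main.h):
 *
 *     val = (5 & DB_EQ_RING_ID_MASK)
 *         | (1 << DB_EQ_REARM_SHIFT)
 *         | (1 << DB_EQ_CLR_SHIFT)
 *         | (1 << DB_EQ_EVNT_SHIFT)
 *         | (3 << DB_EQ_NUM_POPPED_SHIFT);
 *
 * followed by a single iowrite32(val, phba->db_va + DB_EQ_OFFSET).
 */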
273
274 /**
275  * be_isr_mcc - interrupt handler for the MCC event queue (MSI-X mode)
276  * @irq: Not used
277  * @dev_id: Pointer to the be_eq_obj of the MCC event queue
278  */
279 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
280 {
281         struct beiscsi_hba *phba;
282         struct be_eq_entry *eqe = NULL;
283         struct be_queue_info *eq;
284         struct be_queue_info *mcc;
285         unsigned int num_eq_processed;
286         struct be_eq_obj *pbe_eq;
287         unsigned long flags;
288
289         pbe_eq = dev_id;
290         eq = &pbe_eq->q;
291         phba =  pbe_eq->phba;
292         mcc = &phba->ctrl.mcc_obj.cq;
293         eqe = queue_tail_node(eq);
294         if (!eqe)
295                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
296
297         num_eq_processed = 0;
298
299         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
300                                 & EQE_VALID_MASK) {
301                 if (((eqe->dw[offsetof(struct amap_eq_entry,
302                      resource_id) / 32] &
303                      EQE_RESID_MASK) >> 16) == mcc->id) {
304                         spin_lock_irqsave(&phba->isr_lock, flags);
305                         phba->todo_mcc_cq = 1;
306                         spin_unlock_irqrestore(&phba->isr_lock, flags);
307                 }
308                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
309                 queue_tail_inc(eq);
310                 eqe = queue_tail_node(eq);
311                 num_eq_processed++;
312         }
313         if (phba->todo_mcc_cq)
314                 queue_work(phba->wq, &phba->work_cqs);
315         if (num_eq_processed)
316                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
317
318         return IRQ_HANDLED;
319 }
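/*
 * Illustrative summary of the event-queue consumption pattern shared by all
 * three ISRs in this file: walk valid entries from the tail, clear each
 * valid bit, advance the tail, count what was consumed, and ring the EQ
 * doorbell once at the end so the hardware can reuse those entries.
 *
 *     while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 *                             & EQE_VALID_MASK) {
 *             ... flag or schedule the work for this event ...
 *             AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 *             queue_tail_inc(eq);
 *             eqe = queue_tail_node(eq);
 *             num_eq_processed++;
 *     }
 *     if (num_eq_processed)
 *             hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, rearm, 1);
 */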
320
321 /**
322  * be_isr_msix - interrupt handler for an I/O event queue (MSI-X mode)
323  * @irq: Not used
324  * @dev_id: Pointer to the be_eq_obj being serviced
325  */
326 static irqreturn_t be_isr_msix(int irq, void *dev_id)
327 {
328         struct beiscsi_hba *phba;
329         struct be_eq_entry *eqe = NULL;
330         struct be_queue_info *eq;
331         struct be_queue_info *cq;
332         unsigned int num_eq_processed;
333         struct be_eq_obj *pbe_eq;
334         unsigned long flags;
335
336         pbe_eq = dev_id;
337         eq = &pbe_eq->q;
338         cq = pbe_eq->cq;
339         eqe = queue_tail_node(eq);
340         if (!eqe)
341                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
342
343         phba = pbe_eq->phba;
344         num_eq_processed = 0;
345         if (blk_iopoll_enabled) {
346                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
347                                         & EQE_VALID_MASK) {
348                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
349                                 blk_iopoll_sched(&pbe_eq->iopoll);
350
351                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
352                         queue_tail_inc(eq);
353                         eqe = queue_tail_node(eq);
354                         num_eq_processed++;
355                 }
356                 if (num_eq_processed)
357                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
358
359                 return IRQ_HANDLED;
360         } else {
361                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
362                                                 & EQE_VALID_MASK) {
363                         spin_lock_irqsave(&phba->isr_lock, flags);
364                         phba->todo_cq = 1;
365                         spin_unlock_irqrestore(&phba->isr_lock, flags);
366                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
367                         queue_tail_inc(eq);
368                         eqe = queue_tail_node(eq);
369                         num_eq_processed++;
370                 }
371                 if (phba->todo_cq)
372                         queue_work(phba->wq, &phba->work_cqs);
373
374                 if (num_eq_processed)
375                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
376
377                 return IRQ_HANDLED;
378         }
379 }
380
381 /**
382  * be_isr - legacy (INTx) interrupt handler, used when MSI-X is not enabled
383  * @irq: Not used
384  * @dev_id: Pointer to host adapter structure
385  */
386 static irqreturn_t be_isr(int irq, void *dev_id)
387 {
388         struct beiscsi_hba *phba;
389         struct hwi_controller *phwi_ctrlr;
390         struct hwi_context_memory *phwi_context;
391         struct be_eq_entry *eqe = NULL;
392         struct be_queue_info *eq;
393         struct be_queue_info *cq;
394         struct be_queue_info *mcc;
395         unsigned long flags, index;
396         unsigned int num_mcceq_processed, num_ioeq_processed;
397         struct be_ctrl_info *ctrl;
398         struct be_eq_obj *pbe_eq;
399         int isr;
400
401         phba = dev_id;
402         ctrl = &phba->ctrl;
403         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
404                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
405         if (!isr)
406                 return IRQ_NONE;
407
408         phwi_ctrlr = phba->phwi_ctrlr;
409         phwi_context = phwi_ctrlr->phwi_ctxt;
410         pbe_eq = &phwi_context->be_eq[0];
411
412         eq = &phwi_context->be_eq[0].q;
413         mcc = &phba->ctrl.mcc_obj.cq;
414         index = 0;
415         eqe = queue_tail_node(eq);
416         if (!eqe)
417                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
418
419         num_ioeq_processed = 0;
420         num_mcceq_processed = 0;
421         if (blk_iopoll_enabled) {
422                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
423                                         & EQE_VALID_MASK) {
424                         if (((eqe->dw[offsetof(struct amap_eq_entry,
425                              resource_id) / 32] &
426                              EQE_RESID_MASK) >> 16) == mcc->id) {
427                                 spin_lock_irqsave(&phba->isr_lock, flags);
428                                 phba->todo_mcc_cq = 1;
429                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
430                                 num_mcceq_processed++;
431                         } else {
432                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
433                                         blk_iopoll_sched(&pbe_eq->iopoll);
434                                 num_ioeq_processed++;
435                         }
436                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
437                         queue_tail_inc(eq);
438                         eqe = queue_tail_node(eq);
439                 }
440                 if (num_ioeq_processed || num_mcceq_processed) {
441                         if (phba->todo_mcc_cq)
442                                 queue_work(phba->wq, &phba->work_cqs);
443
444                         if ((num_mcceq_processed) && (!num_ioeq_processed))
445                                 hwi_ring_eq_db(phba, eq->id, 0,
446                                                (num_ioeq_processed +
447                                                 num_mcceq_processed), 1, 1);
448                         else
449                                 hwi_ring_eq_db(phba, eq->id, 0,
450                                                (num_ioeq_processed +
451                                                 num_mcceq_processed), 0, 1);
452
453                         return IRQ_HANDLED;
454                 } else
455                         return IRQ_NONE;
456         } else {
457                 cq = &phwi_context->be_cq[0];
458                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
459                                                 & EQE_VALID_MASK) {
460
461                         if (((eqe->dw[offsetof(struct amap_eq_entry,
462                              resource_id) / 32] &
463                              EQE_RESID_MASK) >> 16) != cq->id) {
464                                 spin_lock_irqsave(&phba->isr_lock, flags);
465                                 phba->todo_mcc_cq = 1;
466                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
467                         } else {
468                                 spin_lock_irqsave(&phba->isr_lock, flags);
469                                 phba->todo_cq = 1;
470                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
471                         }
472                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
473                         queue_tail_inc(eq);
474                         eqe = queue_tail_node(eq);
475                         num_ioeq_processed++;
476                 }
477                 if (phba->todo_cq || phba->todo_mcc_cq)
478                         queue_work(phba->wq, &phba->work_cqs);
479
480                 if (num_ioeq_processed) {
481                         hwi_ring_eq_db(phba, eq->id, 0,
482                                        num_ioeq_processed, 1, 1);
483                         return IRQ_HANDLED;
484                 } else
485                         return IRQ_NONE;
486         }
487 }
488
489 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
490 {
491         struct pci_dev *pcidev = phba->pcidev;
492         struct hwi_controller *phwi_ctrlr;
493         struct hwi_context_memory *phwi_context;
494         int ret, msix_vec, i = 0;
495         char desc[32];
496
497         phwi_ctrlr = phba->phwi_ctrlr;
498         phwi_context = phwi_ctrlr->phwi_ctxt;
499
500         if (phba->msix_enabled) {
501                 for (i = 0; i < phba->num_cpus; i++) {
502                         sprintf(desc, "beiscsi_msix_%04x", i);
503                         msix_vec = phba->msix_entries[i].vector;
504                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
505                                           &phwi_context->be_eq[i]);
506                 }
507                 msix_vec = phba->msix_entries[i].vector;
508                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
509                                   &phwi_context->be_eq[i]);
510         } else {
511                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
512                                   "beiscsi", phba);
513                 if (ret) {
514                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs - "
515                                      "Failed to register irq\n");
516                         return ret;
517                 }
518         }
519         return 0;
520 }
521
522 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
523                            unsigned int id, unsigned int num_processed,
524                            unsigned char rearm, unsigned char event)
525 {
526         u32 val = 0;
527         val |= id & DB_CQ_RING_ID_MASK;
528         if (rearm)
529                 val |= 1 << DB_CQ_REARM_SHIFT;
530         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
531         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
532 }
533
534 static unsigned int
535 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
536                           struct beiscsi_hba *phba,
537                           unsigned short cid,
538                           struct pdu_base *ppdu,
539                           unsigned long pdu_len,
540                           void *pbuffer, unsigned long buf_len)
541 {
542         struct iscsi_conn *conn = beiscsi_conn->conn;
543         struct iscsi_session *session = conn->session;
544         struct iscsi_task *task;
545         struct beiscsi_io_task *io_task;
546         struct iscsi_hdr *login_hdr;
547
548         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
549                                                 PDUBASE_OPCODE_MASK) {
550         case ISCSI_OP_NOOP_IN:
551                 pbuffer = NULL;
552                 buf_len = 0;
553                 break;
554         case ISCSI_OP_ASYNC_EVENT:
555                 break;
556         case ISCSI_OP_REJECT:
557                 WARN_ON(!pbuffer);
558                 WARN_ON(!(buf_len == 48));
559                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
560                 break;
561         case ISCSI_OP_LOGIN_RSP:
562                 task = conn->login_task;
563                 io_task = task->dd_data;
564                 login_hdr = (struct iscsi_hdr *)ppdu;
565                 login_hdr->itt = io_task->libiscsi_itt;
566                 break;
567         default:
568                 shost_printk(KERN_WARNING, phba->shost,
569                              "Unrecognized opcode 0x%x in async msg \n",
570                              (ppdu->
571                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
572                                                 & PDUBASE_OPCODE_MASK));
573                 return 1;
574         }
575
576         spin_lock_bh(&session->lock);
577         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
578         spin_unlock_bh(&session->lock);
579         return 0;
580 }
581
582 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
583 {
584         struct sgl_handle *psgl_handle;
585
586         if (phba->io_sgl_hndl_avbl) {
587                 SE_DEBUG(DBG_LVL_8,
588                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
589                          phba->io_sgl_alloc_index);
590                 psgl_handle = phba->io_sgl_hndl_base[phba->
591                                                 io_sgl_alloc_index];
592                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
593                 phba->io_sgl_hndl_avbl--;
594                 if (phba->io_sgl_alloc_index == (phba->params.
595                                                  ios_per_ctrl - 1))
596                         phba->io_sgl_alloc_index = 0;
597                 else
598                         phba->io_sgl_alloc_index++;
599         } else
600                 psgl_handle = NULL;
601         return psgl_handle;
602 }
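/*
 * Illustrative note: the I/O SGL handles form a simple circular pool.
 * alloc_io_sgl_handle() hands out the slot at io_sgl_alloc_index and NULLs
 * it; free_io_sgl_handle() below puts a handle back at io_sgl_free_index.
 * Both indices wrap at ios_per_ctrl - 1.  A caller (hypothetical sketch)
 * pairs the two as:
 *
 *     psgl_handle = alloc_io_sgl_handle(phba);
 *     if (!psgl_handle)
 *             return -EAGAIN;
 *     ... build the SGL and issue the I/O ...
 *     free_io_sgl_handle(phba, psgl_handle);
 */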
603
604 static void
605 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
606 {
607         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
608                  phba->io_sgl_free_index);
609         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
610                 /*
611                  * this can happen if clean_task is called on a task that
612                  * failed in xmit_task or alloc_pdu.
613                  */
614                  SE_DEBUG(DBG_LVL_8,
615                          "Double Free in IO SGL io_sgl_free_index=%d,"
616                          "value there=%p \n", phba->io_sgl_free_index,
617                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
618                 return;
619         }
620         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
621         phba->io_sgl_hndl_avbl++;
622         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
623                 phba->io_sgl_free_index = 0;
624         else
625                 phba->io_sgl_free_index++;
626 }
627
628 /**
629  * alloc_wrb_handle - allocate a wrb handle for a connection
630  * @phba: The hba pointer
631  * @cid: The cid to use for allocation
632  * @index: wrb index hint from the caller (not referenced here)
633  *
634  * Called with the session lock held until the wrb is submitted to the chip.
635  */
636 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
637                                     int index)
638 {
639         struct hwi_wrb_context *pwrb_context;
640         struct hwi_controller *phwi_ctrlr;
641         struct wrb_handle *pwrb_handle;
642
643         phwi_ctrlr = phba->phwi_ctrlr;
644         pwrb_context = &phwi_ctrlr->wrb_context[cid];
645         if (pwrb_context->wrb_handles_available) {
646                 pwrb_handle = pwrb_context->pwrb_handle_base[
647                                             pwrb_context->alloc_index];
648                 pwrb_context->wrb_handles_available--;
649                 pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
650                 if (pwrb_context->alloc_index ==
651                                                 (phba->params.wrbs_per_cxn - 1))
652                         pwrb_context->alloc_index = 0;
653                 else
654                         pwrb_context->alloc_index++;
655         } else
656                 pwrb_handle = NULL;
657         return pwrb_handle;
658 }
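/*
 * Illustrative sketch (hypothetical caller, not from this file): since the
 * session lock is already held when this runs, a pdu-allocation path would
 * look roughly like
 *
 *     pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
 *                                    task->itt);
 *     if (!pwrb_handle)
 *             return -ENOMEM;
 *
 * with the matching free_wrb_handle() issued later from the completion
 * handlers in this file, again under session->lock.
 */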
659
660 /**
661  * free_wrb_handle - return a wrb handle to the per-connection pool
662  * @phba: The hba pointer
663  * @pwrb_context: The context to free from
664  * @pwrb_handle: The wrb_handle to free
665  *
666  * Called with the session lock held, like alloc_wrb_handle().
667  */
668 static void
669 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
670                 struct wrb_handle *pwrb_handle)
671 {
672         if (!ring_mode)
673                 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
674                                                pwrb_handle;
675         pwrb_context->wrb_handles_available++;
676         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
677                 pwrb_context->free_index = 0;
678         else
679                 pwrb_context->free_index++;
680
681         SE_DEBUG(DBG_LVL_8,
682                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
683                  "wrb_handles_available=%d \n",
684                  pwrb_handle, pwrb_context->free_index,
685                  pwrb_context->wrb_handles_available);
686 }
687
688 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
689 {
690         struct sgl_handle *psgl_handle;
691
692         if (phba->eh_sgl_hndl_avbl) {
693                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
694                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
695                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
696                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
697                 phba->eh_sgl_hndl_avbl--;
698                 if (phba->eh_sgl_alloc_index ==
699                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
700                      1))
701                         phba->eh_sgl_alloc_index = 0;
702                 else
703                         phba->eh_sgl_alloc_index++;
704         } else
705                 psgl_handle = NULL;
706         return psgl_handle;
707 }
708
709 void
710 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
711 {
712
713         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
714                              phba->eh_sgl_free_index);
715         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
716                 /*
717                  * this can happen if clean_task is called on a task that
718                  * failed in xmit_task or alloc_pdu.
719                  */
720                 SE_DEBUG(DBG_LVL_8,
721                          "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
722                          phba->eh_sgl_free_index);
723                 return;
724         }
725         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
726         phba->eh_sgl_hndl_avbl++;
727         if (phba->eh_sgl_free_index ==
728             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
729                 phba->eh_sgl_free_index = 0;
730         else
731                 phba->eh_sgl_free_index++;
732 }
733
734 static void
735 be_complete_io(struct beiscsi_conn *beiscsi_conn,
736                struct iscsi_task *task, struct sol_cqe *psol)
737 {
738         struct beiscsi_io_task *io_task = task->dd_data;
739         struct be_status_bhs *sts_bhs =
740                                 (struct be_status_bhs *)io_task->cmd_bhs;
741         struct iscsi_conn *conn = beiscsi_conn->conn;
742         unsigned int sense_len;
743         unsigned char *sense;
744         u32 resid = 0, exp_cmdsn, max_cmdsn;
745         u8 rsp, status, flags;
746
747         exp_cmdsn = (psol->
748                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
749                         & SOL_EXP_CMD_SN_MASK);
750         max_cmdsn = ((psol->
751                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
752                         & SOL_EXP_CMD_SN_MASK) +
753                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
754                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
755         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
756                                                 & SOL_RESP_MASK) >> 16);
757         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
758                                                 & SOL_STS_MASK) >> 8);
759         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
760                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
761
762         task->sc->result = (DID_OK << 16) | status;
763         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
764                 task->sc->result = DID_ERROR << 16;
765                 goto unmap;
766         }
767
768         /* bidi not initially supported */
769         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
770                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
771                                 32] & SOL_RES_CNT_MASK);
772
773                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
774                         task->sc->result = DID_ERROR << 16;
775
776                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
777                         scsi_set_resid(task->sc, resid);
778                         if (!status && (scsi_bufflen(task->sc) - resid <
779                             task->sc->underflow))
780                                 task->sc->result = DID_ERROR << 16;
781                 }
782         }
783
784         if (status == SAM_STAT_CHECK_CONDITION) {
785                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
786                 sense = sts_bhs->sense_info + sizeof(unsigned short);
787                 sense_len =  cpu_to_be16(*slen);
788                 memcpy(task->sc->sense_buffer, sense,
789                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
790         }
791         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
792                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
793                                                         & SOL_RES_CNT_MASK)
794                          conn->rxdata_octets += (psol->
795                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
796                              & SOL_RES_CNT_MASK);
797         }
798 unmap:
799         scsi_dma_unmap(io_task->scsi_cmnd);
800         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
801 }
802
803 static void
804 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
805                    struct iscsi_task *task, struct sol_cqe *psol)
806 {
807         struct iscsi_logout_rsp *hdr;
808         struct beiscsi_io_task *io_task = task->dd_data;
809         struct iscsi_conn *conn = beiscsi_conn->conn;
810
811         hdr = (struct iscsi_logout_rsp *)task->hdr;
812         hdr->t2wait = 5;
813         hdr->t2retain = 0;
814         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
815                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
816         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
817                                         32] & SOL_RESP_MASK);
818         hdr->exp_cmdsn = cpu_to_be32(psol->
819                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
820                                         & SOL_EXP_CMD_SN_MASK);
821         hdr->max_cmdsn = be32_to_cpu((psol->
822                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
823                                         & SOL_EXP_CMD_SN_MASK) +
824                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
825                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
826         hdr->hlength = 0;
827         hdr->itt = io_task->libiscsi_itt;
828         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
829 }
830
831 static void
832 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
833                 struct iscsi_task *task, struct sol_cqe *psol)
834 {
835         struct iscsi_tm_rsp *hdr;
836         struct iscsi_conn *conn = beiscsi_conn->conn;
837         struct beiscsi_io_task *io_task = task->dd_data;
838
839         hdr = (struct iscsi_tm_rsp *)task->hdr;
840         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
841                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
842         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
843                                         32] & SOL_RESP_MASK);
844         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
845                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
846         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
847                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
848                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
849                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
850         hdr->itt = io_task->libiscsi_itt;
851         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
852 }
853
854 static void
855 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
856                        struct beiscsi_hba *phba, struct sol_cqe *psol)
857 {
858         struct hwi_wrb_context *pwrb_context;
859         struct wrb_handle *pwrb_handle = NULL;
860         struct sgl_handle *psgl_handle = NULL;
861         struct hwi_controller *phwi_ctrlr;
862         struct iscsi_task *task;
863         struct beiscsi_io_task *io_task;
864         struct iscsi_conn *conn = beiscsi_conn->conn;
865         struct iscsi_session *session = conn->session;
866
867         phwi_ctrlr = phba->phwi_ctrlr;
868         if (ring_mode) {
869                 psgl_handle = phba->sgl_hndl_array[((psol->
870                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
871                                 32] & SOL_ICD_INDEX_MASK) >> 6)];
872                 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
873                 task = psgl_handle->task;
874                 pwrb_handle = NULL;
875         } else {
876                 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
877                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
878                                 SOL_CID_MASK) >> 6) -
879                                 phba->fw_config.iscsi_cid_start];
880                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
881                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
882                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
883                 task = pwrb_handle->pio_handle;
884         }
885
886         io_task = task->dd_data;
887         spin_lock(&phba->mgmt_sgl_lock);
888         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
889         spin_unlock(&phba->mgmt_sgl_lock);
890         spin_lock_bh(&session->lock);
891         free_wrb_handle(phba, pwrb_context, pwrb_handle);
892         spin_unlock_bh(&session->lock);
893 }
894
895 static void
896 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
897                        struct iscsi_task *task, struct sol_cqe *psol)
898 {
899         struct iscsi_nopin *hdr;
900         struct iscsi_conn *conn = beiscsi_conn->conn;
901         struct beiscsi_io_task *io_task = task->dd_data;
902
903         hdr = (struct iscsi_nopin *)task->hdr;
904         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
905                         & SOL_FLAGS_MASK) >> 24) | 0x80;
906         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
907                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
908         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
909                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
910                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
911                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
912         hdr->opcode = ISCSI_OP_NOOP_IN;
913         hdr->itt = io_task->libiscsi_itt;
914         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
915 }
916
917 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
918                              struct beiscsi_hba *phba, struct sol_cqe *psol)
919 {
920         struct hwi_wrb_context *pwrb_context;
921         struct wrb_handle *pwrb_handle;
922         struct iscsi_wrb *pwrb = NULL;
923         struct hwi_controller *phwi_ctrlr;
924         struct iscsi_task *task;
925         struct sgl_handle *psgl_handle = NULL;
926         unsigned int type;
927         struct iscsi_conn *conn = beiscsi_conn->conn;
928         struct iscsi_session *session = conn->session;
929
930         phwi_ctrlr = phba->phwi_ctrlr;
931         if (ring_mode) {
932                 psgl_handle = phba->sgl_hndl_array[((psol->
933                               dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
934                               32] & SOL_ICD_INDEX_MASK) >> 6)];
935                 task = psgl_handle->task;
936                 type = psgl_handle->type;
937         } else {
938                 pwrb_context = &phwi_ctrlr->
939                                 wrb_context[((psol->dw[offsetof
940                                 (struct amap_sol_cqe, cid) / 32]
941                                 & SOL_CID_MASK) >> 6) -
942                                 phba->fw_config.iscsi_cid_start];
943                 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
944                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
945                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
946                 task = pwrb_handle->pio_handle;
947                 pwrb = pwrb_handle->pwrb;
948                 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
949                          WRB_TYPE_MASK) >> 28;
950         }
951         spin_lock_bh(&session->lock);
952         switch (type) {
953         case HWH_TYPE_IO:
954         case HWH_TYPE_IO_RD:
955                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
956                     ISCSI_OP_NOOP_OUT) {
957                         be_complete_nopin_resp(beiscsi_conn, task, psol);
958                 } else
959                         be_complete_io(beiscsi_conn, task, psol);
960                 break;
961
962         case HWH_TYPE_LOGOUT:
963                 be_complete_logout(beiscsi_conn, task, psol);
964                 break;
965
966         case HWH_TYPE_LOGIN:
967                 SE_DEBUG(DBG_LVL_1,
968                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd "
969                          "- Solicited path \n");
970                 break;
971
972         case HWH_TYPE_TMF:
973                 be_complete_tmf(beiscsi_conn, task, psol);
974                 break;
975
976         case HWH_TYPE_NOP:
977                 be_complete_nopin_resp(beiscsi_conn, task, psol);
978                 break;
979
980         default:
981                 if (ring_mode)
982                         shost_printk(KERN_WARNING, phba->shost,
983                                 "In hwi_complete_cmd, unknown type = %d "
984                                 "icd_index 0x%x CID 0x%x\n", type,
985                                 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
986                                 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
987                                 psgl_handle->cid);
988                 else
989                         shost_printk(KERN_WARNING, phba->shost,
990                                 "In hwi_complete_cmd, unknown type = %d "
991                                 "wrb_index 0x%x CID 0x%x\n", type,
992                                 ((psol->dw[offsetof(struct amap_iscsi_wrb,
993                                 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
994                                 ((psol->dw[offsetof(struct amap_sol_cqe,
995                                 cid) / 32] & SOL_CID_MASK) >> 6));
996                 break;
997         }
998
999         spin_unlock_bh(&session->lock);
1000 }
1001
1002 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1003                                           *pasync_ctx, unsigned int is_header,
1004                                           unsigned int host_write_ptr)
1005 {
1006         if (is_header)
1007                 return &pasync_ctx->async_entry[host_write_ptr].
1008                     header_busy_list;
1009         else
1010                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1011 }
1012
1013 static struct async_pdu_handle *
1014 hwi_get_async_handle(struct beiscsi_hba *phba,
1015                      struct beiscsi_conn *beiscsi_conn,
1016                      struct hwi_async_pdu_context *pasync_ctx,
1017                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1018 {
1019         struct be_bus_address phys_addr;
1020         struct list_head *pbusy_list;
1021         struct async_pdu_handle *pasync_handle = NULL;
1022         int buffer_len = 0;
1023         unsigned char buffer_index = -1;
1024         unsigned char is_header = 0;
1025
1026         phys_addr.u.a32.address_lo =
1027             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1028             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1029                                                 & PDUCQE_DPL_MASK) >> 16);
1030         phys_addr.u.a32.address_hi =
1031             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1032
1033         phys_addr.u.a64.address =
1034                         *((unsigned long long *)(&phys_addr.u.a64.address));
1035
1036         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1037                         & PDUCQE_CODE_MASK) {
1038         case UNSOL_HDR_NOTIFY:
1039                 is_header = 1;
1040
1041                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1042                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1043                         index) / 32] & PDUCQE_INDEX_MASK));
1044
1045                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1046                                 pasync_ctx->async_header.pa_base.u.a64.address);
1047
1048                 buffer_index = buffer_len /
1049                                 pasync_ctx->async_header.buffer_size;
1050
1051                 break;
1052         case UNSOL_DATA_NOTIFY:
1053                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1054                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1055                                         index) / 32] & PDUCQE_INDEX_MASK));
1056                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1057                                         pasync_ctx->async_data.pa_base.u.
1058                                         a64.address);
1059                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1060                 break;
1061         default:
1062                 pbusy_list = NULL;
1063                 shost_printk(KERN_WARNING, phba->shost,
1064                         "Unexpected code=%d \n",
1065                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1066                                         code) / 32] & PDUCQE_CODE_MASK);
1067                 return NULL;
1068         }
1069
1070         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1071         WARN_ON(list_empty(pbusy_list));
1072         list_for_each_entry(pasync_handle, pbusy_list, link) {
1073                 WARN_ON(pasync_handle->consumed);
1074                 if (pasync_handle->index == buffer_index)
1075                         break;
1076         }
1077
1078         WARN_ON(!pasync_handle);
1079
1080         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1081                                              phba->fw_config.iscsi_cid_start;
1082         pasync_handle->is_header = is_header;
1083         pasync_handle->buffer_len = ((pdpdu_cqe->
1084                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1085                         & PDUCQE_DPL_MASK) >> 16);
1086
1087         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1088                         index) / 32] & PDUCQE_INDEX_MASK);
1089         return pasync_handle;
1090 }
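/*
 * Worked example (illustrative, assumed numbers): the handle is found by
 * converting the bus address in the default-PDU CQE back into an array
 * index.  If the data ring's pa_base were 0x10000000 and buffer_size were
 * 4096, a completion carrying bus address 0x10003000 would give
 *
 *     buffer_len   = 0x10003000 - 0x10000000 = 0x3000
 *     buffer_index = 0x3000 / 4096 = 3
 *
 * i.e. the fourth buffer posted on that ring.
 */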
1091
1092 static unsigned int
1093 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1094                            unsigned int is_header, unsigned int cq_index)
1095 {
1096         struct list_head *pbusy_list;
1097         struct async_pdu_handle *pasync_handle;
1098         unsigned int num_entries, writables = 0;
1099         unsigned int *pep_read_ptr, *pwritables;
1100
1101
1102         if (is_header) {
1103                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1104                 pwritables = &pasync_ctx->async_header.writables;
1105                 num_entries = pasync_ctx->async_header.num_entries;
1106         } else {
1107                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1108                 pwritables = &pasync_ctx->async_data.writables;
1109                 num_entries = pasync_ctx->async_data.num_entries;
1110         }
1111
1112         while ((*pep_read_ptr) != cq_index) {
1113                 (*pep_read_ptr)++;
1114                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1115
1116                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1117                                                      *pep_read_ptr);
1118                 if (writables == 0)
1119                         WARN_ON(list_empty(pbusy_list));
1120
1121                 if (!list_empty(pbusy_list)) {
1122                         pasync_handle = list_entry(pbusy_list->next,
1123                                                    struct async_pdu_handle,
1124                                                    link);
1125                         WARN_ON(!pasync_handle);
1126                         pasync_handle->consumed = 1;
1127                 }
1128
1129                 writables++;
1130         }
1131
1132         if (!writables) {
1133                 SE_DEBUG(DBG_LVL_1,
1134                          "Duplicate notification received - index 0x%x!!\n",
1135                          cq_index);
1136                 WARN_ON(1);
1137         }
1138
1139         *pwritables = *pwritables + writables;
1140         return 0;
1141 }
1142
1143 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1144                                        unsigned int cri)
1145 {
1146         struct hwi_controller *phwi_ctrlr;
1147         struct hwi_async_pdu_context *pasync_ctx;
1148         struct async_pdu_handle *pasync_handle, *tmp_handle;
1149         struct list_head *plist;
1150         unsigned int i = 0;
1151
1152         phwi_ctrlr = phba->phwi_ctrlr;
1153         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1154
1155         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1156
1157         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1158                 list_del(&pasync_handle->link);
1159
1160                 if (i == 0) {
1161                         list_add_tail(&pasync_handle->link,
1162                                       &pasync_ctx->async_header.free_list);
1163                         pasync_ctx->async_header.free_entries++;
1164                         i++;
1165                 } else {
1166                         list_add_tail(&pasync_handle->link,
1167                                       &pasync_ctx->async_data.free_list);
1168                         pasync_ctx->async_data.free_entries++;
1169                         i++;
1170                 }
1171         }
1172
1173         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1174         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1175         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1176         return 0;
1177 }
1178
1179 static struct phys_addr *
1180 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1181                      unsigned int is_header, unsigned int host_write_ptr)
1182 {
1183         struct phys_addr *pasync_sge = NULL;
1184
1185         if (is_header)
1186                 pasync_sge = pasync_ctx->async_header.ring_base;
1187         else
1188                 pasync_sge = pasync_ctx->async_data.ring_base;
1189
1190         return pasync_sge + host_write_ptr;
1191 }
1192
1193 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1194                                    unsigned int is_header)
1195 {
1196         struct hwi_controller *phwi_ctrlr;
1197         struct hwi_async_pdu_context *pasync_ctx;
1198         struct async_pdu_handle *pasync_handle;
1199         struct list_head *pfree_link, *pbusy_list;
1200         struct phys_addr *pasync_sge;
1201         unsigned int ring_id, num_entries;
1202         unsigned int host_write_num;
1203         unsigned int writables;
1204         unsigned int i = 0;
1205         u32 doorbell = 0;
1206
1207         phwi_ctrlr = phba->phwi_ctrlr;
1208         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1209
1210         if (is_header) {
1211                 num_entries = pasync_ctx->async_header.num_entries;
1212                 writables = min(pasync_ctx->async_header.writables,
1213                                 pasync_ctx->async_header.free_entries);
1214                 pfree_link = pasync_ctx->async_header.free_list.next;
1215                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1216                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1217         } else {
1218                 num_entries = pasync_ctx->async_data.num_entries;
1219                 writables = min(pasync_ctx->async_data.writables,
1220                                 pasync_ctx->async_data.free_entries);
1221                 pfree_link = pasync_ctx->async_data.free_list.next;
1222                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1223                 ring_id = phwi_ctrlr->default_pdu_data.id;
1224         }
1225
1226         writables = (writables / 8) * 8;
1227         if (writables) {
1228                 for (i = 0; i < writables; i++) {
1229                         pbusy_list =
1230                             hwi_get_async_busy_list(pasync_ctx, is_header,
1231                                                     host_write_num);
1232                         pasync_handle =
1233                             list_entry(pfree_link, struct async_pdu_handle,
1234                                                                 link);
1235                         WARN_ON(!pasync_handle);
1236                         pasync_handle->consumed = 0;
1237
1238                         pfree_link = pfree_link->next;
1239
1240                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1241                                                 is_header, host_write_num);
1242
1243                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1244                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1245
1246                         list_move(&pasync_handle->link, pbusy_list);
1247
1248                         host_write_num++;
1249                         host_write_num = host_write_num % num_entries;
1250                 }
1251
1252                 if (is_header) {
1253                         pasync_ctx->async_header.host_write_ptr =
1254                                                         host_write_num;
1255                         pasync_ctx->async_header.free_entries -= writables;
1256                         pasync_ctx->async_header.writables -= writables;
1257                         pasync_ctx->async_header.busy_entries += writables;
1258                 } else {
1259                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1260                         pasync_ctx->async_data.free_entries -= writables;
1261                         pasync_ctx->async_data.writables -= writables;
1262                         pasync_ctx->async_data.busy_entries += writables;
1263                 }
1264
1265                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1266                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1267                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1268                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1269                                         << DB_DEF_PDU_CQPROC_SHIFT;
1270
1271                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1272         }
1273 }
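/*
 * Illustrative arithmetic: buffers are reposted to the default PDU ring only
 * in multiples of eight, so "writables = (writables / 8) * 8" rounds down;
 * 29 reclaimable entries would repost 24 and leave 5 for a later pass.  The
 * reposted count is then folded into the RXULP doorbell together with the
 * ring id, mirroring the EQ/CQ doorbell helpers earlier in this file.
 */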
1274
1275 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1276                                          struct beiscsi_conn *beiscsi_conn,
1277                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1278 {
1279         struct hwi_controller *phwi_ctrlr;
1280         struct hwi_async_pdu_context *pasync_ctx;
1281         struct async_pdu_handle *pasync_handle = NULL;
1282         unsigned int cq_index = -1;
1283
1284         phwi_ctrlr = phba->phwi_ctrlr;
1285         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1286
1287         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1288                                              pdpdu_cqe, &cq_index);
1289         BUG_ON(pasync_handle->is_header != 0);
1290         if (pasync_handle->consumed == 0)
1291                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1292                                            cq_index);
1293
1294         hwi_free_async_msg(phba, pasync_handle->cri);
1295         hwi_post_async_buffers(phba, pasync_handle->is_header);
1296 }
1297
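     /*
      * hwi_fwd_async_msg - hand a completed unsolicited PDU up the stack.
      * The first handle on the per-CRI wait queue carries the header; the
      * data fragments that follow are copied back to back into the first
      * data buffer before beiscsi_process_async_pdu() is called.  On
      * success the queued handles are released via hwi_free_async_msg().
      */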
1298 static unsigned int
1299 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1300                   struct beiscsi_hba *phba,
1301                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1302 {
1303         struct list_head *plist;
1304         struct async_pdu_handle *pasync_handle;
1305         void *phdr = NULL;
1306         unsigned int hdr_len = 0, buf_len = 0;
1307         unsigned int status, index = 0, offset = 0;
1308         void *pfirst_buffer = NULL;
1309         unsigned int num_buf = 0;
1310
1311         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1312
1313         list_for_each_entry(pasync_handle, plist, link) {
1314                 if (index == 0) {
1315                         phdr = pasync_handle->pbuffer;
1316                         hdr_len = pasync_handle->buffer_len;
1317                 } else {
1318                         buf_len = pasync_handle->buffer_len;
1319                         if (!num_buf) {
1320                                 pfirst_buffer = pasync_handle->pbuffer;
1321                                 num_buf++;
1322                         }
1323                         memcpy(pfirst_buffer + offset,
1324                                pasync_handle->pbuffer, buf_len);
1325                         offset += buf_len;
1326                 }
1327                 index++;
1328         }
1329
1330         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1331                                            (beiscsi_conn->beiscsi_conn_cid -
1332                                             phba->fw_config.iscsi_cid_start),
1333                                             phdr, hdr_len, pfirst_buffer,
1334                                             buf_len);
1335
1336         if (status == 0)
1337                 hwi_free_async_msg(phba, cri);
1338         return 0;
1339 }
1340
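     /*
      * hwi_gather_async_pdu - collect header/data fragments for one CRI.
      * A header completion records how many data bytes are still expected;
      * data completions are appended to the wait queue until that count is
      * reached, at which point the PDU is forwarded via hwi_fwd_async_msg().
      */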
1341 static unsigned int
1342 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1343                      struct beiscsi_hba *phba,
1344                      struct async_pdu_handle *pasync_handle)
1345 {
1346         struct hwi_async_pdu_context *pasync_ctx;
1347         struct hwi_controller *phwi_ctrlr;
1348         unsigned int bytes_needed = 0, status = 0;
1349         unsigned short cri = pasync_handle->cri;
1350         struct pdu_base *ppdu;
1351
1352         phwi_ctrlr = phba->phwi_ctrlr;
1353         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1354
1355         list_del(&pasync_handle->link);
1356         if (pasync_handle->is_header) {
1357                 pasync_ctx->async_header.busy_entries--;
1358                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1359                         hwi_free_async_msg(phba, cri);
1360                         BUG();
1361                 }
1362
1363                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1364                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1365                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1366                                 (unsigned short)pasync_handle->buffer_len;
1367                 list_add_tail(&pasync_handle->link,
1368                               &pasync_ctx->async_entry[cri].wait_queue.list);
1369
1370                 ppdu = pasync_handle->pbuffer;
1371                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1372                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1373                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1374                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1375                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1376
1377                 if (status == 0) {
1378                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1379                             bytes_needed;
1380
1381                         if (bytes_needed == 0)
1382                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1383                                                            pasync_ctx, cri);
1384                 }
1385         } else {
1386                 pasync_ctx->async_data.busy_entries--;
1387                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1388                         list_add_tail(&pasync_handle->link,
1389                                       &pasync_ctx->async_entry[cri].wait_queue.
1390                                       list);
1391                         pasync_ctx->async_entry[cri].wait_queue.
1392                                 bytes_received +=
1393                                 (unsigned short)pasync_handle->buffer_len;
1394
1395                         if (pasync_ctx->async_entry[cri].wait_queue.
1396                             bytes_received >=
1397                             pasync_ctx->async_entry[cri].wait_queue.
1398                             bytes_needed)
1399                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1400                                                            pasync_ctx, cri);
1401                 }
1402         }
1403         return status;
1404 }
1405
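     /*
      * hwi_process_default_pdu_ring - handle UNSOL_HDR/UNSOL_DATA CQEs by
      * gathering the PDU fragments and re-posting free buffers to the ring.
      */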
1406 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1407                                          struct beiscsi_hba *phba,
1408                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1409 {
1410         struct hwi_controller *phwi_ctrlr;
1411         struct hwi_async_pdu_context *pasync_ctx;
1412         struct async_pdu_handle *pasync_handle = NULL;
1413         unsigned int cq_index = -1;
1414
1415         phwi_ctrlr = phba->phwi_ctrlr;
1416         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1417         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1418                                              pdpdu_cqe, &cq_index);
1419
1420         if (pasync_handle->consumed == 0)
1421                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1422                                            cq_index);
1423         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1424         hwi_post_async_buffers(phba, pasync_handle->is_header);
1425 }
1426
1427
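     /*
      * beiscsi_process_cq - drain the completion queue attached to an EQ.
      * The CQ doorbell is rung without rearm every 32 entries and once
      * more, with rearm, when the queue is empty.  Returns the total number
      * of CQEs processed.
      */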
1428 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1429 {
1430         struct be_queue_info *cq;
1431         struct sol_cqe *sol;
1432         struct dmsg_cqe *dmsg;
1433         unsigned int num_processed = 0;
1434         unsigned int tot_nump = 0;
1435         struct beiscsi_conn *beiscsi_conn;
1436         struct sgl_handle *psgl_handle = NULL;
1437         struct beiscsi_endpoint *beiscsi_ep;
1438         struct iscsi_endpoint *ep;
1439         struct beiscsi_hba *phba;
1440
1441         cq = pbe_eq->cq;
1442         sol = queue_tail_node(cq);
1443         phba = pbe_eq->phba;
1444
1445         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1446                CQE_VALID_MASK) {
1447                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1448
1449                 if (ring_mode) {
1450                         psgl_handle = phba->sgl_hndl_array[((sol->
1451                                       dw[offsetof(struct amap_sol_cqe_ring,
1452                                       icd_index) / 32] & SOL_ICD_INDEX_MASK)
1453                                       >> 6)];
1454                         ep = phba->ep_array[psgl_handle->cid];
1455                 } else {
1456                         ep = phba->ep_array[(u32) ((sol->
1457                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1458                                    SOL_CID_MASK) >> 6) -
1459                                    phba->fw_config.iscsi_cid_start];
1460                 }
1461                 beiscsi_ep = ep->dd_data;
1462                 beiscsi_conn = beiscsi_ep->conn;
1463                 if (num_processed >= 32) {
1464                         hwi_ring_cq_db(phba, cq->id,
1465                                         num_processed, 0, 0);
1466                         tot_nump += num_processed;
1467                         num_processed = 0;
1468                 }
1469
1470                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1471                         32] & CQE_CODE_MASK) {
1472                 case SOL_CMD_COMPLETE:
1473                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1474                         break;
1475                 case DRIVERMSG_NOTIFY:
1476                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1477                         dmsg = (struct dmsg_cqe *)sol;
1478                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1479                         break;
1480                 case UNSOL_HDR_NOTIFY:
1481                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1482                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1483                                              (struct i_t_dpdu_cqe *)sol);
1484                         break;
1485                 case UNSOL_DATA_NOTIFY:
1486                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1487                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1488                                              (struct i_t_dpdu_cqe *)sol);
1489                         break;
1490                 case CXN_INVALIDATE_INDEX_NOTIFY:
1491                 case CMD_INVALIDATED_NOTIFY:
1492                 case CXN_INVALIDATE_NOTIFY:
1493                         SE_DEBUG(DBG_LVL_1,
1494                                  "Ignoring CQ Error notification for cmd/cxn "
1495                                  "invalidate\n");
1496                         break;
1497                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1498                 case CMD_KILLED_INVALID_STATSN_RCVD:
1499                 case CMD_KILLED_INVALID_R2T_RCVD:
1500                 case CMD_CXN_KILLED_LUN_INVALID:
1501                 case CMD_CXN_KILLED_ICD_INVALID:
1502                 case CMD_CXN_KILLED_ITT_INVALID:
1503                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1504                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1505                         if (ring_mode) {
1506                                 SE_DEBUG(DBG_LVL_1,
1507                                  "CQ Error notification for cmd.. "
1508                                  "code %d cid 0x%x\n",
1509                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1510                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1511                         } else {
1512                                 SE_DEBUG(DBG_LVL_1,
1513                                  "CQ Error notification for cmd.. "
1514                                  "code %d cid 0x%x\n",
1515                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1516                                  32] & CQE_CODE_MASK,
1517                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1518                                  32] & SOL_CID_MASK));
1519                         }
1520                         break;
1521                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1522                         SE_DEBUG(DBG_LVL_1,
1523                                  "Digest error on def pdu ring, dropping..\n");
1524                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1525                                              (struct i_t_dpdu_cqe *) sol);
1526                         break;
1527                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1528                 case CXN_KILLED_BURST_LEN_MISMATCH:
1529                 case CXN_KILLED_AHS_RCVD:
1530                 case CXN_KILLED_HDR_DIGEST_ERR:
1531                 case CXN_KILLED_UNKNOWN_HDR:
1532                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1533                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1534                 case CXN_KILLED_TIMED_OUT:
1535                 case CXN_KILLED_FIN_RCVD:
1536                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1537                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1538                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1539                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1540                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1541                         if (ring_mode) {
1542                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1543                                  "0x%x...\n",
1544                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1545                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1546                         } else {
1547                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1548                                  "0x%x...\n",
1549                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1550                                  32] & CQE_CODE_MASK,
1551                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1552                                  32] & CQE_CID_MASK));
1553                         }
1554                         iscsi_conn_failure(beiscsi_conn->conn,
1555                                            ISCSI_ERR_CONN_FAILED);
1556                         break;
1557                 case CXN_KILLED_RST_SENT:
1558                 case CXN_KILLED_RST_RCVD:
1559                         if (ring_mode) {
1560                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1561                                 "received/sent on CID 0x%x...\n",
1562                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1563                                  32] & CQE_CODE_MASK, psgl_handle->cid);
1564                         } else {
1565                                 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1566                                 "received/sent on CID 0x%x...\n",
1567                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1568                                  32] & CQE_CODE_MASK,
1569                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1570                                  32] & CQE_CID_MASK));
1571                         }
1572                         iscsi_conn_failure(beiscsi_conn->conn,
1573                                            ISCSI_ERR_CONN_FAILED);
1574                         break;
1575                 default:
1576                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1577                                  "received on CID 0x%x...\n",
1578                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1579                                  32] & CQE_CODE_MASK,
1580                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1581                                  32] & CQE_CID_MASK));
1582                         break;
1583                 }
1584
1585                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1586                 queue_tail_inc(cq);
1587                 sol = queue_tail_node(cq);
1588                 num_processed++;
1589         }
1590
1591         if (num_processed > 0) {
1592                 tot_nump += num_processed;
1593                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1594         }
1595         return tot_nump;
1596 }
1597
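     /*
      * beiscsi_process_all_cqs - work item (phba->work_cqs) that clears the
      * todo flags under isr_lock and drains the CQ when completion work was
      * flagged.
      */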
1598 static void beiscsi_process_all_cqs(struct work_struct *work)
1599 {
1600         unsigned long flags;
1601         struct hwi_controller *phwi_ctrlr;
1602         struct hwi_context_memory *phwi_context;
1603         struct be_eq_obj *pbe_eq;
1604         struct beiscsi_hba *phba =
1605             container_of(work, struct beiscsi_hba, work_cqs);
1606
1607         phwi_ctrlr = phba->phwi_ctrlr;
1608         phwi_context = phwi_ctrlr->phwi_ctxt;
1609         if (phba->msix_enabled)
1610                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1611         else
1612                 pbe_eq = &phwi_context->be_eq[0];
1613
1614         if (phba->todo_mcc_cq) {
1615                 spin_lock_irqsave(&phba->isr_lock, flags);
1616                 phba->todo_mcc_cq = 0;
1617                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1618         }
1619
1620         if (phba->todo_cq) {
1621                 spin_lock_irqsave(&phba->isr_lock, flags);
1622                 phba->todo_cq = 0;
1623                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1624                 beiscsi_process_cq(pbe_eq);
1625         }
1626 }
1627
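     /*
      * be_iopoll - blk_iopoll callback: drain the CQ and, once fewer than
      * budget entries were processed, complete the poll and rearm the EQ.
      */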
1628 static int be_iopoll(struct blk_iopoll *iop, int budget)
1629 {
1630         unsigned int ret;
1631         struct beiscsi_hba *phba;
1632         struct be_eq_obj *pbe_eq;
1633
1634         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1635         ret = beiscsi_process_cq(pbe_eq);
1636         if (ret < budget) {
1637                 phba = pbe_eq->phba;
1638                 blk_iopoll_complete(iop);
1639                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1640                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1641         }
1642         return ret;
1643 }
1644
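     /*
      * hwi_write_sgl - set up the WRB and ICD SGL for a scatter-gather I/O
      * task.  The first two fragments are programmed directly into the WRB
      * (sge0/sge1); the ICD SGL gets the BHS entry and then, after one
      * zeroed slot, every data fragment with its running offset.
      */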
1645 static void
1646 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1647               unsigned int num_sg, struct beiscsi_io_task *io_task)
1648 {
1649         struct iscsi_sge *psgl;
1650         unsigned short sg_len, index;
1651         unsigned int sge_len = 0;
1652         unsigned long long addr;
1653         struct scatterlist *l_sg;
1654         unsigned int offset;
1655
1656         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1657                                       io_task->bhs_pa.u.a32.address_lo);
1658         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1659                                       io_task->bhs_pa.u.a32.address_hi);
1660
1661         l_sg = sg;
1662         for (index = 0; (index < num_sg) && (index < 2); index++, sg = sg_next(sg)) {
1663                 if (index == 0) {
1664                         sg_len = sg_dma_len(sg);
1665                         addr = (u64) sg_dma_address(sg);
1666                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1667                                                         (addr & 0xFFFFFFFF));
1668                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1669                                                         (addr >> 32));
1670                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1671                                                         sg_len);
1672                         sge_len = sg_len;
1673                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1674                                                         1);
1675                 } else {
1676                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1677                                                         0);
1678                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1679                                                         pwrb, sge_len);
1680                         sg_len = sg_dma_len(sg);
1681                         addr = (u64) sg_dma_address(sg);
1682                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1683                                                         (addr & 0xFFFFFFFF));
1684                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1685                                                         (addr >> 32));
1686                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1687                                                         sg_len);
1688                 }
1689         }
1690         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1691         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1692
1693         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1694
1695         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1696                         io_task->bhs_pa.u.a32.address_hi);
1697         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1698                         io_task->bhs_pa.u.a32.address_lo);
1699
1700         if (num_sg == 2)
1701                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1702         sg = l_sg;
1703         psgl++;
1704         psgl++;
1705         offset = 0;
1706         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1707                 sg_len = sg_dma_len(sg);
1708                 addr = (u64) sg_dma_address(sg);
1709                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1710                                                 (addr & 0xFFFFFFFF));
1711                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1712                                                 (addr >> 32));
1713                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1714                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1715                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1716                 offset += sg_len;
1717         }
1718         psgl--;
1719         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1720 }
1721
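     /*
      * hwi_write_buffer - set up the WRB and ICD SGL for a non-I/O
      * (management) task; any immediate data attached to the task is
      * DMA-mapped and described by sge0.
      */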
1722 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1723 {
1724         struct iscsi_sge *psgl;
1725         unsigned long long addr;
1726         struct beiscsi_io_task *io_task = task->dd_data;
1727         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1728         struct beiscsi_hba *phba = beiscsi_conn->phba;
1729
1730         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1731         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1732                                 io_task->bhs_pa.u.a32.address_lo);
1733         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1734                                 io_task->bhs_pa.u.a32.address_hi);
1735
1736         if (task->data) {
1737                 if (task->data_count) {
1738                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1739                         addr = (u64) pci_map_single(phba->pcidev,
1740                                                     task->data,
1741                                                     task->data_count, PCI_DMA_TODEVICE);
1742                 } else {
1743                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1744                         addr = 0;
1745                 }
1746                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1747                                                 (addr & 0xFFFFFFFF));
1748                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1749                                                 (addr >> 32));
1750                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1751                                                 task->data_count);
1752
1753                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1754         } else {
1755                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1756                 addr = 0;
1757         }
1758
1759         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1760
1761         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1762
1763         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1764                       io_task->bhs_pa.u.a32.address_hi);
1765         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1766                       io_task->bhs_pa.u.a32.address_lo);
1767         if (task->data) {
1768                 psgl++;
1769                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1770                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1771                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1772                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1773                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1774                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1775
1776                 psgl++;
1777                 if (task->data) {
1778                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1779                                                 (addr & 0xFFFFFFFF));
1780                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1781                                                 (addr >> 32));
1782                 }
1783                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1784         }
1785         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1786 }
1787
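     /*
      * beiscsi_find_mem_req - fill phba->mem_req[] with the number of bytes
      * each SE_MEM_MAX region needs for this controller's parameters;
      * beiscsi_alloc_mem() allocates from these sizes.
      */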
1788 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1789 {
1790         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1791         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1792         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1793
1794         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1795                                       sizeof(struct sol_cqe));
1796         num_async_pdu_buf_pages =
1797                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1798                                        phba->params.defpdu_hdr_sz);
1799         num_async_pdu_buf_sgl_pages =
1800                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1801                                        sizeof(struct phys_addr));
1802         num_async_pdu_data_pages =
1803                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1804                                        phba->params.defpdu_data_sz);
1805         num_async_pdu_data_sgl_pages =
1806                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1807                                        sizeof(struct phys_addr));
1808
1809         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1810
1811         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1812                                                  BE_ISCSI_PDU_HEADER_SIZE;
1813         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1814                                             sizeof(struct hwi_context_memory);
1815
1816
1817         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1818             * (phba->params.wrbs_per_cxn)
1819             * phba->params.cxns_per_ctrl;
1820         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1821                                  (phba->params.wrbs_per_cxn);
1822         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1823                                 phba->params.cxns_per_ctrl);
1824
1825         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1826                 phba->params.icds_per_ctrl;
1827         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1828                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1829
1830         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1831                 num_async_pdu_buf_pages * PAGE_SIZE;
1832         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1833                 num_async_pdu_data_pages * PAGE_SIZE;
1834         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1835                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1836         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1837                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1838         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1839                 phba->params.asyncpdus_per_ctrl *
1840                 sizeof(struct async_pdu_handle);
1841         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1842                 phba->params.asyncpdus_per_ctrl *
1843                 sizeof(struct async_pdu_handle);
1844         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1845                 sizeof(struct hwi_async_pdu_context) +
1846                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1847 }
1848
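     /*
      * beiscsi_alloc_mem - allocate the regions sized by
      * beiscsi_find_mem_req().  A region may be split into up to
      * BEISCSI_MAX_FRAGS_INIT coherent fragments; on allocation failure the
      * chunk size is rounded down to a power of two, or halved, until
      * BE_MIN_MEM_SIZE is reached.
      */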
1849 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1850 {
1851         struct be_mem_descriptor *mem_descr;
1852         dma_addr_t bus_add;
1853         struct mem_array *mem_arr, *mem_arr_orig;
1854         unsigned int i, j, alloc_size, curr_alloc_size;
1855
1856         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1857         if (!phba->phwi_ctrlr)
1858                 return -ENOMEM;
1859
1860         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1861                                  GFP_KERNEL);
1862         if (!phba->init_mem) {
1863                 kfree(phba->phwi_ctrlr);
1864                 return -ENOMEM;
1865         }
1866
1867         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1868                                GFP_KERNEL);
1869         if (!mem_arr_orig) {
1870                 kfree(phba->init_mem);
1871                 kfree(phba->phwi_ctrlr);
1872                 return -ENOMEM;
1873         }
1874
1875         mem_descr = phba->init_mem;
1876         for (i = 0; i < SE_MEM_MAX; i++) {
1877                 j = 0;
1878                 mem_arr = mem_arr_orig;
1879                 alloc_size = phba->mem_req[i];
1880                 memset(mem_arr, 0, sizeof(struct mem_array) *
1881                        BEISCSI_MAX_FRAGS_INIT);
1882                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1883                 do {
1884                         mem_arr->virtual_address = pci_alloc_consistent(
1885                                                         phba->pcidev,
1886                                                         curr_alloc_size,
1887                                                         &bus_add);
1888                         if (!mem_arr->virtual_address) {
1889                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1890                                         goto free_mem;
1891                                 if (curr_alloc_size -
1892                                         rounddown_pow_of_two(curr_alloc_size))
1893                                         curr_alloc_size = rounddown_pow_of_two
1894                                                              (curr_alloc_size);
1895                                 else
1896                                         curr_alloc_size = curr_alloc_size / 2;
1897                         } else {
1898                                 mem_arr->bus_address.u.
1899                                     a64.address = (__u64) bus_add;
1900                                 mem_arr->size = curr_alloc_size;
1901                                 alloc_size -= curr_alloc_size;
1902                                 curr_alloc_size = min(be_max_phys_size *
1903                                                       1024, alloc_size);
1904                                 j++;
1905                                 mem_arr++;
1906                         }
1907                 } while (alloc_size);
1908                 mem_descr->num_elements = j;
1909                 mem_descr->size_in_bytes = phba->mem_req[i];
1910                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1911                                                GFP_KERNEL);
1912                 if (!mem_descr->mem_array)
1913                         goto free_mem;
1914
1915                 memcpy(mem_descr->mem_array, mem_arr_orig,
1916                        sizeof(struct mem_array) * j);
1917                 mem_descr++;
1918         }
1919         kfree(mem_arr_orig);
1920         return 0;
1921 free_mem:
1922         mem_descr->num_elements = j;
1923         while ((i) || (j)) {
1924                 for (j = mem_descr->num_elements; j > 0; j--) {
1925                         pci_free_consistent(phba->pcidev,
1926                                             mem_descr->mem_array[j - 1].size,
1927                                             mem_descr->mem_array[j - 1].
1928                                             virtual_address,
1929                                             mem_descr->mem_array[j - 1].
1930                                             bus_address.u.a64.address);
1931                 }
1932                 if (i) {
1933                         i--;
1934                         kfree(mem_descr->mem_array);
1935                         mem_descr--;
1936                 }
1937         }
1938         kfree(mem_arr_orig);
1939         kfree(phba->init_mem);
1940         kfree(phba->phwi_ctrlr);
1941         return -ENOMEM;
1942 }
1943
1944 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1945 {
1946         beiscsi_find_mem_req(phba);
1947         return beiscsi_alloc_mem(phba);
1948 }
1949
1950 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1951 {
1952         struct pdu_data_out *pdata_out;
1953         struct pdu_nop_out *pnop_out;
1954         struct be_mem_descriptor *mem_descr;
1955
1956         mem_descr = phba->init_mem;
1957         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1958         pdata_out =
1959             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1960         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1961
1962         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1963                       IIOC_SCSI_DATA);
1964
1965         pnop_out =
1966             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1967                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1968
1969         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1970         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1971         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1972         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1973 }
1974
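     /*
      * beiscsi_init_wrb_handle - carve the HWI_MEM_WRBH region into
      * per-connection wrb_handle arrays and point each handle at its
      * iscsi_wrb in the HWI_MEM_WRB region.
      */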
1975 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1976 {
1977         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1978         struct wrb_handle *pwrb_handle;
1979         struct hwi_controller *phwi_ctrlr;
1980         struct hwi_wrb_context *pwrb_context;
1981         struct iscsi_wrb *pwrb;
1982         unsigned int num_cxn_wrbh;
1983         unsigned int num_cxn_wrb, j, idx, index;
1984
1985         mem_descr_wrbh = phba->init_mem;
1986         mem_descr_wrbh += HWI_MEM_WRBH;
1987
1988         mem_descr_wrb = phba->init_mem;
1989         mem_descr_wrb += HWI_MEM_WRB;
1990
1991         idx = 0;
1992         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
1993         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
1994                         ((sizeof(struct wrb_handle)) *
1995                          phba->params.wrbs_per_cxn));
1996         phwi_ctrlr = phba->phwi_ctrlr;
1997
1998         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1999                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2000                 pwrb_context->pwrb_handle_base =
2001                                 kzalloc(sizeof(struct wrb_handle *) *
2002                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2003                 pwrb_context->pwrb_handle_basestd =
2004                                 kzalloc(sizeof(struct wrb_handle *) *
2005                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2006                 if (num_cxn_wrbh) {
2007                         pwrb_context->alloc_index = 0;
2008                         pwrb_context->wrb_handles_available = 0;
2009                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2010                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2011                                 pwrb_context->pwrb_handle_basestd[j] =
2012                                                                 pwrb_handle;
2013                                 pwrb_context->wrb_handles_available++;
2014                                 pwrb_handle->wrb_index = j;
2015                                 pwrb_handle++;
2016                         }
2017                         pwrb_context->free_index = 0;
2018                         num_cxn_wrbh--;
2019                 } else {
2020                         idx++;
2021                         pwrb_handle =
2022                             mem_descr_wrbh->mem_array[idx].virtual_address;
2023                         num_cxn_wrbh =
2024                             ((mem_descr_wrbh->mem_array[idx].size) /
2025                              ((sizeof(struct wrb_handle)) *
2026                               phba->params.wrbs_per_cxn));
2027                         pwrb_context->alloc_index = 0;
2028                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2029                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2030                                 pwrb_context->pwrb_handle_basestd[j] =
2031                                     pwrb_handle;
2032                                 pwrb_context->wrb_handles_available++;
2033                                 pwrb_handle->wrb_index = j;
2034                                 pwrb_handle++;
2035                         }
2036                         pwrb_context->free_index = 0;
2037                         num_cxn_wrbh--;
2038                 }
2039         }
2040         idx = 0;
2041         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2042         num_cxn_wrb =
2043             ((mem_descr_wrb->mem_array[idx].size) /
2044              (sizeof(struct iscsi_wrb) * phba->params.wrbs_per_cxn));
2045
2046         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2047                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2048                 if (num_cxn_wrb) {
2049                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2050                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2051                                 pwrb_handle->pwrb = pwrb;
2052                                 pwrb++;
2053                         }
2054                         num_cxn_wrb--;
2055                 } else {
2056                         idx++;
2057                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2058                         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2059                                        (sizeof(struct iscsi_wrb) *
2060                                         phba->params.wrbs_per_cxn));
2061                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2062                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2063                                 pwrb_handle->pwrb = pwrb;
2064                                 pwrb++;
2065                         }
2066                         num_cxn_wrb--;
2067                 }
2068         }
2069 }
2070
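     /*
      * hwi_init_async_pdu_ctx - point the async PDU context at the
      * header/data buffer, ring and handle regions carved out of init_mem,
      * and seed the header and data free lists with one handle per default
      * PDU.
      */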
2071 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2072 {
2073         struct hwi_controller *phwi_ctrlr;
2074         struct hba_parameters *p = &phba->params;
2075         struct hwi_async_pdu_context *pasync_ctx;
2076         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2077         unsigned int index;
2078         struct be_mem_descriptor *mem_descr;
2079
2080         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2081         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2082
2083         phwi_ctrlr = phba->phwi_ctrlr;
2084         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2085                                 mem_descr->mem_array[0].virtual_address;
2086         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2087         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2088
2089         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2090         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2091         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2092         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2093
2094         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2095         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2096         if (mem_descr->mem_array[0].virtual_address) {
2097                 SE_DEBUG(DBG_LVL_8,
2098                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF "
2099                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2100         } else
2101                 shost_printk(KERN_WARNING, phba->shost,
2102                              "No Virtual address \n");
2103
2104         pasync_ctx->async_header.va_base =
2105                         mem_descr->mem_array[0].virtual_address;
2106
2107         pasync_ctx->async_header.pa_base.u.a64.address =
2108                         mem_descr->mem_array[0].bus_address.u.a64.address;
2109
2110         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2111         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2112         if (mem_descr->mem_array[0].virtual_address) {
2113                 SE_DEBUG(DBG_LVL_8,
2114                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING "
2115                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2116         } else
2117                 shost_printk(KERN_WARNING, phba->shost,
2118                             "No Virtual address \n");
2119         pasync_ctx->async_header.ring_base =
2120                         mem_descr->mem_array[0].virtual_address;
2121
2122         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2123         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2124         if (mem_descr->mem_array[0].virtual_address) {
2125                 SE_DEBUG(DBG_LVL_8,
2126                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE "
2127                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2128         } else
2129                 shost_printk(KERN_WARNING, phba->shost,
2130                             "No Virtual address \n");
2131
2132         pasync_ctx->async_header.handle_base =
2133                         mem_descr->mem_array[0].virtual_address;
2134         pasync_ctx->async_header.writables = 0;
2135         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2136
2137         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2138         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2139         if (mem_descr->mem_array[0].virtual_address) {
2140                 SE_DEBUG(DBG_LVL_8,
2141                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF "
2142                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2143         } else
2144                 shost_printk(KERN_WARNING, phba->shost,
2145                             "No Virtual address \n");
2146         pasync_ctx->async_data.va_base =
2147                         mem_descr->mem_array[0].virtual_address;
2148         pasync_ctx->async_data.pa_base.u.a64.address =
2149                         mem_descr->mem_array[0].bus_address.u.a64.address;
2150
2151         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2152         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2153         if (mem_descr->mem_array[0].virtual_address) {
2154                 SE_DEBUG(DBG_LVL_8,
2155                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING "
2156                          "va=%p \n", mem_descr->mem_array[0].virtual_address);
2157         } else
2158                 shost_printk(KERN_WARNING, phba->shost,
2159                              "No Virtual address \n");
2160
2161         pasync_ctx->async_data.ring_base =
2162                         mem_descr->mem_array[0].virtual_address;
2163
2164         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2165         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2166         if (!mem_descr->mem_array[0].virtual_address)
2167                 shost_printk(KERN_WARNING, phba->shost,
2168                             "No Virtual address \n");
2169
2170         pasync_ctx->async_data.handle_base =
2171                         mem_descr->mem_array[0].virtual_address;
2172         pasync_ctx->async_data.writables = 0;
2173         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2174
2175         pasync_header_h =
2176                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2177         pasync_data_h =
2178                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2179
2180         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2181                 pasync_header_h->cri = -1;
2182                 pasync_header_h->index = (char)index;
2183                 INIT_LIST_HEAD(&pasync_header_h->link);
2184                 pasync_header_h->pbuffer =
2185                         (void *)((unsigned long)
2186                         (pasync_ctx->async_header.va_base) +
2187                         (p->defpdu_hdr_sz * index));
2188
2189                 pasync_header_h->pa.u.a64.address =
2190                         pasync_ctx->async_header.pa_base.u.a64.address +
2191                         (p->defpdu_hdr_sz * index);
2192
2193                 list_add_tail(&pasync_header_h->link,
2194                                 &pasync_ctx->async_header.free_list);
2195                 pasync_header_h++;
2196                 pasync_ctx->async_header.free_entries++;
2197                 pasync_ctx->async_header.writables++;
2198
2199                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2200                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2201                                header_busy_list);
2202                 pasync_data_h->cri = -1;
2203                 pasync_data_h->index = (char)index;
2204                 INIT_LIST_HEAD(&pasync_data_h->link);
2205                 pasync_data_h->pbuffer =
2206                         (void *)((unsigned long)
2207                         (pasync_ctx->async_data.va_base) +
2208                         (p->defpdu_data_sz * index));
2209
2210                 pasync_data_h->pa.u.a64.address =
2211                     pasync_ctx->async_data.pa_base.u.a64.address +
2212                     (p->defpdu_data_sz * index);
2213
2214                 list_add_tail(&pasync_data_h->link,
2215                               &pasync_ctx->async_data.free_list);
2216                 pasync_data_h++;
2217                 pasync_ctx->async_data.free_entries++;
2218                 pasync_ctx->async_data.writables++;
2219
2220                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2221         }
2222
2223         pasync_ctx->async_header.host_write_ptr = 0;
2224         pasync_ctx->async_header.ep_read_ptr = -1;
2225         pasync_ctx->async_data.host_write_ptr = 0;
2226         pasync_ctx->async_data.ep_read_ptr = -1;
2227 }
2228
2229 static int
2230 be_sgl_create_contiguous(void *virtual_address,
2231                          u64 physical_address, u32 length,
2232                          struct be_dma_mem *sgl)
2233 {
2234         WARN_ON(!virtual_address);
2235         WARN_ON(!physical_address);
2236         WARN_ON(length == 0);
2237         WARN_ON(!sgl);
2238
2239         sgl->va = virtual_address;
2240         sgl->dma = physical_address;
2241         sgl->size = length;
2242
2243         return 0;
2244 }
2245
2246 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2247 {
2248         memset(sgl, 0, sizeof(*sgl));
2249 }
2250
2251 static void
2252 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2253                      struct mem_array *pmem, struct be_dma_mem *sgl)
2254 {
2255         if (sgl->va)
2256                 be_sgl_destroy_contiguous(sgl);
2257
2258         be_sgl_create_contiguous(pmem->virtual_address,
2259                                  pmem->bus_address.u.a64.address,
2260                                  pmem->size, sgl);
2261 }
2262
2263 static void
2264 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2265                            struct mem_array *pmem, struct be_dma_mem *sgl)
2266 {
2267         if (sgl->va)
2268                 be_sgl_destroy_contiguous(sgl);
2269
2270         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2271                                  pmem->bus_address.u.a64.address,
2272                                  pmem->size, sgl);
2273 }
2274
2275 static int be_fill_queue(struct be_queue_info *q,
2276                 u16 len, u16 entry_size, void *vaddress)
2277 {
2278         struct be_dma_mem *mem = &q->dma_mem;
2279
2280         memset(q, 0, sizeof(*q));
2281         q->len = len;
2282         q->entry_size = entry_size;
2283         mem->size = len * entry_size;
2284         mem->va = vaddress;
2285         if (!mem->va)
2286                 return -ENOMEM;
2287         memset(mem->va, 0, mem->size);
2288         return 0;
2289 }
2290
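     /*
      * beiscsi_create_eqs - allocate and create one event queue per CPU,
      * plus one for MCC when MSI-X is enabled, using cur_eqd as the EQ
      * delay.
      */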
2291 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2292                              struct hwi_context_memory *phwi_context)
2293 {
2294         unsigned int i, num_eq_pages;
2295         int ret, eq_for_mcc;
2296         struct be_queue_info *eq;
2297         struct be_dma_mem *mem;
2298         void *eq_vaddress;
2299         dma_addr_t paddr;
2300
2301         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2302                                       sizeof(struct be_eq_entry));
2303
2304         if (phba->msix_enabled)
2305                 eq_for_mcc = 1;
2306         else
2307                 eq_for_mcc = 0;
2308         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2309                 eq = &phwi_context->be_eq[i].q;
2310                 mem = &eq->dma_mem;
2311                 phwi_context->be_eq[i].phba = phba;
2312                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2313                                                      num_eq_pages * PAGE_SIZE,
2314                                                      &paddr);
2315                 if (!eq_vaddress)
2316                         goto create_eq_error;
2317
2318                 mem->va = eq_vaddress;
2319                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2320                                     sizeof(struct be_eq_entry), eq_vaddress);
2321                 if (ret) {
2322                         shost_printk(KERN_ERR, phba->shost,
2323                                      "be_fill_queue Failed for EQ \n");
2324                         goto create_eq_error;
2325                 }
2326
2327                 mem->dma = paddr;
2328                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2329                                             phwi_context->cur_eqd);
2330                 if (ret) {
2331                         shost_printk(KERN_ERR, phba->shost,
2332                                      "beiscsi_cmd_eq_create "
2333                                      "Failed for EQ\n");
2334                         goto create_eq_error;
2335                 }
2336                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2337         }
2338         return 0;
2339 create_eq_error:
2340         for (i = 0; i < (phba->num_cpus + 1); i++) {
2341                 eq = &phwi_context->be_eq[i].q;
2342                 mem = &eq->dma_mem;
2343                 if (mem->va)
2344                         pci_free_consistent(phba->pcidev, num_eq_pages
2345                                             * PAGE_SIZE,
2346                                             mem->va, mem->dma);
2347         }
2348         return ret;
2349 }
2350
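     /*
      * beiscsi_create_cqs - create one completion queue per CPU and attach
      * each to its event queue.
      */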
2351 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2352                              struct hwi_context_memory *phwi_context)
2353 {
2354         unsigned int i, num_cq_pages;
2355         int ret;
2356         struct be_queue_info *cq, *eq;
2357         struct be_dma_mem *mem;
2358         struct be_eq_obj *pbe_eq;
2359         void *cq_vaddress;
2360         dma_addr_t paddr;
2361
2362         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2363                                       sizeof(struct sol_cqe));
2364
2365         for (i = 0; i < phba->num_cpus; i++) {
2366                 cq = &phwi_context->be_cq[i];
2367                 eq = &phwi_context->be_eq[i].q;
2368                 pbe_eq = &phwi_context->be_eq[i];
2369                 pbe_eq->cq = cq;
2370                 pbe_eq->phba = phba;
2371                 mem = &cq->dma_mem;
2372                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2373                                                      num_cq_pages * PAGE_SIZE,
2374                                                      &paddr);
2375                 if (!cq_vaddress)
2376                         goto create_cq_error;
2377                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2378                                     sizeof(struct sol_cqe), cq_vaddress);
2379                 if (ret) {
2380                         shost_printk(KERN_ERR, phba->shost,
2381                                      "be_fill_queue Failed for ISCSI CQ \n");
2382                         goto create_cq_error;
2383                 }
2384
2385                 mem->dma = paddr;
2386                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2387                                             false, 0);
2388                 if (ret) {
2389                         shost_printk(KERN_ERR, phba->shost,
2390                                      "beiscsi_cmd_cq_create "
2391                                      "Failed for ISCSI CQ\n");
2392                         goto create_cq_error;
2393                 }
2394                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2395                                                  cq->id, eq->id);
2396                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2397         }
2398         return 0;
2399
2400 create_cq_error:
2401         for (i = 0; i < phba->num_cpus; i++) {
2402                 cq = &phwi_context->be_cq[i];
2403                 mem = &cq->dma_mem;
2404                 if (mem->va)
2405                         pci_free_consistent(phba->pcidev, num_cq_pages
2406                                             * PAGE_SIZE,
2407                                             mem->va, mem->dma);
2408         }
2409         return ret;
2410
2411 }
2412
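     /*
      * beiscsi_create_def_hdr - create the default PDU header ring on top
      * of the HWI_MEM_ASYNC_HEADER_RING region, bind it to CQ 0 and post
      * the initial header buffers.
      */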
2413 static int
2414 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2415                        struct hwi_context_memory *phwi_context,
2416                        struct hwi_controller *phwi_ctrlr,
2417                        unsigned int def_pdu_ring_sz)
2418 {
2419         unsigned int idx;
2420         int ret;
2421         struct be_queue_info *dq, *cq;
2422         struct be_dma_mem *mem;
2423         struct be_mem_descriptor *mem_descr;
2424         void *dq_vaddress;
2425
2426         idx = 0;
2427         dq = &phwi_context->be_def_hdrq;
2428         cq = &phwi_context->be_cq[0];
2429         mem = &dq->dma_mem;
2430         mem_descr = phba->init_mem;
2431         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2432         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2433         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2434                             sizeof(struct phys_addr),
2435                             sizeof(struct phys_addr), dq_vaddress);
2436         if (ret) {
2437                 shost_printk(KERN_ERR, phba->shost,
2438                              "be_fill_queue Failed for DEF PDU HDR\n");
2439                 return ret;
2440         }
2441         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2442         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2443                                               def_pdu_ring_sz,
2444                                               phba->params.defpdu_hdr_sz);
2445         if (ret) {
2446                 shost_printk(KERN_ERR, phba->shost,
2447                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2448                 return ret;
2449         }
2450         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2451         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2452                  phwi_context->be_def_hdrq.id);
2453         hwi_post_async_buffers(phba, 1);
2454         return 0;
2455 }
2456
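/*
 * beiscsi_create_def_data - set up the default PDU data ring.
 * Same flow as beiscsi_create_def_hdr, but backed by the
 * HWI_MEM_ASYNC_DATA_RING descriptor and followed by posting the async
 * data buffers.
 */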
2457 static int
2458 beiscsi_create_def_data(struct beiscsi_hba *phba,
2459                         struct hwi_context_memory *phwi_context,
2460                         struct hwi_controller *phwi_ctrlr,
2461                         unsigned int def_pdu_ring_sz)
2462 {
2463         unsigned int idx;
2464         int ret;
2465         struct be_queue_info *dataq, *cq;
2466         struct be_dma_mem *mem;
2467         struct be_mem_descriptor *mem_descr;
2468         void *dq_vaddress;
2469
2470         idx = 0;
2471         dataq = &phwi_context->be_def_dataq;
2472         cq = &phwi_context->be_cq[0];
2473         mem = &dataq->dma_mem;
2474         mem_descr = phba->init_mem;
2475         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2476         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2477         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2478                             sizeof(struct phys_addr),
2479                             sizeof(struct phys_addr), dq_vaddress);
2480         if (ret) {
2481                 shost_printk(KERN_ERR, phba->shost,
2482                              "be_fill_queue Failed for DEF PDU DATA\n");
2483                 return ret;
2484         }
2485         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2486         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2487                                               def_pdu_ring_sz,
2488                                               phba->params.defpdu_data_sz);
2489         if (ret) {
2490                 shost_printk(KERN_ERR, phba->shost,
2491                              "be_cmd_create_default_pdu_queue Failed"
2492                              " for DEF PDU DATA\n");
2493                 return ret;
2494         }
2495         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2496         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2497                  phwi_context->be_def_dataq.id);
2498         hwi_post_async_buffers(phba, 0);
2499         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2500         return 0;
2501 }
2502
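/*
 * beiscsi_post_pages - post the SGE pages reserved in HWI_MEM_SGE to the
 * adapter, one mem_array element at a time, starting at the page offset
 * that corresponds to the firmware's first ICD index.
 */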
2503 static int
2504 beiscsi_post_pages(struct beiscsi_hba *phba)
2505 {
2506         struct be_mem_descriptor *mem_descr;
2507         struct mem_array *pm_arr;
2508         unsigned int page_offset, i;
2509         struct be_dma_mem sgl;
2510         int status;
2511
2512         mem_descr = phba->init_mem;
2513         mem_descr += HWI_MEM_SGE;
2514         pm_arr = mem_descr->mem_array;
2515
2516         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2517                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2518         for (i = 0; i < mem_descr->num_elements; i++) {
2519                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2520                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2521                                                 page_offset,
2522                                                 (pm_arr->size / PAGE_SIZE));
2523                 page_offset += pm_arr->size / PAGE_SIZE;
2524                 if (status != 0) {
2525                         shost_printk(KERN_ERR, phba->shost,
2526                                      "post sgl failed.\n");
2527                         return status;
2528                 }
2529                 pm_arr++;
2530         }
2531         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2532         return 0;
2533 }
2534
2535 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2536 {
2537         struct be_dma_mem *mem = &q->dma_mem;
2538         if (mem->va)
2539                 pci_free_consistent(phba->pcidev, mem->size,
2540                         mem->va, mem->dma);
2541 }
2542
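/*
 * be_queue_alloc - allocate and zero coherent DMA memory for a queue of
 * len entries of entry_size bytes; be_queue_free() releases it.
 */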
2543 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2544                 u16 len, u16 entry_size)
2545 {
2546         struct be_dma_mem *mem = &q->dma_mem;
2547
2548         memset(q, 0, sizeof(*q));
2549         q->len = len;
2550         q->entry_size = entry_size;
2551         mem->size = len * entry_size;
2552         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2553         if (!mem->va)
2554                 return -ENOMEM;
2555         memset(mem->va, 0, mem->size);
2556         return 0;
2557 }
2558
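/*
 * beiscsi_create_wrb_rings - carve the HWI_MEM_WRB region into one WRB ring
 * per connection, create a WRBQ for each ring and record the returned queue
 * id as the connection's cid.
 */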
2559 static int
2560 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2561                          struct hwi_context_memory *phwi_context,
2562                          struct hwi_controller *phwi_ctrlr)
2563 {
2564         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2565         u64 pa_addr_lo;
2566         unsigned int idx, num, i;
2567         struct mem_array *pwrb_arr;
2568         void *wrb_vaddr;
2569         struct be_dma_mem sgl;
2570         struct be_mem_descriptor *mem_descr;
2571         int status;
2572
2573         idx = 0;
2574         mem_descr = phba->init_mem;
2575         mem_descr += HWI_MEM_WRB;
2576         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2577                            GFP_KERNEL);
2578         if (!pwrb_arr) {
2579                 shost_printk(KERN_ERR, phba->shost,
2580                              "Memory alloc failed in create wrb ring.\n");
2581                 return -ENOMEM;
2582         }
2583         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2584         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2585         num_wrb_rings = mem_descr->mem_array[idx].size /
2586                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2587
2588         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2589                 if (num_wrb_rings) {
2590                         pwrb_arr[num].virtual_address = wrb_vaddr;
2591                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2592                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2593                                             sizeof(struct iscsi_wrb);
2594                         wrb_vaddr += pwrb_arr[num].size;
2595                         pa_addr_lo += pwrb_arr[num].size;
2596                         num_wrb_rings--;
2597                 } else {
2598                         idx++;
2599                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2600                         pa_addr_lo = mem_descr->mem_array[idx].
2601                                         bus_address.u.a64.address;
2602                         num_wrb_rings = mem_descr->mem_array[idx].size /
2603                                         (phba->params.wrbs_per_cxn *
2604                                         sizeof(struct iscsi_wrb));
2605                         pwrb_arr[num].virtual_address = wrb_vaddr;
2606                         pwrb_arr[num].bus_address.u.a64.address =
2607                                                 pa_addr_lo;
2608                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2609                                                  sizeof(struct iscsi_wrb);
2610                         wrb_vaddr += pwrb_arr[num].size;
2611                         pa_addr_lo += pwrb_arr[num].size;
2612                         num_wrb_rings--;
2613                 }
2614         }
2615         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2616                 wrb_mem_index = 0;
2617                 offset = 0;
2618                 size = 0;
2619
2620                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2621                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2622                                             &phwi_context->be_wrbq[i]);
2623                 if (status != 0) {
2624                         shost_printk(KERN_ERR, phba->shost,
2625                                      "wrbq create failed.\n");
                        kfree(pwrb_arr);
2626                         return status;
2627                 }
2628                 phwi_ctrlr->wrb_context[i * 2].cid =
2629                                         phwi_context->be_wrbq[i].id;
2630         }
2631         kfree(pwrb_arr);
2632         return 0;
2633 }
2634
2635 static void free_wrb_handles(struct beiscsi_hba *phba)
2636 {
2637         unsigned int index;
2638         struct hwi_controller *phwi_ctrlr;
2639         struct hwi_wrb_context *pwrb_context;
2640
2641         phwi_ctrlr = phba->phwi_ctrlr;
2642         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2643                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2644                 kfree(pwrb_context->pwrb_handle_base);
2645                 kfree(pwrb_context->pwrb_handle_basestd);
2646         }
2647 }
2648
2649 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2650 {
2651         struct be_queue_info *q;
2652         struct be_ctrl_info *ctrl = &phba->ctrl;
2653
2654         q = &phba->ctrl.mcc_obj.q;
2655         if (q->created)
2656                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2657         be_queue_free(phba, q);
2658
2659         q = &phba->ctrl.mcc_obj.cq;
2660         if (q->created)
2661                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2662         be_queue_free(phba, q);
2663 }
2664
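/*
 * hwi_cleanup - destroy everything created by hwi_init_port: the WRB queues,
 * default PDU rings, posted SGL pages, completion queues, event queues and
 * finally the MCC queues.
 */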
2665 static void hwi_cleanup(struct beiscsi_hba *phba)
2666 {
2667         struct be_queue_info *q;
2668         struct be_ctrl_info *ctrl = &phba->ctrl;
2669         struct hwi_controller *phwi_ctrlr;
2670         struct hwi_context_memory *phwi_context;
2671         int i, eq_num;
2672
2673         phwi_ctrlr = phba->phwi_ctrlr;
2674         phwi_context = phwi_ctrlr->phwi_ctxt;
2675         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2676                 q = &phwi_context->be_wrbq[i];
2677                 if (q->created)
2678                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2679         }
2680         free_wrb_handles(phba);
2681
2682         q = &phwi_context->be_def_hdrq;
2683         if (q->created)
2684                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2685
2686         q = &phwi_context->be_def_dataq;
2687         if (q->created)
2688                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2689
2690         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2691
2692         for (i = 0; i < (phba->num_cpus); i++) {
2693                 q = &phwi_context->be_cq[i];
2694                 if (q->created)
2695                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2696         }
2697         if (phba->msix_enabled)
2698                 eq_num = 1;
2699         else
2700                 eq_num = 0;
2701         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2702                 q = &phwi_context->be_eq[i].q;
2703                 if (q->created)
2704                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2705         }
2706         be_mcc_queues_destroy(phba);
2707 }
2708
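/*
 * be_mcc_queues_create - allocate the MCC queue and its completion queue and
 * ask the adapter to create them. The completion queue is bound to the extra
 * event queue when MSI-X is enabled, otherwise to EQ 0.
 */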
2709 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2710                                 struct hwi_context_memory *phwi_context)
2711 {
2712         struct be_queue_info *q, *cq;
2713         struct be_ctrl_info *ctrl = &phba->ctrl;
2714
2715         /* Alloc MCC compl queue */
2716         cq = &phba->ctrl.mcc_obj.cq;
2717         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2718                         sizeof(struct be_mcc_compl)))
2719                 goto err;
2720         /* Ask BE to create MCC compl queue; */
2721         if (phba->msix_enabled) {
2722                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2723                                           [phba->num_cpus].q, false, true, 0))
2724                         goto mcc_cq_free;
2725         } else {
2726                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2727                                           false, true, 0))
2728                         goto mcc_cq_free;
2729         }
2730
2731         /* Alloc MCC queue */
2732         q = &phba->ctrl.mcc_obj.q;
2733         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2734                 goto mcc_cq_destroy;
2735
2736         /* Ask BE to create MCC queue */
2737         if (beiscsi_cmd_mccq_create(phba, q, cq))
2738                 goto mcc_q_free;
2739
2740         return 0;
2741
2742 mcc_q_free:
2743         be_queue_free(phba, q);
2744 mcc_cq_destroy:
2745         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2746 mcc_cq_free:
2747         be_queue_free(phba, cq);
2748 err:
2749         return -ENOMEM;
2750 }
2751
2752 static int find_num_cpus(void)
2753 {
2754         int  num_cpus = 0;
2755
2756         num_cpus = num_online_cpus();
2757         if (num_cpus >= MAX_CPUS)
2758                 num_cpus = MAX_CPUS - 1;
2759
2760         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2761         return num_cpus;
2762 }
2763
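/*
 * hwi_init_port - bring up the adapter queues in order: firmware init, event
 * queues, MCC queues, completion queues, default PDU header and data rings,
 * SGL page posting and the per-connection WRB rings. On any failure the
 * queues created so far are torn down through hwi_cleanup().
 */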
2764 static int hwi_init_port(struct beiscsi_hba *phba)
2765 {
2766         struct hwi_controller *phwi_ctrlr;
2767         struct hwi_context_memory *phwi_context;
2768         unsigned int def_pdu_ring_sz;
2769         struct be_ctrl_info *ctrl = &phba->ctrl;
2770         int status;
2771
2772         def_pdu_ring_sz =
2773                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2774         phwi_ctrlr = phba->phwi_ctrlr;
2775         phwi_context = phwi_ctrlr->phwi_ctxt;
2776         phwi_context->max_eqd = 0;
2777         phwi_context->min_eqd = 0;
2778         phwi_context->cur_eqd = 64;
2779         be_cmd_fw_initialize(&phba->ctrl);
2780
2781         status = beiscsi_create_eqs(phba, phwi_context);
2782         if (status != 0) {
2783                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2784                 goto error;
2785         }
2786
2787         status = be_mcc_queues_create(phba, phwi_context);
2788         if (status != 0)
2789                 goto error;
2790
2791         status = mgmt_check_supported_fw(ctrl, phba);
2792         if (status != 0) {
2793                 shost_printk(KERN_ERR, phba->shost,
2794                              "Unsupported fw version \n");
2795                 goto error;
2796         }
2797
2798         if (phba->fw_config.iscsi_features == 0x1)
2799                 ring_mode = 1;
2800         else
2801                 ring_mode = 0;
2802
2803         status = beiscsi_create_cqs(phba, phwi_context);
2804         if (status != 0) {
2805                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2806                 goto error;
2807         }
2808
2809         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2810                                         def_pdu_ring_sz);
2811         if (status != 0) {
2812                 shost_printk(KERN_ERR, phba->shost,
2813                              "Default Header not created\n");
2814                 goto error;
2815         }
2816
2817         status = beiscsi_create_def_data(phba, phwi_context,
2818                                          phwi_ctrlr, def_pdu_ring_sz);
2819         if (status != 0) {
2820                 shost_printk(KERN_ERR, phba->shost,
2821                              "Default Data not created\n");
2822                 goto error;
2823         }
2824
2825         status = beiscsi_post_pages(phba);
2826         if (status != 0) {
2827                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2828                 goto error;
2829         }
2830
2831         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2832         if (status != 0) {
2833                 shost_printk(KERN_ERR, phba->shost,
2834                              "WRB Rings not created\n");
2835                 goto error;
2836         }
2837
2838         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2839         return 0;
2840
2841 error:
2842         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2843         hwi_cleanup(phba);
2844         return -ENOMEM;
2845 }
2846
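/*
 * hwi_init_controller - point the HWI context at the single
 * HWI_MEM_ADDN_CONTEXT chunk, initialize the iscsi templates, WRB handles
 * and async PDU context, then create the hardware queues via hwi_init_port().
 */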
2847 static int hwi_init_controller(struct beiscsi_hba *phba)
2848 {
2849         struct hwi_controller *phwi_ctrlr;
2850
2851         phwi_ctrlr = phba->phwi_ctrlr;
2852         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2853                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2854                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2855                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2856                          phwi_ctrlr->phwi_ctxt);
2857         } else {
2858                 shost_printk(KERN_ERR, phba->shost,
2859                              "HWI_MEM_ADDN_CONTEXT has more than one element. "
2860                              "Failing to load\n");
2861                 return -ENOMEM;
2862         }
2863
2864         iscsi_init_global_templates(phba);
2865         beiscsi_init_wrb_handle(phba);
2866         hwi_init_async_pdu_ctx(phba);
2867         if (hwi_init_port(phba) != 0) {
2868                 shost_printk(KERN_ERR, phba->shost,
2869                              "hwi_init_controller failed\n");
2870                 return -ENOMEM;
2871         }
2872         return 0;
2873 }
2874
2875 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2876 {
2877         struct be_mem_descriptor *mem_descr;
2878         int i, j;
2879
2880         mem_descr = phba->init_mem;
2881         i = 0;
2882         j = 0;
2883         for (i = 0; i < SE_MEM_MAX; i++) {
2884                 for (j = mem_descr->num_elements; j > 0; j--) {
2885                         pci_free_consistent(phba->pcidev,
2886                           mem_descr->mem_array[j - 1].size,
2887                           mem_descr->mem_array[j - 1].virtual_address,
2888                           mem_descr->mem_array[j - 1].bus_address.
2889                                 u.a64.address);
2890                 }
2891                 kfree(mem_descr->mem_array);
2892                 mem_descr++;
2893         }
2894         kfree(phba->init_mem);
2895         kfree(phba->phwi_ctrlr);
2896 }
2897
2898 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2899 {
2900         int ret = -ENOMEM;
2901
2902         ret = beiscsi_get_memory(phba);
2903         if (ret < 0) {
2904                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2905                              "Failed in beiscsi_get_memory\n");
2906                 return ret;
2907         }
2908
2909         ret = hwi_init_controller(phba);
2910         if (ret)
2911                 goto free_init;
2912         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2913         return 0;
2914
2915 free_init:
2916         beiscsi_free_mem(phba);
2917         return -ENOMEM;
2918 }
2919
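/*
 * beiscsi_init_sgl_handle - build the I/O and eh SGL handle tables from the
 * HWI_MEM_SGLH region and attach each handle to its iscsi_sge fragment from
 * HWI_MEM_SGE, assigning sgl_index values from the firmware ICD base upward.
 */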
2920 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2921 {
2922         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2923         struct sgl_handle *psgl_handle;
2924         struct iscsi_sge *pfrag;
2925         unsigned int arr_index, i, idx;
2926
2927         phba->io_sgl_hndl_avbl = 0;
2928         phba->eh_sgl_hndl_avbl = 0;
2929
2930         if (ring_mode) {
2931                 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2932                                               phba->params.icds_per_ctrl,
2933                                                  GFP_KERNEL);
2934                 if (!phba->sgl_hndl_array) {
2935                         shost_printk(KERN_ERR, phba->shost,
2936                              "Mem Alloc Failed. Failing to load\n");
2937                         return -ENOMEM;
2938                 }
2939         }
2940
2941         mem_descr_sglh = phba->init_mem;
2942         mem_descr_sglh += HWI_MEM_SGLH;
2943         if (1 == mem_descr_sglh->num_elements) {
2944                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2945                                                  phba->params.ios_per_ctrl,
2946                                                  GFP_KERNEL);
2947                 if (!phba->io_sgl_hndl_base) {
2948                         if (ring_mode)
2949                                 kfree(phba->sgl_hndl_array);
2950                         shost_printk(KERN_ERR, phba->shost,
2951                                      "Mem Alloc Failed. Failing to load\n");
2952                         return -ENOMEM;
2953                 }
2954                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2955                                                  (phba->params.icds_per_ctrl -
2956                                                  phba->params.ios_per_ctrl),
2957                                                  GFP_KERNEL);
2958                 if (!phba->eh_sgl_hndl_base) {
                        if (ring_mode)
                                kfree(phba->sgl_hndl_array);
2959                         kfree(phba->io_sgl_hndl_base);
2960                         shost_printk(KERN_ERR, phba->shost,
2961                                      "Mem Alloc Failed. Failing to load\n");
2962                         return -ENOMEM;
2963                 }
2964         } else {
2965                 shost_printk(KERN_ERR, phba->shost,
2966                              "HWI_MEM_SGLH has more than one element. "
2967                              "Failing to load\n");
                if (ring_mode)
                        kfree(phba->sgl_hndl_array);
2968                 return -ENOMEM;
2969         }
2970
2971         arr_index = 0;
2972         idx = 0;
2973         while (idx < mem_descr_sglh->num_elements) {
2974                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2975
2976                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2977                       sizeof(struct sgl_handle)); i++) {
2978                         if (arr_index < phba->params.ios_per_ctrl) {
2979                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2980                                 phba->io_sgl_hndl_avbl++;
2981                                 arr_index++;
2982                         } else {
2983                                 phba->eh_sgl_hndl_base[arr_index -
2984                                         phba->params.ios_per_ctrl] =
2985                                                                 psgl_handle;
2986                                 arr_index++;
2987                                 phba->eh_sgl_hndl_avbl++;
2988                         }
2989                         psgl_handle++;
2990                 }
2991                 idx++;
2992         }
2993         SE_DEBUG(DBG_LVL_8,
2994                  "phba->io_sgl_hndl_avbl=%d "
2995                  "phba->eh_sgl_hndl_avbl=%d\n",
2996                  phba->io_sgl_hndl_avbl,
2997                  phba->eh_sgl_hndl_avbl);
2998         mem_descr_sg = phba->init_mem;
2999         mem_descr_sg += HWI_MEM_SGE;
3000         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3001                  mem_descr_sg->num_elements);
3002         arr_index = 0;
3003         idx = 0;
3004         while (idx < mem_descr_sg->num_elements) {
3005                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3006
3007                 for (i = 0;
3008                      i < (mem_descr_sg->mem_array[idx].size) /
3009                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3010                      i++) {
3011                         if (arr_index < phba->params.ios_per_ctrl)
3012                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3013                         else
3014                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3015                                                 phba->params.ios_per_ctrl];
3016                         psgl_handle->pfrag = pfrag;
3017                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3018                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3019                         pfrag += phba->params.num_sge_per_io;
3020                         psgl_handle->sgl_index =
3021                                 phba->fw_config.iscsi_icd_start + arr_index++;
3022                 }
3023                 idx++;
3024         }
3025         phba->io_sgl_free_index = 0;
3026         phba->io_sgl_alloc_index = 0;
3027         phba->eh_sgl_free_index = 0;
3028         phba->eh_sgl_alloc_index = 0;
3029         return 0;
3030 }
3031
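/*
 * hba_setup_cid_tbls - allocate the free-CID array and the endpoint lookup
 * table and seed the CIDs starting from the firmware's iscsi_cid_start.
 */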
3032 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3033 {
3034         int i, new_cid;
3035
3036         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3037                                   GFP_KERNEL);
3038         if (!phba->cid_array) {
3039                 shost_printk(KERN_ERR, phba->shost,
3040                              "Failed to allocate memory in "
3041                              "hba_setup_cid_tbls\n");
3042                 return -ENOMEM;
3043         }
3044         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3045                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3046         if (!phba->ep_array) {
3047                 shost_printk(KERN_ERR, phba->shost,
3048                              "Failed to allocate memory in "
3049                              "hba_setup_cid_tbls \n");
3050                 kfree(phba->cid_array);
3051                 return -ENOMEM;
3052         }
3053         new_cid = phba->fw_config.iscsi_cid_start;
3054         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3055                 phba->cid_array[i] = new_cid;
3056                 new_cid += 2;
3057         }
3058         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3059         return 0;
3060 }
3061
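/*
 * hwi_enable_intr - set the host-interrupt enable bit in the membar control
 * register (if not already set) and ring each event queue doorbell to
 * re-arm it.
 */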
3062 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3063 {
3064         struct be_ctrl_info *ctrl = &phba->ctrl;
3065         struct hwi_controller *phwi_ctrlr;
3066         struct hwi_context_memory *phwi_context;
3067         struct be_queue_info *eq;
3068         u8 __iomem *addr;
3069         u32 reg, i;
3070         u32 enabled;
3071
3072         phwi_ctrlr = phba->phwi_ctrlr;
3073         phwi_context = phwi_ctrlr->phwi_ctxt;
3074
3075         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3076                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3077         reg = ioread32(addr);
3078         SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3079
3080         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3081         if (!enabled) {
3082                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3083                 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3084                 iowrite32(reg, addr);
3085                 for (i = 0; i <= phba->num_cpus; i++) {
3086                         eq = &phwi_context->be_eq[i].q;
3087                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3088                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3089                 }
3090         } else
3091                 shost_printk(KERN_WARNING, phba->shost,
3092                              "In hwi_enable_intr, Not Enabled \n");
3093         return true;
3094 }
3095
3096 static void hwi_disable_intr(struct beiscsi_hba *phba)
3097 {
3098         struct be_ctrl_info *ctrl = &phba->ctrl;
3099
3100         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3101         u32 reg = ioread32(addr);
3102
3103         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3104         if (enabled) {
3105                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3106                 iowrite32(reg, addr);
3107         } else
3108                 shost_printk(KERN_WARNING, phba->shost,
3109                              "In hwi_disable_intr, Already Disabled \n");
3110 }
3111
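/*
 * beiscsi_init_port - top-level port bring-up: controller memory and queues,
 * SGL handle tables and CID tables. hwi_cleanup() undoes the queue setup if
 * a later step fails.
 */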
3112 static int beiscsi_init_port(struct beiscsi_hba *phba)
3113 {
3114         int ret;
3115
3116         ret = beiscsi_init_controller(phba);
3117         if (ret < 0) {
3118                 shost_printk(KERN_ERR, phba->shost,
3119                              "beiscsi_dev_probe - Failed in "
3120                              "beiscsi_init_controller\n");
3121                 return ret;
3122         }
3123         ret = beiscsi_init_sgl_handle(phba);
3124         if (ret < 0) {
3125                 shost_printk(KERN_ERR, phba->shost,
3126                              "beiscsi_dev_probe - Failed in "
3127                              "beiscsi_init_sgl_handle\n");
3128                 goto do_cleanup_ctrlr;
3129         }
3130
3131         if (hba_setup_cid_tbls(phba)) {
3132                 shost_printk(KERN_ERR, phba->shost,
3133                              "Failed in hba_setup_cid_tbls\n");
3134                 if (ring_mode)
3135                         kfree(phba->sgl_hndl_array);
3136                 kfree(phba->io_sgl_hndl_base);
3137                 kfree(phba->eh_sgl_hndl_base);
                ret = -ENOMEM;
3138                 goto do_cleanup_ctrlr;
3139         }
3140
3141         return ret;
3142
3143 do_cleanup_ctrlr:
3144         hwi_cleanup(phba);
3145         return ret;
3146 }
3147
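/* Walk every event queue and invalidate any entries still marked valid. */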
3148 static void hwi_purge_eq(struct beiscsi_hba *phba)
3149 {
3150         struct hwi_controller *phwi_ctrlr;
3151         struct hwi_context_memory *phwi_context;
3152         struct be_queue_info *eq;
3153         struct be_eq_entry *eqe = NULL;
3154         int i, eq_msix;
3155
3156         phwi_ctrlr = phba->phwi_ctrlr;
3157         phwi_context = phwi_ctrlr->phwi_ctxt;
3158         if (phba->msix_enabled)
3159                 eq_msix = 1;
3160         else
3161                 eq_msix = 0;
3162
3163         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3164                 eq = &phwi_context->be_eq[i].q;
3165                 eqe = queue_tail_node(eq);
3166
3167                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3168                                         & EQE_VALID_MASK) {
3169                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3170                         queue_tail_inc(eq);
3171                         eqe = queue_tail_node(eq);
3172                 }
3173         }
3174 }
3175
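/*
 * beiscsi_clean_port - ask the firmware to clean up the connection chute,
 * destroy the hardware queues, drain the event queues and free the SGL
 * handle, CID and endpoint tables.
 */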
3176 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3177 {
3178         unsigned char mgmt_status;
3179
3180         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3181         if (mgmt_status)
3182                 shost_printk(KERN_WARNING, phba->shost,
3183                              "mgmt_epfw_cleanup FAILED \n");
3184         hwi_cleanup(phba);
3185         hwi_purge_eq(phba);
3186         if (ring_mode)
3187                 kfree(phba->sgl_hndl_array);
3188         kfree(phba->io_sgl_hndl_base);
3189         kfree(phba->eh_sgl_hndl_base);
3190         kfree(phba->cid_array);
3191         kfree(phba->ep_array);
3192 }
3193
3194 void
3195 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3196                            struct beiscsi_offload_params *params)
3197 {
3198         struct wrb_handle *pwrb_handle;
3199         struct iscsi_target_context_update_wrb *pwrb = NULL;
3200         struct be_mem_descriptor *mem_descr;
3201         struct beiscsi_hba *phba = beiscsi_conn->phba;
3202         u32 doorbell = 0;
3203
3204         /*
3205          * We can always use 0 here because it is reserved by libiscsi for
3206          * login/startup related tasks.
3207          */
3208         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3209                                        phba->fw_config.iscsi_cid_start), 0);
3210         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3211         memset(pwrb, 0, sizeof(*pwrb));
3212         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3213                       max_burst_length, pwrb, params->dw[offsetof
3214                       (struct amap_beiscsi_offload_params,
3215                       max_burst_length) / 32]);
3216         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3217                       max_send_data_segment_length, pwrb,
3218                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3219                       max_send_data_segment_length) / 32]);
3220         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3221                       first_burst_length,
3222                       pwrb,
3223                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3224                       first_burst_length) / 32]);
3225
3226         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3227                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3228                       erl) / 32] & OFFLD_PARAMS_ERL));
3229         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3230                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3231                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3232         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3233                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3234                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3235         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3236                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3237                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3238         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3239                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3240                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3241         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3242                       pwrb,
3243                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3244                       exp_statsn) / 32] + 1));
3245         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3246                       0x7);
3247         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3248                       pwrb, pwrb_handle->wrb_index);
3249         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3250                       pwrb, pwrb_handle->nxt_wrb_index);
3251         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3252                         session_state, pwrb, 0);
3253         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3254                       pwrb, 1);
3255         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3256                       pwrb, 0);
3257         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3258                       0);
3259
3260         mem_descr = phba->init_mem;
3261         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3262
3263         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3264                         pad_buffer_addr_hi, pwrb,
3265                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3266         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3267                         pad_buffer_addr_lo, pwrb,
3268                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3269
3270         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3271
3272         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3273         if (!ring_mode)
3274                 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3275                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3276         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3277
3278         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3279 }
3280
3281 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3282                               int *index, int *age)
3283 {
3284         *index = (int)itt;
3285         if (age)
3286                 *age = conn->session->age;
3287 }
3288
3289 /**
3290  * beiscsi_alloc_pdu - allocates pdu and related resources
3291  * @task: libiscsi task
3292  * @opcode: opcode of pdu for task
3293  *
3294  * This is called with the session lock held. It allocates
3295  * the wrb and sgl if needed for the command and preps
3296  * the pdu's itt. beiscsi_parse_pdu will later translate
3297  * the pdu itt to the libiscsi task itt.
3298  */
3299 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3300 {
3301         struct beiscsi_io_task *io_task = task->dd_data;
3302         struct iscsi_conn *conn = task->conn;
3303         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3304         struct beiscsi_hba *phba = beiscsi_conn->phba;
3305         struct hwi_wrb_context *pwrb_context;
3306         struct hwi_controller *phwi_ctrlr;
3307         itt_t itt;
3308         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3309         dma_addr_t paddr;
3310
3311         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3312                                           GFP_KERNEL, &paddr);
3313         if (!io_task->cmd_bhs)
3314                 return -ENOMEM;
3315         io_task->bhs_pa.u.a64.address = paddr;
3316         io_task->libiscsi_itt = (itt_t)task->itt;
3317         io_task->pwrb_handle = alloc_wrb_handle(phba,
3318                                                 beiscsi_conn->beiscsi_conn_cid -
3319                                                 phba->fw_config.iscsi_cid_start,
3320                                                 task->itt);
3321         io_task->conn = beiscsi_conn;
3322
3323         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3324         task->hdr_max = sizeof(struct be_cmd_bhs);
3325
3326         if (task->sc) {
3327                 spin_lock(&phba->io_sgl_lock);
3328                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3329                 spin_unlock(&phba->io_sgl_lock);
3330                 if (!io_task->psgl_handle)
3331                         goto free_hndls;
3332         } else {
3333                 io_task->scsi_cmnd = NULL;
3334                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3335                         if (!beiscsi_conn->login_in_progress) {
3336                                 spin_lock(&phba->mgmt_sgl_lock);
3337                                 io_task->psgl_handle = (struct sgl_handle *)
3338                                                 alloc_mgmt_sgl_handle(phba);
3339                                 spin_unlock(&phba->mgmt_sgl_lock);
3340                                 if (!io_task->psgl_handle)
3341                                         goto free_hndls;
3342
3343                                 beiscsi_conn->login_in_progress = 1;
3344                                 beiscsi_conn->plogin_sgl_handle =
3345                                                         io_task->psgl_handle;
3346                         } else {
3347                                 io_task->psgl_handle =
3348                                                 beiscsi_conn->plogin_sgl_handle;
3349                         }
3350                 } else {
3351                         spin_lock(&phba->mgmt_sgl_lock);
3352                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3353                         spin_unlock(&phba->mgmt_sgl_lock);
3354                         if (!io_task->psgl_handle)
3355                                 goto free_hndls;
3356                 }
3357         }
3358         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3359                                  wrb_index << 16) | (unsigned int)
3360                                 (io_task->psgl_handle->sgl_index));
3361         if (ring_mode) {
3362                 phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
3363                                      phba->fw_config.iscsi_icd_start] =
3364                                      io_task->psgl_handle;
3365                 io_task->psgl_handle->task = task;
3366                 io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid  -
3367                                             phba->fw_config.iscsi_cid_start;
3368         } else
3369                 io_task->pwrb_handle->pio_handle = task;
3370
3371         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3372         return 0;
3373
3374 free_hndls:
3375         phwi_ctrlr = phba->phwi_ctrlr;
3376         pwrb_context = &phwi_ctrlr->wrb_context[
3377                         beiscsi_conn->beiscsi_conn_cid -
3378                         phba->fw_config.iscsi_cid_start];
3379         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3380         io_task->pwrb_handle = NULL;
3381         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3382                       io_task->bhs_pa.u.a64.address);
3383         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3384         return -ENOMEM;
3385 }
3386
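/*
 * beiscsi_cleanup_task - undo beiscsi_alloc_pdu: return the WRB handle and
 * BHS buffer and, except for login tasks, the I/O or management SGL handle.
 */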
3387 static void beiscsi_cleanup_task(struct iscsi_task *task)
3388 {
3389         struct beiscsi_io_task *io_task = task->dd_data;
3390         struct iscsi_conn *conn = task->conn;
3391         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3392         struct beiscsi_hba *phba = beiscsi_conn->phba;
3393         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3394         struct hwi_wrb_context *pwrb_context;
3395         struct hwi_controller *phwi_ctrlr;
3396
3397         phwi_ctrlr = phba->phwi_ctrlr;
3398         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3399                         - phba->fw_config.iscsi_cid_start];
3400         if (io_task->pwrb_handle) {
3401                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3402                 io_task->pwrb_handle = NULL;
3403         }
3404
3405         if (io_task->cmd_bhs) {
3406                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3407                               io_task->bhs_pa.u.a64.address);
3408         }
3409
3410         if (task->sc) {
3411                 if (io_task->psgl_handle) {
3412                         spin_lock(&phba->io_sgl_lock);
3413                         free_io_sgl_handle(phba, io_task->psgl_handle);
3414                         spin_unlock(&phba->io_sgl_lock);
3415                         io_task->psgl_handle = NULL;
3416                 }
3417         } else {
3418                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3419                         return;
3420                 if (io_task->psgl_handle) {
3421                         spin_lock(&phba->mgmt_sgl_lock);
3422                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3423                         spin_unlock(&phba->mgmt_sgl_lock);
3424                         io_task->psgl_handle = NULL;
3425                 }
3426         }
3427 }
3428
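/*
 * beiscsi_iotask - build the WRB for a SCSI data command: set up the
 * embedded Data-Out header for writes, fill in the SGL and ring the TXULP0
 * doorbell to hand the work request to the adapter.
 */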
3429 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3430                           unsigned int num_sg, unsigned int xferlen,
3431                           unsigned int writedir)
3432 {
3433
3434         struct beiscsi_io_task *io_task = task->dd_data;
3435         struct iscsi_conn *conn = task->conn;
3436         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3437         struct beiscsi_hba *phba = beiscsi_conn->phba;
3438         struct iscsi_wrb *pwrb = NULL;
3439         unsigned int doorbell = 0;
3440
3441         pwrb = io_task->pwrb_handle->pwrb;
3442         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3443         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3444
3445         if (writedir) {
3446                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3447                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3448                               &io_task->cmd_bhs->iscsi_data_pdu,
3449                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3450                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3451                               &io_task->cmd_bhs->iscsi_data_pdu,
3452                               ISCSI_OPCODE_SCSI_DATA_OUT);
3453                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3454                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3455                 if (ring_mode)
3456                         io_task->psgl_handle->type = INI_WR_CMD;
3457                 else
3458                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3459                                       INI_WR_CMD);
3460                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3461         } else {
3462                 if (ring_mode)
3463                         io_task->psgl_handle->type = INI_RD_CMD;
3464                 else
3465                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3466                                       INI_RD_CMD);
3467                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3468         }
3469         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3470                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3471                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3472
3473         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3474                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3475                                   lun[0]));
3476         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3477         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3478                       io_task->pwrb_handle->wrb_index);
3479         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3480                       be32_to_cpu(task->cmdsn));
3481         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3482                       io_task->psgl_handle->sgl_index);
3483
3484         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3485
3486         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3487                       io_task->pwrb_handle->nxt_wrb_index);
3488         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3489
3490         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3491         if (!ring_mode)
3492                 doorbell |= (io_task->pwrb_handle->wrb_index &
3493                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3494         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3495
3496         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3497         return 0;
3498 }
3499
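/*
 * beiscsi_mtask - build and post the WRB for non-data PDUs (login, nop-out,
 * text, TMF, logout). For a TMF the ICDs of the task being aborted are
 * invalidated through mgmt_invalidate_icds() before the WRB is posted.
 */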
3500 static int beiscsi_mtask(struct iscsi_task *task)
3501 {
3502         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3503         struct iscsi_conn *conn = task->conn;
3504         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3505         struct beiscsi_hba *phba = beiscsi_conn->phba;
3506         struct iscsi_session *session;
3507         struct iscsi_wrb *pwrb = NULL;
3508         struct hwi_controller *phwi_ctrlr;
3509         struct hwi_wrb_context *pwrb_context;
3510         struct wrb_handle *pwrb_handle;
3511         unsigned int doorbell = 0;
3512         unsigned int i, cid;
3513         struct iscsi_task *aborted_task;
3514
3515         cid = beiscsi_conn->beiscsi_conn_cid;
3516         pwrb = io_task->pwrb_handle->pwrb;
3517         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3518                       be32_to_cpu(task->cmdsn));
3519         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3520                       io_task->pwrb_handle->wrb_index);
3521         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3522                       io_task->psgl_handle->sgl_index);
3523
3524         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3525         case ISCSI_OP_LOGIN:
3526                 if (ring_mode)
3527                         io_task->psgl_handle->type = TGT_DM_CMD;
3528                 else
3529                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3530                                       TGT_DM_CMD);
3531                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3532                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3533                 hwi_write_buffer(pwrb, task);
3534                 break;
3535         case ISCSI_OP_NOOP_OUT:
3536                 if (ring_mode)
3537                         io_task->psgl_handle->type = INI_RD_CMD;
3538                 else
3539                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3540                                       INI_RD_CMD);
3541                 hwi_write_buffer(pwrb, task);
3542                 break;
3543         case ISCSI_OP_TEXT:
3544                 if (ring_mode)
3545                         io_task->psgl_handle->type = INI_WR_CMD;
3546                 else
3547                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3548                                       INI_WR_CMD);
3549                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3550                 hwi_write_buffer(pwrb, task);
3551                 break;
3552         case ISCSI_OP_SCSI_TMFUNC:
3553                 session = conn->session;
3554                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3555                 phwi_ctrlr = phba->phwi_ctrlr;
3556                 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3557                                             phba->fw_config.iscsi_cid_start];
3558                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3559                                                                 >> 16];
3560                 aborted_task = pwrb_handle->pio_handle;
3561                 if (!aborted_task)
3562                         return 0;
3563
3564                 aborted_io_task = aborted_task->dd_data;
3565                 if (!aborted_io_task->scsi_cmnd)
3566                         return 0;
3567
3568                 mgmt_invalidate_icds(phba,
3569                                      aborted_io_task->psgl_handle->sgl_index,
3570                                      cid);
3571                 if (ring_mode)
3572                         io_task->psgl_handle->type = INI_TMF_CMD;
3573                 else
3574                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3575                                       INI_TMF_CMD);
3576                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3577                 hwi_write_buffer(pwrb, task);
3578                 break;
3579         case ISCSI_OP_LOGOUT:
3580                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3581                 if (ring_mode)
3582                         io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
3583                 else
3584                         AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3585                                       HWH_TYPE_LOGOUT);
3586                 hwi_write_buffer(pwrb, task);
3587                 break;
3588
3589         default:
3590                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3591                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3592                 return -EINVAL;
3593         }
3594
3595         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3596                       be32_to_cpu(task->data_count));
3597         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3598                       io_task->pwrb_handle->nxt_wrb_index);
3599         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3600
3601         doorbell |= cid & DB_WRB_POST_CID_MASK;
3602         if (!ring_mode)
3603                 doorbell |= (io_task->pwrb_handle->wrb_index &
3604                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3605         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3606         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3607         return 0;
3608 }
3609
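/*
 * beiscsi_task_xmit - libiscsi transmit hook: management tasks are sent
 * through beiscsi_mtask(); SCSI commands are DMA-mapped and handed to
 * beiscsi_iotask() with their transfer length and direction.
 */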
3610 static int beiscsi_task_xmit(struct iscsi_task *task)
3611 {
3612         struct iscsi_conn *conn = task->conn;
3613         struct beiscsi_io_task *io_task = task->dd_data;
3614         struct scsi_cmnd *sc = task->sc;
3615         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3616         struct scatterlist *sg;
3617         int num_sg;
3618         unsigned int  writedir = 0, xferlen = 0;
3619
3620         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3621                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3622                  task, conn, beiscsi_conn);
3623         if (!sc)
3624                 return beiscsi_mtask(task);
3625
3626         io_task->scsi_cmnd = sc;
3627         num_sg = scsi_dma_map(sc);
3628         if (num_sg < 0) {
3629                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3630                 return num_sg;
3631         }
3632         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3633                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3634         xferlen = scsi_bufflen(sc);
3635         sg = scsi_sglist(sc);
3636         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3637                 writedir = 1;
3638                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3639                          task->imm_count);
3640         } else
3641                 writedir = 0;
3642         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3643 }
3644
3645
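/*
 * beiscsi_remove - PCI remove handler: disable interrupts, release the IRQ
 * or MSI-X vectors, stop iopoll, clean up the port and its memory, free the
 * mailbox DMA area and unregister and free the SCSI host.
 */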
3646 static void beiscsi_remove(struct pci_dev *pcidev)
3647 {
3648         struct beiscsi_hba *phba = NULL;
3649         struct hwi_controller *phwi_ctrlr;
3650         struct hwi_context_memory *phwi_context;
3651         struct be_eq_obj *pbe_eq;
3652         unsigned int i, msix_vec;
3653
3654         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3655         if (!phba) {
3656                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3657                 return;
3658         }
3659
3660         phwi_ctrlr = phba->phwi_ctrlr;
3661         phwi_context = phwi_ctrlr->phwi_ctxt;
3662         hwi_disable_intr(phba);
3663         if (phba->msix_enabled) {
3664                 for (i = 0; i <= phba->num_cpus; i++) {
3665                         msix_vec = phba->msix_entries[i].vector;
3666                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3667                 }
3668         } else if (phba->pcidev->irq)
3669                 free_irq(phba->pcidev->irq, phba);
3671         pci_disable_msix(phba->pcidev);
3672         destroy_workqueue(phba->wq);
3673         if (blk_iopoll_enabled)
3674                 for (i = 0; i < phba->num_cpus; i++) {
3675                         pbe_eq = &phwi_context->be_eq[i];
3676                         blk_iopoll_disable(&pbe_eq->iopoll);
3677                 }
3678
3679         beiscsi_clean_port(phba);
3680         beiscsi_free_mem(phba);
3681         beiscsi_unmap_pci_function(phba);
3682         pci_free_consistent(phba->pcidev,
3683                             phba->ctrl.mbox_mem_alloced.size,
3684                             phba->ctrl.mbox_mem_alloced.va,
3685                             phba->ctrl.mbox_mem_alloced.dma);
3686         iscsi_host_remove(phba->shost);
3687         pci_dev_put(phba->pcidev);
3688         iscsi_host_free(phba->shost);
3689 }
3690
3691 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3692 {
3693         int i, status;
3694
3695         for (i = 0; i <= phba->num_cpus; i++)
3696                 phba->msix_entries[i].entry = i;
3697
3698         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3699                                  (phba->num_cpus + 1));
3700         if (!status)
3701                 phba->msix_enabled = true;
3702
3703         return;
3704 }
3705
3706 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3707                                 const struct pci_device_id *id)
3708 {
3709         struct beiscsi_hba *phba = NULL;
3710         struct hwi_controller *phwi_ctrlr;
3711         struct hwi_context_memory *phwi_context;
3712         struct be_eq_obj *pbe_eq;
3713         int ret, msix_vec, num_cpus, i;
3714
3715         ret = beiscsi_enable_pci(pcidev);
3716         if (ret < 0) {
3717                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3718                         "Failed to enable pci device\n");
3719                 return ret;
3720         }
3721
3722         phba = beiscsi_hba_alloc(pcidev);
3723         if (!phba) {
3724                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3725                         "Failed in beiscsi_hba_alloc\n");
                     ret = -ENOMEM;
3726                 goto disable_pci;
3727         }
3728         SE_DEBUG(DBG_LVL_8, "phba = %p\n", phba);
3729
3730         pci_set_drvdata(pcidev, phba);
3731         if (enable_msix)
3732                 num_cpus = find_num_cpus();
3733         else
3734                 num_cpus = 1;
3735         phba->num_cpus = num_cpus;
3736         SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3737
3738         if (enable_msix)
3739                 beiscsi_msix_enable(phba);
3740         ret = be_ctrl_init(phba, pcidev);
3741         if (ret) {
3742                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3743                                 "Failed in be_ctrl_init\n");
3744                 goto hba_free;
3745         }
3746
3747         spin_lock_init(&phba->io_sgl_lock);
3748         spin_lock_init(&phba->mgmt_sgl_lock);
3749         spin_lock_init(&phba->isr_lock);
3750         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3751         if (ret != 0) {
3752                 shost_printk(KERN_ERR, phba->shost,
3753                              "Error getting fw config\n");
3754                 goto free_port;
3755         }
3756         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3757         phba->shost->can_queue = phba->params.ios_per_ctrl;
3758         beiscsi_get_params(phba);
3759         ret = beiscsi_init_port(phba);
3760         if (ret < 0) {
3761                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3762                              "Failed in beiscsi_init_port\n");
3763                 goto free_port;
3764         }
3765
3766         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3767                  phba->shost->host_no);
3768         phba->wq = create_workqueue(phba->wq_name);
3769         if (!phba->wq) {
3770                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3771                                 "Failed to allocate work queue\n");
3772                 goto free_twq;
3773         }
3774
3775         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3776
3777         phwi_ctrlr = phba->phwi_ctrlr;
3778         phwi_context = phwi_ctrlr->phwi_ctxt;
3779         if (blk_iopoll_enabled) {
3780                 for (i = 0; i < phba->num_cpus; i++) {
3781                         pbe_eq = &phwi_context->be_eq[i];
3782                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3783                                         be_iopoll);
3784                         blk_iopoll_enable(&pbe_eq->iopoll);
3785                 }
3786         }
3787         ret = beiscsi_init_irqs(phba);
3788         if (ret < 0) {
3789                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3790                              "Failed to beiscsi_init_irqs\n");
3791                 goto free_blkenbld;
3792         }
3793         ret = hwi_enable_intr(phba);
3794         if (ret < 0) {
3795                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3796                              "Failed to hwi_enable_intr\n");
3797                 goto free_ctrlr;
3798         }
3799         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3800         return 0;
3801
3802 free_ctrlr:
3803         if (phba->msix_enabled) {
3804                 for (i = 0; i <= phba->num_cpus; i++) {
3805                         msix_vec = phba->msix_entries[i].vector;
3806                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3807                 }
3808         } else
3809                 if (phba->pcidev->irq)
3810                         free_irq(phba->pcidev->irq, phba);
3811         pci_disable_msix(phba->pcidev);
3812 free_blkenbld:
3813         destroy_workqueue(phba->wq);
3814         if (blk_iopoll_enabled)
3815                 for (i = 0; i < phba->num_cpus; i++) {
3816                         pbe_eq = &phwi_context->be_eq[i];
3817                         blk_iopoll_disable(&pbe_eq->iopoll);
3818                 }
3819 free_twq:
3820         beiscsi_clean_port(phba);
3821         beiscsi_free_mem(phba);
3822 free_port:
3823         pci_free_consistent(phba->pcidev,
3824                             phba->ctrl.mbox_mem_alloced.size,
3825                             phba->ctrl.mbox_mem_alloced.va,
3826                             phba->ctrl.mbox_mem_alloced.dma);
3827         beiscsi_unmap_pci_function(phba);
3828 hba_free:
3829         iscsi_host_remove(phba->shost);
3830         pci_dev_put(phba->pcidev);
3831         iscsi_host_free(phba->shost);
3832 disable_pci:
3833         pci_disable_device(pcidev);
3834         return ret;
3835 }
3836
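/*
 * iSCSI transport template registered with the iscsi transport class in
 * beiscsi_module_init().  The driver advertises data-path offload
 * (CAP_DATA_PATH_OFFLOAD) and supplies its own session, connection and
 * endpoint handlers, reusing libiscsi helpers (iscsi_conn_teardown,
 * iscsi_session_get_param, ...) where no hardware-specific handling is
 * needed.
 */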
3837 struct iscsi_transport beiscsi_iscsi_transport = {
3838         .owner = THIS_MODULE,
3839         .name = DRV_NAME,
3840         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
3841                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3842         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3843                 ISCSI_MAX_XMIT_DLENGTH |
3844                 ISCSI_HDRDGST_EN |
3845                 ISCSI_DATADGST_EN |
3846                 ISCSI_INITIAL_R2T_EN |
3847                 ISCSI_MAX_R2T |
3848                 ISCSI_IMM_DATA_EN |
3849                 ISCSI_FIRST_BURST |
3850                 ISCSI_MAX_BURST |
3851                 ISCSI_PDU_INORDER_EN |
3852                 ISCSI_DATASEQ_INORDER_EN |
3853                 ISCSI_ERL |
3854                 ISCSI_CONN_PORT |
3855                 ISCSI_CONN_ADDRESS |
3856                 ISCSI_EXP_STATSN |
3857                 ISCSI_PERSISTENT_PORT |
3858                 ISCSI_PERSISTENT_ADDRESS |
3859                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3860                 ISCSI_USERNAME | ISCSI_PASSWORD |
3861                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3862                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3863                 ISCSI_LU_RESET_TMO |
3864                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3865                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3866         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3867                                 ISCSI_HOST_INITIATOR_NAME,
3868         .create_session = beiscsi_session_create,
3869         .destroy_session = beiscsi_session_destroy,
3870         .create_conn = beiscsi_conn_create,
3871         .bind_conn = beiscsi_conn_bind,
3872         .destroy_conn = iscsi_conn_teardown,
3873         .set_param = beiscsi_set_param,
3874         .get_conn_param = beiscsi_conn_get_param,
3875         .get_session_param = iscsi_session_get_param,
3876         .get_host_param = beiscsi_get_host_param,
3877         .start_conn = beiscsi_conn_start,
3878         .stop_conn = beiscsi_conn_stop,
3879         .send_pdu = iscsi_conn_send_pdu,
3880         .xmit_task = beiscsi_task_xmit,
3881         .cleanup_task = beiscsi_cleanup_task,
3882         .alloc_pdu = beiscsi_alloc_pdu,
3883         .parse_pdu_itt = beiscsi_parse_pdu,
3884         .get_stats = beiscsi_conn_get_stats,
3885         .ep_connect = beiscsi_ep_connect,
3886         .ep_poll = beiscsi_ep_poll,
3887         .ep_disconnect = beiscsi_ep_disconnect,
3888         .session_recovery_timedout = iscsi_session_recovery_timedout,
3889 };
3890
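/* PCI driver glue: probe/remove handlers plus the device ID table. */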
3891 static struct pci_driver beiscsi_pci_driver = {
3892         .name = DRV_NAME,
3893         .probe = beiscsi_dev_probe,
3894         .remove = beiscsi_remove,
3895         .id_table = beiscsi_pci_id_table
3896 };
3897
3898
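/*
 * beiscsi_module_init - register the iSCSI transport with the transport
 * class first, then register the PCI driver; the transport registration
 * is rolled back if PCI registration fails.
 */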
3899 static int __init beiscsi_module_init(void)
3900 {
3901         int ret;
3902
3903         beiscsi_scsi_transport =
3904                         iscsi_register_transport(&beiscsi_iscsi_transport);
3905         if (!beiscsi_scsi_transport) {
3906                 SE_DEBUG(DBG_LVL_1,
3907                          "beiscsi_module_init - Unable to register beiscsi "
3908                          "transport.\n");
3909                 return -ENOMEM;
3910         }
3911         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
3912                  &beiscsi_iscsi_transport);
3913
3914         ret = pci_register_driver(&beiscsi_pci_driver);
3915         if (ret) {
3916                 SE_DEBUG(DBG_LVL_1,
3917                          "beiscsi_module_init - Unable to register "
3918                          "beiscsi pci driver.\n");
3919                 goto unregister_iscsi_transport;
3920         }
3921         ring_mode = 0;
3922         return 0;
3923
3924 unregister_iscsi_transport:
3925         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3926         return ret;
3927 }
3928
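/* beiscsi_module_exit - unregister in the reverse order of module init. */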
3929 static void __exit beiscsi_module_exit(void)
3930 {
3931         pci_unregister_driver(&beiscsi_pci_driver);
3932         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3933 }
3934
3935 module_init(beiscsi_module_init);
3936 module_exit(beiscsi_module_exit);