Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
Linus Torvalds [Sat, 28 May 2011 02:52:57 +0000 (19:52 -0700)]
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] lpfc 8.3.24: Extend BSG infrastructure and add link diagnostics
  [SCSI] lpfc 8.3.24: Add resource extent support
  [SCSI] lpfc 8.3.24: Add request-firmware support
  [SCSI] lpfc 8.3.24: Add SR-IOV control
  [SCSI] lpfc 8.3.24: Extended hardware support and support dump images
  [SCSI] lpfc 8.3.24: Miscellaneous Fixes and Corrections
  [SCSI] libsas: Add option for SATA soft reset
  [SCSI] libsas: check dev->gone before submitting sata i/o
  [SCSI] libsas: fix/amend device gone notification in sas_deform_port()
  [SCSI] MAINTAINERS update for SCSI (new email address)
  [SCSI] Fix Ultrastor asm snippet
  [SCSI] osst: fix warning
  [SCSI] osst: wrong index used in inner loop
  [SCSI] aic94xx: world-writable sysfs update_bios file
  [SCSI] MAINTAINERS: Add drivers/target/ entry
  [SCSI] target: Convert TASK_ATTR to scsi_tcq.h definitions
  [SCSI] target: Convert REPORT_LUNs to use int_to_scsilun
  [SCSI] target: Fix task->task_execute_queue=1 clear bug + LUN_RESET OOPs
  [SCSI] target: Fix bug with task_sg chained transport_free_dev_tasks release
  [SCSI] target: Fix interrupt context bug with stats_lock and core_tmr_alloc_req
  ...

79 files changed:
Documentation/scsi/ChangeLog.megaraid_sas
MAINTAINERS
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_ioc_cb.c
drivers/scsi/bfa/bfa_ioc_ct.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/ipr.c
drivers/scsi/libfc/fc_disc.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_libfc.h
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_phy.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_bsg.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/osst.c
drivers/scsi/qla4xxx/Makefile
drivers/scsi/qla4xxx/ql4_attr.c [new file with mode: 0644]
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_error.c
drivers/scsi/scsi_trace.c
drivers/scsi/sd.c
drivers/scsi/ultrastor.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
include/scsi/libsas.h
include/scsi/scsi_tcq.h
include/target/target_core_base.h
include/target/target_core_fabric_ops.h
include/target/target_core_transport.h

diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 4d9ce73..9ed1d9d 100644
@@ -1,3 +1,17 @@
+Release Date    : Wed. May 11, 2011 17:00:00 PST 2010 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford
+Current Version : 00.00.05.38-rc1
+Old Version     : 00.00.05.34-rc1
+    1. Remove MSI-X black list, use MFI_REG_STATE.ready.msiEnable.
+    2. Remove un-used function megasas_return_cmd_for_smid().
+    3. Check MFI_REG_STATE.fault.resetAdapter in megasas_reset_fusion().
+    4. Disable interrupts/free_irq() in megasas_shutdown().
+    5. Fix bug where AENs could be lost in probe() and resume().
+    6. Convert 6,10,12 byte CDB's to 16 byte CDB for large LBA's for FastPath
+       IO.
+    7. Add 1078 OCR support.
+-------------------------------------------------------------------------------
 Release Date    : Thu. Feb 24, 2011 17:00:00 PST 2010 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
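Item 6 in the changelog hunk above refers to rewriting 6-, 10- and 12-byte CDBs as 16-byte CDBs so that LBAs beyond 32 bits can be issued on the FastPath I/O path. As a generic illustration only (not the megaraid_sas code), a READ(16) CDB carries opcode 0x88 in byte 0, the 64-bit LBA big-endian in bytes 2-9, and the transfer length in bytes 10-13:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sketch: pack a READ(16) CDB for a 64-bit LBA. */
static void build_read16(uint8_t cdb[16], uint64_t lba, uint32_t num_blocks)
{
	int i;

	memset(cdb, 0, 16);
	cdb[0] = 0x88;                          /* READ(16) opcode */
	for (i = 0; i < 8; i++)                 /* bytes 2-9: LBA, big-endian */
		cdb[2 + i] = (lba >> (56 - 8 * i)) & 0xff;
	for (i = 0; i < 4; i++)                 /* bytes 10-13: transfer length */
		cdb[10 + i] = (num_blocks >> (24 - 8 * i)) & 0xff;
}

int main(void)
{
	uint8_t cdb[16];
	int i;

	build_read16(cdb, 0x100000000ULL, 8);   /* LBA above 32 bits needs READ(16) */
	for (i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}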
diff --git a/MAINTAINERS b/MAINTAINERS
index a33b115..b9f5aee 100644
@@ -5515,7 +5515,7 @@ F:        drivers/scsi/sg.c
 F:     include/scsi/sg.h
 
 SCSI SUBSYSTEM
-M:     "James E.J. Bottomley" <James.Bottomley@suse.de>
+M:     "James E.J. Bottomley" <JBottomley@parallels.com>
 L:     linux-scsi@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
@@ -6084,6 +6084,17 @@ F:       Documentation/filesystems/sysv-fs.txt
 F:     fs/sysv/
 F:     include/linux/sysv_fs.h
 
+TARGET SUBSYSTEM
+M:     Nicholas A. Bellinger <nab@linux-iscsi.org>
+L:     linux-scsi@vger.kernel.org
+L:     http://groups.google.com/group/linux-iscsi-target-dev
+W:     http://www.linux-iscsi.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
+S:     Supported
+F:     drivers/target/
+F:     include/target/
+F:     Documentation/target/
+
 TASKSTATS STATISTICS INTERFACE
 M:     Balbir Singh <balbir@linux.vnet.ibm.com>
 S:     Maintained
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d..d5ff142 100644
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
                        flash_error_table[i].reason);
 }
 
-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
        asd_show_update_bios, asd_store_update_bios);
 
 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
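The hunk above drops S_IWUGO (world-writable) in favour of S_IWUSR for the update_bios attribute, so only root can trigger a flash update through sysfs. S_IRUGO and S_IWUGO are kernel-only shorthands; a small user-space sketch, spelling them out from the POSIX mode bits, shows the resulting octal modes:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t irugo = S_IRUSR | S_IRGRP | S_IROTH;   /* kernel's S_IRUGO, 0444 */
	mode_t iwugo = S_IWUSR | S_IWGRP | S_IWOTH;   /* kernel's S_IWUGO, 0222 */

	printf("S_IRUGO|S_IWUGO = %04o (anyone may write)\n",
	       (unsigned int)(irugo | iwugo));
	printf("S_IRUGO|S_IWUSR = %04o (owner-only write)\n",
	       (unsigned int)(irugo | S_IWUSR));
	return 0;
}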
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c4..6c7e033 100644
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)               \
+                       ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)                \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)               \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
-                       if (bfa_ioc_sync_complete(ioc)) {
+                       if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08..c85182a 100644
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
                                        bfa_boolean_t msix);
        void            (*ioc_notify_fail)      (struct bfa_ioc_s *ioc);
        void            (*ioc_ownership_reset)  (struct bfa_ioc_s *ioc);
+       bfa_boolean_t   (*ioc_sync_start)       (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_join)        (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_leave)       (struct bfa_ioc_s *ioc);
        void            (*ioc_sync_ack)         (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713..89ae4c8 100644
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
 static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
        hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
        hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
        hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+       hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
        hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
        hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
        hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 }
 
 /*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+       return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
  * Cleanup hw semaphore and usecnt registers
  */
 static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129..9361252 100644
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
        hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
        hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+       hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
        hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
        hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
        hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
        writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+       uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+       uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+       /*
+        * Driver load time.  If the sync required bit for this PCI fn
+        * is set, it is due to an unclean exit by the driver for this
+        * PCI fn in the previous incarnation. Whoever comes here first
+        * should clean it up, no matter which PCI fn.
+        */
+
+       if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+               writel(0, ioc->ioc_regs.ioc_fail_sync);
+               writel(1, ioc->ioc_regs.ioc_usage_reg);
+               writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+               writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+               return BFA_TRUE;
+       }
+
+       return bfa_ioc_ct_sync_complete(ioc);
+}
+
 /*
  * Synchronized IOC failure processing routines
  */
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd5902..6bdd25a 100644
 #define BD_SPLIT_SIZE                  32768
 
 /* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
-#define BNX2I_SQ_WQES_MIN              16
-#define BNX2I_570X_SQ_WQES_MAX                 128
-#define BNX2I_5770X_SQ_WQES_MAX        512
-#define BNX2I_570X_SQ_WQES_DEFAULT     128
-#define BNX2I_5770X_SQ_WQES_DEFAULT    256
+#define BNX2I_SQ_WQES_MIN              16
+#define BNX2I_570X_SQ_WQES_MAX         128
+#define BNX2I_5770X_SQ_WQES_MAX                512
+#define BNX2I_570X_SQ_WQES_DEFAULT     128
+#define BNX2I_5770X_SQ_WQES_DEFAULT    128
 
 #define BNX2I_570X_CQ_WQES_MAX                 128
 #define BNX2I_5770X_CQ_WQES_MAX        512
 #define BNX2X_MAX_CQS                  8
 
 #define CNIC_ARM_CQE                   1
+#define CNIC_ARM_CQE_FP                        2
 #define CNIC_DISARM_CQE                        0
 
 #define REG_RD(__hba, offset)                          \
@@ -666,7 +667,9 @@ enum {
  *                      after HBA reset is completed by bnx2i/cnic/bnx2
  *                      modules
  * @state:              tracks offload connection state machine
- * @teardown_mode:      indicates if conn teardown is abortive or orderly
+ * @timestamp:          tracks the start time when the ep begins to connect
+ * @num_active_cmds:    tracks the number of outstanding commands for this ep
+ * @ec_shift:           the amount of shift as part of the event coal calc
  * @qp:                 QP information
  * @ids:                contains chip allocated *context id* & driver assigned
  *                      *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
        u32 state;
        unsigned long timestamp;
        int num_active_cmds;
+       u32 ec_shift;
 
        struct qp_info qp;
        struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b8951..5c54a2d 100644
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
        u16 next_index;
        u32 num_active_cmds;
 
-
        /* Coalesce CQ entries only on 10G devices */
        if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
                return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
         * interrupts and other unwanted results
         */
        cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
-       if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
-               return;
 
-       if (action == CNIC_ARM_CQE) {
+       if (action != CNIC_ARM_CQE_FP)
+               if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+                       return;
+
+       if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
                num_active_cmds = ep->num_active_cmds;
                if (num_active_cmds <= event_coal_min)
                        next_index = 1;
                else
                        next_index = event_coal_min +
-                               (num_active_cmds - event_coal_min) / event_coal_div;
+                                    ((num_active_cmds - event_coal_min) >>
+                                    ep->ec_shift);
                if (!next_index)
                        next_index = 1;
                cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
        iscsi_init.dummy_buffer_addr_hi =
                (u32) ((u64) hba->dummy_buf_dma >> 32);
 
+       hba->num_ccell = hba->max_sqes >> 1;
        hba->ctx_ccell_tasks =
                        ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
        iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
                        qp->cq_cons_idx++;
                }
        }
-       bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
 }
 
 /**
@@ -1948,22 +1950,23 @@ cqe_out:
 static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
                                        struct iscsi_kcqe *new_cqe_kcqe)
 {
-       struct bnx2i_conn *conn;
+       struct bnx2i_conn *bnx2i_conn;
        u32 iscsi_cid;
 
        iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
-       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+       bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
 
-       if (!conn) {
+       if (!bnx2i_conn) {
                printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
                return;
        }
-       if (!conn->ep) {
+       if (!bnx2i_conn->ep) {
                printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
                return;
        }
-
-       bnx2i_process_new_cqes(conn);
+       bnx2i_process_new_cqes(bnx2i_conn);
+       bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+       bnx2i_process_new_cqes(bnx2i_conn);
 }
 
 
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a28..6adbdc3 100644
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
        wait_event_interruptible_timeout(hba->eh_wait,
                                         (list_empty(&hba->ep_ofld_list) &&
                                         list_empty(&hba->ep_destroy_list)),
-                                        10 * HZ);
+                                        2 * HZ);
        /* Wait for all endpoints to be torn down, Chip will be reset once
         *  control returns to network driver. So it is required to cleanup and
         * release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9c..041928b 100644
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
 {
        struct iscsi_endpoint *ep;
        struct bnx2i_endpoint *bnx2i_ep;
+       u32 ec_div;
 
        ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
        if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
        bnx2i_ep->ep_iscsi_cid = (u16) -1;
        bnx2i_ep->hba = hba;
        bnx2i_ep->hba_age = hba->age;
+
+       ec_div = event_coal_div;
+       while (ec_div >>= 1)
+               bnx2i_ep->ec_shift += 1;
+
        hba->ofld_conns_active++;
        init_waitqueue_head(&bnx2i_ep->ofld_wait);
        return ep;
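The coalescing change in bnx2i_hwi.c above replaces a divide by event_coal_div with a right shift by ep->ec_shift, which the loop in bnx2i_alloc_ep() derives as log2 of the module parameter. A stand-alone sketch of that equivalence, with made-up values standing in for the module parameters:

#include <stdio.h>

int main(void)
{
	/* Stand-in values; in the driver these are module parameters. */
	unsigned int event_coal_min = 24;
	unsigned int event_coal_div = 4;
	unsigned int num_active_cmds = 100;
	unsigned int ec_shift = 0, ec_div = event_coal_div, next_index;

	/* log2(event_coal_div), as in bnx2i_alloc_ep() */
	while (ec_div >>= 1)
		ec_shift += 1;

	/* shift form used by bnx2i_arm_cq_event_coalescing() */
	next_index = event_coal_min +
		     ((num_active_cmds - event_coal_min) >> ec_shift);
	if (!next_index)
		next_index = 1;

	printf("ec_shift=%u next_index=%u\n", ec_shift, next_index);
	return 0;
}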
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
        mutex_init(&hba->net_dev_lock);
        init_waitqueue_head(&hba->eh_wait);
        if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-               hba->hba_shutdown_tmo = 20 * HZ;
+               hba->hba_shutdown_tmo = 30 * HZ;
                hba->conn_teardown_tmo = 20 * HZ;
                hba->conn_ctx_destroy_tmo = 6 * HZ;
        } else {        /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
        struct bnx2i_cmd *cmd = task->dd_data;
        struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
 
+       if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+               return -ENOMEM;
+
        /*
         * If there is no scsi_cmnd this must be a mgmt task
         */
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
        .change_queue_depth     = iscsi_change_queue_depth,
        .can_queue              = 1024,
        .max_sectors            = 127,
-       .cmd_per_lun            = 32,
+       .cmd_per_lun            = 24,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .sg_tablesize           = ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9..155d7b9 100644
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static int fcoe_validate_vport_create(struct fc_vport *);
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
        struct fcoe_interface *fcoe = port->priv;
        struct net_device *netdev = fcoe->netdev;
        struct fc_lport *vn_port;
+       int rc;
+       char buf[32];
+
+       rc = fcoe_validate_vport_create(vport);
+       if (rc) {
+               wwn_to_str(vport->port_name, buf, sizeof(buf));
+               printk(KERN_ERR "fcoe: Failed to create vport, "
+                       "WWPN (0x%s) already exists\n",
+                       buf);
+               return rc;
+       }
 
        mutex_lock(&fcoe_config_mutex);
        vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
        if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
                fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
 }
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ *      - WWPN supplied is unique for given lport
+ *
+ *
+*/
+static int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+       struct Scsi_Host *shost = vport_to_shost(vport);
+       struct fc_lport *n_port = shost_priv(shost);
+       struct fc_lport *vn_port;
+       int rc = 0;
+       char buf[32];
+
+       mutex_lock(&n_port->lp_mutex);
+
+       wwn_to_str(vport->port_name, buf, sizeof(buf));
+       /* Check if the wwpn is not same as that of the lport */
+       if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+               FCOE_DBG("vport WWPN 0x%s is same as that of the "
+                       "base port WWPN\n", buf);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Check if there is any existing vport with same wwpn */
+       list_for_each_entry(vn_port, &n_port->vports, list) {
+               if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+                       FCOE_DBG("vport with given WWPN 0x%s already "
+                       "exists\n", buf);
+                       rc = -EINVAL;
+                       break;
+               }
+       }
+
+out:
+       mutex_unlock(&n_port->lp_mutex);
+
+       return rc;
+}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd..c4a9399 100644
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
                        ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 
+static inline void wwn_to_str(u64 wwn, char *buf, int len)
+{
+       u8 wwpn[8];
+
+       u64_to_wwn(wwn, wwpn);
+       snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+               wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+               wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+
 #endif /* _FCOE_H_ */
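The new wwn_to_str() helper above renders a 64-bit WWN as a 16-character hex string for the fcoe.c log messages. A minimal user-space sketch of the same formatting; u64_to_wwn_be() here is a stand-in for the kernel's u64_to_wwn(), which stores the value in big-endian byte order:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's u64_to_wwn(): most significant byte first. */
static void u64_to_wwn_be(uint64_t wwn, uint8_t b[8])
{
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (wwn >> (56 - 8 * i)) & 0xff;
}

static void wwn_to_str_sketch(uint64_t wwn, char *buf, int len)
{
	uint8_t wwpn[8];

	u64_to_wwn_be(wwn, wwpn);
	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
		 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
		 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
}

int main(void)
{
	char buf[32];

	wwn_to_str_sketch(0x200000c0dd000000ULL, buf, sizeof(buf));
	printf("WWPN 0x%s\n", buf);   /* WWPN 0x200000c0dd000000 */
	return 0;
}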
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af..c74c4b8 100644
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
        struct fc_lport *lport = fip->lp;
        struct fc_lport *vn_port = NULL;
        u32 desc_mask;
-       int is_vn_port = 0;
+       int num_vlink_desc;
+       int reset_phys_port = 0;
+       struct fip_vn_desc **vlink_desc_arr = NULL;
 
        LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
        /*
         * mask of required descriptors.  Validating each one clears its bit.
         */
-       desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+       desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
 
        rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
        desc = (struct fip_desc *)(fh + 1);
+
+       /*
+        * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
+        * before determining max Vx_Port descriptor but a buggy FCF could have
+        * omitted either or both MAC Address and Name Identifier descriptors
+        */
+       num_vlink_desc = rlen / sizeof(*vp);
+       if (num_vlink_desc)
+               vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+                                        GFP_ATOMIC);
+       if (!vlink_desc_arr)
+               return;
+       num_vlink_desc = 0;
+
        while (rlen >= sizeof(*desc)) {
                dlen = desc->fip_dlen * FIP_BPW;
                if (dlen > rlen)
-                       return;
+                       goto err;
                /* Drop CVL if there are duplicate critical descriptors */
                if ((desc->fip_dtype < 32) &&
+                   (desc->fip_dtype != FIP_DT_VN_ID) &&
                    !(desc_mask & 1U << desc->fip_dtype)) {
                        LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
                                        "Descriptors in FIP CVL\n");
-                       return;
+                       goto err;
                }
                switch (desc->fip_dtype) {
                case FIP_DT_MAC:
                        mp = (struct fip_mac_desc *)desc;
                        if (dlen < sizeof(*mp))
-                               return;
+                               goto err;
                        if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
-                               return;
+                               goto err;
                        desc_mask &= ~BIT(FIP_DT_MAC);
                        break;
                case FIP_DT_NAME:
                        wp = (struct fip_wwn_desc *)desc;
                        if (dlen < sizeof(*wp))
-                               return;
+                               goto err;
                        if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
-                               return;
+                               goto err;
                        desc_mask &= ~BIT(FIP_DT_NAME);
                        break;
                case FIP_DT_VN_ID:
                        vp = (struct fip_vn_desc *)desc;
                        if (dlen < sizeof(*vp))
-                               return;
-                       if (compare_ether_addr(vp->fd_mac,
-                                              fip->get_src_addr(lport)) == 0 &&
-                           get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
-                           ntoh24(vp->fd_fc_id) == lport->port_id) {
-                               desc_mask &= ~BIT(FIP_DT_VN_ID);
-                               break;
+                               goto err;
+                       vlink_desc_arr[num_vlink_desc++] = vp;
+                       vn_port = fc_vport_id_lookup(lport,
+                                                     ntoh24(vp->fd_fc_id));
+                       if (vn_port && (vn_port == lport)) {
+                               mutex_lock(&fip->ctlr_mutex);
+                               per_cpu_ptr(lport->dev_stats,
+                                           get_cpu())->VLinkFailureCount++;
+                               put_cpu();
+                               fcoe_ctlr_reset(fip);
+                               mutex_unlock(&fip->ctlr_mutex);
                        }
-                       /* check if clr_vlink is for NPIV port */
-                       mutex_lock(&lport->lp_mutex);
-                       list_for_each_entry(vn_port, &lport->vports, list) {
-                               if (compare_ether_addr(vp->fd_mac,
-                                   fip->get_src_addr(vn_port)) == 0 &&
-                                   (get_unaligned_be64(&vp->fd_wwpn)
-                                                       == vn_port->wwpn) &&
-                                   (ntoh24(vp->fd_fc_id) ==
-                                           fc_host_port_id(vn_port->host))) {
-                                       desc_mask &= ~BIT(FIP_DT_VN_ID);
-                                       is_vn_port = 1;
-                                       break;
-                               }
-                       }
-                       mutex_unlock(&lport->lp_mutex);
-
                        break;
                default:
                        /* standard says ignore unknown descriptors >= 128 */
                        if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
-                               return;
+                               goto err;
                        break;
                }
                desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
        /*
         * reset only if all required descriptors were present and valid.
         */
-       if (desc_mask) {
+       if (desc_mask)
                LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
                                desc_mask);
+       else if (!num_vlink_desc) {
+               LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+               /*
+                * No Vx_Port description. Clear all NPIV ports,
+                * followed by physical port
+                */
+               mutex_lock(&lport->lp_mutex);
+               list_for_each_entry(vn_port, &lport->vports, list)
+                       fc_lport_reset(vn_port);
+               mutex_unlock(&lport->lp_mutex);
+
+               mutex_lock(&fip->ctlr_mutex);
+               per_cpu_ptr(lport->dev_stats,
+                           get_cpu())->VLinkFailureCount++;
+               put_cpu();
+               fcoe_ctlr_reset(fip);
+               mutex_unlock(&fip->ctlr_mutex);
+
+               fc_lport_reset(fip->lp);
+               fcoe_ctlr_solicit(fip, NULL);
        } else {
-               LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+               int i;
 
-               if (is_vn_port)
-                       fc_lport_reset(vn_port);
-               else {
-                       mutex_lock(&fip->ctlr_mutex);
-                       per_cpu_ptr(lport->dev_stats,
-                                   get_cpu())->VLinkFailureCount++;
-                       put_cpu();
-                       fcoe_ctlr_reset(fip);
-                       mutex_unlock(&fip->ctlr_mutex);
+               LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+               for (i = 0; i < num_vlink_desc; i++) {
+                       vp = vlink_desc_arr[i];
+                       vn_port = fc_vport_id_lookup(lport,
+                                                    ntoh24(vp->fd_fc_id));
+                       if (!vn_port)
+                               continue;
+
+                       /*
+                        * 'port_id' is already validated, check MAC address and
+                        * wwpn
+                        */
+                       if (compare_ether_addr(fip->get_src_addr(vn_port),
+                                               vp->fd_mac) != 0 ||
+                               get_unaligned_be64(&vp->fd_wwpn) !=
+                                                       vn_port->wwpn)
+                               continue;
+
+                       if (vn_port == lport)
+                               /*
+                                * Physical port, defer processing till all
+                                * listed NPIV ports are cleared
+                                */
+                               reset_phys_port = 1;
+                       else    /* NPIV port */
+                               fc_lport_reset(vn_port);
+               }
 
+               if (reset_phys_port) {
                        fc_lport_reset(fip->lp);
                        fcoe_ctlr_solicit(fip, NULL);
                }
        }
+
+err:
+       kfree(vlink_desc_arr);
 }
 
 /**
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c..41068e8 100644
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
        struct fcoe_transport *ft = NULL;
        enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-       /*
-        * Make sure the module has been initialized, and is not about to be
-        * removed.  Module parameter sysfs files are writable before the
-        * module_init function is called and after module_exit.
-        */
-       if (THIS_MODULE->state != MODULE_STATE_LIVE)
-               goto out_nodev;
-#endif
-
        mutex_lock(&ft_mutex);
 
        netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
        struct net_device *netdev = NULL;
        struct fcoe_transport *ft = NULL;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-       /*
-        * Make sure the module has been initialized, and is not about to be
-        * removed.  Module parameter sysfs files are writable before the
-        * module_init function is called and after module_exit.
-        */
-       if (THIS_MODULE->state != MODULE_STATE_LIVE)
-               goto out_nodev;
-#endif
-
        mutex_lock(&ft_mutex);
 
        netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
        struct net_device *netdev = NULL;
        struct fcoe_transport *ft = NULL;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-       /*
-        * Make sure the module has been initialized, and is not about to be
-        * removed.  Module parameter sysfs files are writable before the
-        * module_init function is called and after module_exit.
-        */
-       if (THIS_MODULE->state != MODULE_STATE_LIVE)
-               goto out_nodev;
-#endif
-
        mutex_lock(&ft_mutex);
 
        netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
        struct net_device *netdev = NULL;
        struct fcoe_transport *ft = NULL;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-       /*
-        * Make sure the module has been initialized, and is not about to be
-        * removed.  Module parameter sysfs files are writable before the
-        * module_init function is called and after module_exit.
-        */
-       if (THIS_MODULE->state != MODULE_STATE_LIVE)
-               goto out_nodev;
-#endif
-
        mutex_lock(&ft_mutex);
 
        netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca..888086c 100644
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
                if (ipr_cmd != NULL) {
                        /* Clear the PCI interrupt */
+                       num_hrrq = 0;
                        do {
                                writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
                        } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
                                        num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
-                       if (int_reg & IPR_PCII_HRRQ_UPDATED) {
-                               ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-                               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                               return IRQ_HANDLED;
-                       }
-
                } else if (rc == IRQ_NONE && irq_none == 0) {
                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
                        irq_none++;
+               } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+                          int_reg & IPR_PCII_HRRQ_UPDATED) {
+                       ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       return IRQ_HANDLED;
                } else
                        break;
        }
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b273..b9cb814 100644
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
        default:
                FC_DISC_DBG(disc, "Received an unsupported request, "
                            "the opcode is (%x)\n", op);
+               fc_frame_free(fp);
                break;
        }
 }
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a7..3b8a645 100644
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
            (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
            (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
                spin_lock_bh(&ep->ex_lock);
+               resp = ep->resp;
                rc = fc_exch_done_locked(ep);
                WARN_ON(fc_seq_exch(sp) != ep);
                spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
        spin_unlock_bh(&ep->ex_lock);
        return sp;
 err:
+       fc_fcp_ddp_done(fr_fsp(fp));
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a472..9cd2149 100644
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
  *                    DDP related resources for a fcp_pkt
  * @fsp: The FCP packet that DDP had been used on
  */
-static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
 {
        struct fc_lport *lport;
 
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
                error = lport->tt.seq_send(lport, seq, fp);
                if (error) {
                        WARN_ON(1);             /* send error should be rare */
-                       fc_fcp_retry_cmd(fsp);
-                       return 0;
+                       return error;
                }
                fp = NULL;
        }
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
                       FC_FCTL_REQ, 0);
 
        rec_tov = get_fsp_rec_tov(fsp);
-       seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+       seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+                                     fc_fcp_pkt_destroy,
                                      fsp, jiffies_to_msecs(rec_tov));
        if (!seq)
                goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
                return;
        }
 
-       fsp->recov_seq = NULL;
        switch (fc_frame_payload_op(fp)) {
        case ELS_LS_ACC:
                fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
                break;
        }
        fc_fcp_unlock_pkt(fsp);
-       fsp->lp->tt.exch_done(seq);
 out:
+       fsp->lp->tt.exch_done(seq);
        fc_frame_free(fp);
-       fc_fcp_pkt_release(fsp);        /* drop hold for outstanding SRR */
 }
 
 /**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
        if (fc_fcp_lock_pkt(fsp))
                goto out;
-       fsp->lp->tt.exch_done(fsp->recov_seq);
-       fsp->recov_seq = NULL;
        switch (PTR_ERR(fp)) {
        case -FC_EX_TIMEOUT:
                if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
        }
        fc_fcp_unlock_pkt(fsp);
 out:
-       fc_fcp_pkt_release(fsp);        /* drop hold for outstanding SRR */
+       fsp->lp->tt.exch_done(fsp->recov_seq);
 }
 
 /**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819..c7d0712 100644
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init;   /* FCP initiator provider */
  * Set up direct-data placement for this I/O request
  */
 void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
 
 /*
  * Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f..db9238f 100644
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
        struct sas_ha_struct *sas_ha;
        enum ata_completion_errors ac;
        unsigned long flags;
+       struct ata_link *link;
 
        if (!qc)
                goto qc_already_gone;
 
        dev = qc->ap->private_data;
        sas_ha = dev->port->ha;
+       link = &dev->sata_dev.ap->link;
 
        spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
        if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
            ((stat->stat == SAM_STAT_CHECK_CONDITION &&
              dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
                ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
-               qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+               if (!link->sactive) {
+                       qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+               } else {
+                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       if (unlikely(link->eh_info.err_mask))
+                               qc->flags |= ATA_QCFLAG_FAILED;
+               }
+
                dev->sata_dev.sstatus = resp->sstatus;
                dev->sata_dev.serror = resp->serror;
                dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
                        SAS_DPRINTK("%s: SAS error %x\n", __func__,
                                    stat->stat);
                        /* We saw a SAS error. Send a vague error. */
-                       qc->err_mask = ac;
+                       if (!link->sactive) {
+                               qc->err_mask = ac;
+                       } else {
+                               link->eh_info.err_mask |= AC_ERR_DEV;
+                               qc->flags |= ATA_QCFLAG_FAILED;
+                       }
+
                        dev->sata_dev.tf.feature = 0x04; /* status err */
                        dev->sata_dev.tf.command = ATA_ERR;
                }
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
        return ret;
 }
 
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+                              unsigned long deadline)
+{
+       struct ata_port *ap = link->ap;
+       struct domain_device *dev = ap->private_data;
+       struct sas_internal *i =
+               to_sas_internal(dev->port->ha->core.shost->transportt);
+       int res = TMF_RESP_FUNC_FAILED;
+       int ret = 0;
+
+       if (i->dft->lldd_ata_soft_reset)
+               res = i->dft->lldd_ata_soft_reset(dev);
+
+       if (res != TMF_RESP_FUNC_COMPLETE) {
+               SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+               ret = -EAGAIN;
+       }
+
+       switch (dev->sata_dev.command_set) {
+       case ATA_COMMAND_SET:
+               SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+               *class = ATA_DEV_ATA;
+               break;
+       case ATAPI_COMMAND_SET:
+               SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+               *class = ATA_DEV_ATAPI;
+               break;
+       default:
+               SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+                           __func__, dev->sata_dev.command_set);
+               *class = ATA_DEV_UNKNOWN;
+               break;
+       }
+
+       ap->cbl = ATA_CBL_SATA;
+       return ret;
+}
+
 static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 {
        if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 
 static struct ata_port_operations sas_sata_ops = {
        .prereset               = ata_std_prereset,
-       .softreset              = NULL,
+       .softreset              = sas_ata_soft_reset,
        .hardreset              = sas_ata_hard_reset,
        .postreset              = ata_std_postreset,
        .error_handler          = ata_std_error_handler,
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd..14e21b5 100644
@@ -57,7 +57,7 @@ int  sas_init_queue(struct sas_ha_struct *sas_ha);
 int  sas_init_events(struct sas_ha_struct *sas_ha);
 void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
 
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
 
 void sas_porte_bytes_dmaed(struct work_struct *work);
 void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b..e0f5018 100644
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
        sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
                        &phy->phy_events_pending);
        phy->error = 0;
-       sas_deform_port(phy);
+       sas_deform_port(phy, 1);
 }
 
 static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
        sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
                        &phy->phy_events_pending);
 
-       sas_deform_port(phy);
+       sas_deform_port(phy, 1);
 
        if (!port && phy->enabled && i->dft->lldd_control_phy) {
                phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdf..42fd1f2 100644
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 
        if (port) {
                if (!phy_is_wideport_member(port, phy))
-                       sas_deform_port(phy);
+                       sas_deform_port(phy, 0);
                else {
                        SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
                                    __func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
  * This is called when the physical link to the other phy has been
  * lost (on this phy), in Event thread context. We cannot delay here.
  */
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
 {
        struct sas_ha_struct *sas_ha = phy->ha;
        struct asd_sas_port *port = phy->port;
        struct sas_internal *si =
                to_sas_internal(sas_ha->core.shost->transportt);
+       struct domain_device *dev;
        unsigned long flags;
 
        if (!port)
                return;           /* done by a phy event */
 
-       if (port->port_dev)
-               port->port_dev->pathways--;
+       dev = port->port_dev;
+       if (dev)
+               dev->pathways--;
 
        if (port->num_phys == 1) {
+               if (dev && gone)
+                       dev->gone = 1;
                sas_unregister_domain_devices(port);
                sas_port_delete(port->port);
                port->port = NULL;
        } else
                sas_port_delete_phy(port->port, phy->phy);
 
-
        if (si->dft->lldd_port_deformed)
                si->dft->lldd_port_deformed(phy);
 
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
        sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
                        &phy->port_events_pending);
 
-       sas_deform_port(phy);
+       sas_deform_port(phy, 1);
 }
 
 void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
        sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
                        &phy->port_events_pending);
 
-       sas_deform_port(phy);
+       sas_deform_port(phy, 1);
 }
 
 void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
        sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
                        &phy->port_events_pending);
 
-       sas_deform_port(phy);
+       sas_deform_port(phy, 1);
 }
 
 /* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
 
        for (i = 0; i < sas_ha->num_phys; i++)
                if (sas_ha->sas_phy[i]->port)
-                       sas_deform_port(sas_ha->sas_phy[i]);
+                       sas_deform_port(sas_ha->sas_phy[i], 0);
 
 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f..eeba76c 100644
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
                struct sas_ha_struct *sas_ha = dev->port->ha;
                struct sas_task *task;
 
+               /* If the device fell off, no sense in issuing commands */
+               if (dev->gone) {
+                       cmd->result = DID_BAD_TARGET << 16;
+                       scsi_done(cmd);
+                       goto out;
+               }
+
                if (dev_is_sata(dev)) {
                        unsigned long flags;
 
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
                        goto out;
                }
 
-               /* If the device fell off, no sense in issuing commands */
-               if (dev->gone) {
-                       cmd->result = DID_BAD_TARGET << 16;
-                       scsi_done(cmd);
-                       goto out;
-               }
-
                res = -ENOMEM;
                task = sas_create_task(cmd, dev, GFP_ATOMIC);
                if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d8..8ec2c86 100644
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
                downloads using bsg */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT    4096    /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
 #define LPFC_MAX_PROT_SG_SEG_CNT 4096  /* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
                                     (1 << LPFC_USER_LINK_SPEED_AUTO))
 #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
 
+enum nemb_type {
+       nemb_mse = 1,
+       nemb_hbd
+};
+
+enum mbox_type {
+       mbox_rd = 1,
+       mbox_wr
+};
+
+enum dma_type {
+       dma_mbox = 1,
+       dma_ebuf
+};
+
+enum sta_type {
+       sta_pre_addr = 1,
+       sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+       uint32_t state;
+#define LPFC_BSG_MBOX_IDLE             0
+#define LPFC_BSG_MBOX_HOST              1
+#define LPFC_BSG_MBOX_PORT             2
+#define LPFC_BSG_MBOX_DONE             3
+#define LPFC_BSG_MBOX_ABTS             4
+       enum nemb_type nembType;
+       enum mbox_type mboxType;
+       uint32_t numBuf;
+       uint32_t mbxTag;
+       uint32_t seqNum;
+       struct lpfc_dmabuf *mbx_dmabuf;
+       struct list_head ext_dmabuf_list;
+};
+
 struct lpfc_hba {
        /* SCSI interface function jump table entries */
        int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
 
        MAILBOX_t *mbox;
        uint32_t *mbox_ext;
+       struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
        uint32_t ha_copy;
        struct _PCB *pcb;
        struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
        uint32_t cfg_hostmem_hgp;
        uint32_t cfg_log_verbose;
        uint32_t cfg_aer_support;
+       uint32_t cfg_sriov_nr_virtfn;
        uint32_t cfg_iocb_cnt;
        uint32_t cfg_suppress_link_up;
 #define LPFC_INITIALIZE_LINK              0    /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
        uint32_t          *hbq_get;     /* Host mem address of HBQ get ptrs */
 
        int brd_no;                     /* FC board number */
-
        char SerialNumber[32];          /* adapter Serial Number */
        char OptionROMVersion[32];      /* adapter BIOS / Fcode version */
        char ModelDesc[256];            /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
        uint16_t vpi_base;
        uint16_t vfi_base;
        unsigned long *vpi_bmask;       /* vpi allocation table */
+       uint16_t *vpi_ids;
+       uint16_t vpi_count;
+       struct list_head lpfc_vpi_blk_list;
 
        /* Data structure used by fabric iocb scheduler */
        struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8f..135a53b 100644
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+       struct completion online_compl;
+       uint32_t reg_val;
+       int status = 0;
+       int rc;
+
+       if (!phba->cfg_enable_hba_reset)
+               return -EIO;
+
+       if ((phba->sli_rev < LPFC_SLI_REV4) ||
+           (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+            LPFC_SLI_INTF_IF_TYPE_2))
+               return -EPERM;
+
+       status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+       if (status != 0)
+               return status;
+
+       /* wait for the device to be quiesced before firmware reset */
+       msleep(100);
+
+       reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+                       LPFC_CTL_PDEV_CTL_OFFSET);
+
+       if (opcode == LPFC_FW_DUMP)
+               reg_val |= LPFC_FW_DUMP_REQUEST;
+       else if (opcode == LPFC_FW_RESET)
+               reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+       else if (opcode == LPFC_DV_RESET)
+               reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+       writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+              LPFC_CTL_PDEV_CTL_OFFSET);
+       /* flush */
+       readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+       /* delay driver action following IF_TYPE_2 reset */
+       msleep(100);
+
+       init_completion(&online_compl);
+       rc = lpfc_workq_post_event(phba, &status, &online_compl,
+                                  LPFC_EVT_ONLINE);
+       if (rc == 0)
+               return -ENOMEM;
+
+       wait_for_completion(&online_compl);
+
+       if (status != 0)
+               return -EIO;
+
+       return 0;
+}
+
+/**
  * lpfc_nport_evt_cnt_show - Return the number of nport events
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
                        return -EINVAL;
                else
                        status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+       else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+               status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+       else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+               status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+       else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+               status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
        else
                return -EINVAL;
 
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support, 0 will be returned if called on virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_hba *phba = vport->phba;
+       struct pci_dev *pdev = phba->pcidev;
+       union  lpfc_sli4_cfg_shdr *shdr;
+       uint32_t shdr_status, shdr_add_status;
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+       struct lpfc_rsrc_desc_pcie *desc;
+       uint32_t max_nr_virtfn;
+       uint32_t desc_count;
+       int length, rc, i;
+
+       if ((phba->sli_rev < LPFC_SLI_REV4) ||
+           (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+            LPFC_SLI_INTF_IF_TYPE_2))
+               return -EPERM;
+
+       if (!pdev->is_physfn)
+               return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+
+       /* get the maximum number of virtfn support by physfn */
+       length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+                        length, LPFC_SLI4_MBX_EMBED);
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+       bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+              phba->sli4_hba.iov.pf_number + 1);
+
+       get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+       bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+              LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+       rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+                               lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+       if (rc != MBX_TIMEOUT) {
+               /* check return status */
+               shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+               shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                        &shdr->response);
+               if (shdr_status || shdr_add_status || rc)
+                       goto error_out;
+
+       } else
+               goto error_out;
+
+       desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+       for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+               desc = (struct lpfc_rsrc_desc_pcie *)
+                       &get_prof_cfg->u.response.prof_cfg.desc[i];
+               if (LPFC_RSRC_DESC_TYPE_PCIE ==
+                   bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+                       max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+                                              desc);
+                       break;
+               }
+       }
+
+       if (i < LPFC_RSRC_DESC_MAX_NUM) {
+               if (rc != MBX_TIMEOUT)
+                       mempool_free(mboxq, phba->mbox_mem_pool);
+               return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+       }
+
+error_out:
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+       return -EIO;
+}
+
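
For context, the new lpfc_sriov_hw_max_virtfn attribute is consumed with an
ordinary sysfs read. A minimal user-space sketch, assuming a hypothetical
host0 instance (the path and host number are illustrative, not part of the
patch):

    #include <stdio.h>

    /* Hypothetical reader for the read-only attribute added above. */
    int main(void)
    {
            FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_sriov_hw_max_virtfn", "r");
            int max_vfn;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &max_vfn) == 1)
                    printf("max virtual functions: %d\n", max_vfn);
            fclose(f);
            return 0;
    }
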
+/**
  * lpfc_param_show - Return a cfg attribute value in decimal
  *
  * Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
 static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
 static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
 static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+                  lpfc_sriov_hw_max_virtfn_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
  *
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
  * @count: unused variable.
  *
  * Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
 /**
  * lpfc_aer_support_init - Set the initial adapters aer support flag
  * @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
  *
  * Description:
  * If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
  * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
  * @count: unused variable.
  *
  * Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
                   lpfc_aer_cleanup_state);
 
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through user sysfs, the driver shall try to
+ * enable or disable SR-IOV virtual functions according to the following:
+ *
+ * If no virtual function has been enabled on the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EPERM shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If non-zero virtual functions have already been enabled on the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EEXIST will be returned and the driver does nothing.
+ *
+ * If the nr_vfn provided is zero and non-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and the driver's cfg_sriov_nr_virtfn shall be set
+ * to zero. Otherwise, if no virtual function has been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if the requested change succeeded.
+ * -EINVAL if val is out of range, -EEXIST if virtual functions are
+ * already enabled, -EPERM if enabling the virtual functions failed.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba *phba = vport->phba;
+       struct pci_dev *pdev = phba->pcidev;
+       int val = 0, rc = -EINVAL;
+
+       /* Sanity check on user data */
+       if (!isdigit(buf[0]))
+               return -EINVAL;
+       if (sscanf(buf, "%i", &val) != 1)
+               return -EINVAL;
+       if (val < 0)
+               return -EINVAL;
+
+       /* Request disabling virtual functions */
+       if (val == 0) {
+               if (phba->cfg_sriov_nr_virtfn > 0) {
+                       pci_disable_sriov(pdev);
+                       phba->cfg_sriov_nr_virtfn = 0;
+               }
+               return strlen(buf);
+       }
+
+       /* Request enabling virtual functions */
+       if (phba->cfg_sriov_nr_virtfn > 0) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3018 There are %d virtual functions "
+                               "enabled on physical function.\n",
+                               phba->cfg_sriov_nr_virtfn);
+               return -EEXIST;
+       }
+
+       if (val <= LPFC_MAX_VFN_PER_PFN)
+               phba->cfg_sriov_nr_virtfn = val;
+       else {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3019 Enabling %d virtual functions is not "
+                               "allowed.\n", val);
+               return -EINVAL;
+       }
+
+       rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+       if (rc) {
+               phba->cfg_sriov_nr_virtfn = 0;
+               rc = -EPERM;
+       } else
+               rc = strlen(buf);
+
+       return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of sr-iov virtual functions to be enabled.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL if val is out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+       if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+               phba->cfg_sriov_nr_virtfn = val;
+               return 0;
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3017 Enabling %d virtual functions is not "
+                       "allowed.\n", val);
+       return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+                  lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
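
Similarly, the writable lpfc_sriov_nr_virtfn attribute defined above can be
driven from user space; a minimal sketch, with the host number and requested
VF count purely illustrative:

    #include <stdio.h>

    /* Hypothetical: request 4 VFs on host0, then write 0 to disable them again. */
    static int set_nr_virtfn(int nr)
    {
            FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn", "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", nr);
            return fclose(f);
    }

    int main(void)
    {
            if (set_nr_virtfn(4))
                    return 1;
            return set_nr_virtfn(0);
    }
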
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_prot_sg_seg_cnt,
        &dev_attr_lpfc_aer_support,
        &dev_attr_lpfc_aer_state_cleanup,
+       &dev_attr_lpfc_sriov_nr_virtfn,
        &dev_attr_lpfc_suppress_link_up,
        &dev_attr_lpfc_iocb_cnt,
        &dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_fips_level,
        &dev_attr_lpfc_fips_rev,
        &dev_attr_lpfc_dss,
+       &dev_attr_lpfc_sriov_hw_max_virtfn,
        NULL,
 };
 
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
                .name = "mbox",
                .mode = S_IRUSR | S_IWUSR,
        },
-       .size = MAILBOX_CMD_SIZE,
+       .size = MAILBOX_SYSFS_MAX,
        .read = sysfs_mbox_read,
        .write = sysfs_mbox_write,
 };
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
        lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
        lpfc_aer_support_init(phba, lpfc_aer_support);
+       lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
        lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
        lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
        phba->cfg_enable_dss = 1;
index 853e504..7fb0ba4 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/list.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
 struct lpfc_bsg_mbox {
        LPFC_MBOXQ_t *pmboxq;
        MAILBOX_t *mb;
-       struct lpfc_dmabuf *rxbmp; /* for BIU diags */
-       struct lpfc_dmabufext *dmp; /* for BIU diags */
+       struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
        uint8_t *ext; /* extended mailbox data */
        uint32_t mbOffset; /* from app */
        uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        cmd->ulpLe = 1;
        cmd->ulpClass = CLASS3;
        cmd->ulpContext = ndlp->nlp_rpi;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
        cmd->ulpOwner = OWN_CHIP;
        cmdiocbq->vport = phba->pport;
        cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
                }
 
                icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       icmd->ulpContext =
+                               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
                /* The exchange is done, mark the entry as invalid */
                phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
        } else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
 }
 
 /**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
+ * @phba: Pointer to HBA context object.
  * @job: LPFC_BSG_VENDOR_DIAG_MODE
  *
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing the driver for diag loopback
+ * on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+       struct lpfc_vport **vports;
+       struct Scsi_Host *shost;
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+       int i = 0;
+
+       psli = &phba->sli;
+       if (!psli)
+               return -ENODEV;
+
+       pring = &psli->ring[LPFC_FCP_RING];
+       if (!pring)
+               return -ENODEV;
+
+       if ((phba->link_state == LPFC_HBA_ERROR) ||
+           (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+               return -EACCES;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports) {
+               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       scsi_block_requests(shost);
+               }
+               lpfc_destroy_vport_work_array(phba, vports);
+       } else {
+               shost = lpfc_shost_from_vport(phba->pport);
+               scsi_block_requests(shost);
+       }
+
+       while (pring->txcmplq_cnt) {
+               if (i++ > 500)  /* wait up to 5 seconds */
+                       break;
+               msleep(10);
+       }
+       return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for the driver's exit processing after
+ * setting up diag loopback mode on the device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+       struct Scsi_Host *shost;
+       struct lpfc_vport **vports;
+       int i;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports) {
+               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       scsi_unblock_requests(shost);
+               }
+               lpfc_destroy_vport_work_array(phba, vports);
+       } else {
+               shost = lpfc_shost_from_vport(phba->pport);
+               scsi_unblock_requests(shost);
+       }
+       return;
+}
+
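
The two helpers above are intended to bracket the actual diag work; a minimal
sketch of the pairing they establish (do_diag_work() is a hypothetical
placeholder for the mailbox operations performed in between):

    /* Sketch only: enter diag mode, do the work, always exit on the way out. */
    static int diag_mode_pattern(struct lpfc_hba *phba, struct fc_bsg_job *job)
    {
            int rc;

            rc = lpfc_bsg_diag_mode_enter(phba, job);  /* block scsi requests */
            if (rc)
                    return rc;

            rc = do_diag_work(phba);                   /* hypothetical diag step */

            lpfc_bsg_diag_mode_exit(phba);             /* unblock scsi requests */
            return rc;
    }
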
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
  * All new scsi requests are blocked, a small delay is used to allow the
  * scsi requests to complete then the link is brought down. If the link
  * is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
  * All of this is done in-line.
  */
 static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 {
-       struct Scsi_Host *shost = job->shost;
-       struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
-       struct lpfc_hba *phba = vport->phba;
        struct diag_mode_set *loopback_mode;
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
        uint32_t link_flags;
        uint32_t timeout;
-       struct lpfc_vport **vports;
        LPFC_MBOXQ_t *pmboxq;
        int mbxstatus;
        int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
        /* no data to return just the return code */
        job->reply->reply_payload_rcv_len = 0;
 
-       if (job->request_len <
-           sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+       if (job->request_len < sizeof(struct fc_bsg_request) +
+           sizeof(struct diag_mode_set)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-                               "2738 Received DIAG MODE request below minimum "
-                               "size\n");
+                               "2738 Received DIAG MODE request size:%d "
+                               "below the minimum size:%d\n",
+                               job->request_len,
+                               (int)(sizeof(struct fc_bsg_request) +
+                               sizeof(struct diag_mode_set)));
                rc = -EINVAL;
                goto job_error;
        }
 
+       rc = lpfc_bsg_diag_mode_enter(phba, job);
+       if (rc)
+               goto job_error;
+
+       /* bring the link to diagnostic mode */
        loopback_mode = (struct diag_mode_set *)
                job->request->rqst_data.h_vendor.vendor_cmd;
        link_flags = loopback_mode->type;
        timeout = loopback_mode->timeout * 100;
 
-       if ((phba->link_state == LPFC_HBA_ERROR) ||
-           (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
-           (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
-               rc = -EACCES;
-               goto job_error;
-       }
-
        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmboxq) {
                rc = -ENOMEM;
-               goto job_error;
-       }
-
-       vports = lpfc_create_vport_work_array(phba);
-       if (vports) {
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-                       shost = lpfc_shost_from_vport(vports[i]);
-                       scsi_block_requests(shost);
-               }
-
-               lpfc_destroy_vport_work_array(phba, vports);
-       } else {
-               shost = lpfc_shost_from_vport(phba->pport);
-               scsi_block_requests(shost);
+               goto loopback_mode_exit;
        }
-
-       while (pring->txcmplq_cnt) {
-               if (i++ > 500)  /* wait up to 5 seconds */
-                       break;
-
-               msleep(10);
-       }
-
        memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
        pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
        pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
                rc = -ENODEV;
 
 loopback_mode_exit:
-       vports = lpfc_create_vport_work_array(phba);
-       if (vports) {
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-                       shost = lpfc_shost_from_vport(vports[i]);
-                       scsi_unblock_requests(shost);
+       lpfc_bsg_diag_mode_exit(phba);
+
+       /*
+        * Let SLI layer release mboxq if mbox command completed after timeout.
+        */
+       if (mbxstatus != MBX_TIMEOUT)
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+       /* make error code available to userspace */
+       job->reply->result = rc;
+       /* complete the job back to userspace if no error */
+       if (rc == 0)
+               job->job_done(job);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag or normal operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+       LPFC_MBOXQ_t *pmboxq;
+       struct lpfc_mbx_set_link_diag_state *link_diag_state;
+       uint32_t req_len, alloc_len;
+       int mbxstatus = MBX_SUCCESS, rc;
+
+       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmboxq)
+               return -ENOMEM;
+
+       req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+                  sizeof(struct lpfc_sli4_cfg_mhdr));
+       alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                               LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+                               req_len, LPFC_SLI4_MBX_EMBED);
+       if (alloc_len != req_len) {
+               rc = -ENOMEM;
+               goto link_diag_state_set_out;
+       }
+       link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+       bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+              phba->sli4_hba.link_state.number);
+       bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+              phba->sli4_hba.link_state.type);
+       if (diag)
+               bf_set(lpfc_mbx_set_diag_state_diag,
+                      &link_diag_state->u.req, 1);
+       else
+               bf_set(lpfc_mbx_set_diag_state_diag,
+                      &link_diag_state->u.req, 0);
+
+       mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+       if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+               rc = 0;
+       else
+               rc = -ENODEV;
+
+link_diag_state_set_out:
+       if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+
+       return rc;
+}
+
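
lpfc_sli4_bsg_set_link_diag_state() is used in set/restore pairs by the
handlers that follow; a sketch of that pairing (run_diagnostic() is a
hypothetical placeholder for the actual diag operation):

    /* Sketch only: put the link in diag state, run the test, restore it. */
    static int sli4_diag_wrap(struct lpfc_hba *phba)
    {
            int rc;

            rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);   /* enter diag state */
            if (rc)
                    return rc;

            rc = run_diagnostic(phba);                         /* hypothetical work */

            lpfc_sli4_bsg_set_link_diag_state(phba, 0);        /* back to normal */
            return rc;
    }
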
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+       struct diag_mode_set *loopback_mode;
+       uint32_t link_flags, timeout, req_len, alloc_len;
+       struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+       LPFC_MBOXQ_t *pmboxq = NULL;
+       int mbxstatus, i, rc = 0;
+
+       /* no data to return just the return code */
+       job->reply->reply_payload_rcv_len = 0;
+
+       if (job->request_len < sizeof(struct fc_bsg_request) +
+           sizeof(struct diag_mode_set)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3011 Received DIAG MODE request size:%d "
+                               "below the minimum size:%d\n",
+                               job->request_len,
+                               (int)(sizeof(struct fc_bsg_request) +
+                               sizeof(struct diag_mode_set)));
+               rc = -EINVAL;
+               goto job_error;
+       }
+
+       rc = lpfc_bsg_diag_mode_enter(phba, job);
+       if (rc)
+               goto job_error;
+
+       /* bring the link to diagnostic mode */
+       loopback_mode = (struct diag_mode_set *)
+               job->request->rqst_data.h_vendor.vendor_cmd;
+       link_flags = loopback_mode->type;
+       timeout = loopback_mode->timeout * 100;
+
+       rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+       if (rc)
+               goto loopback_mode_exit;
+
+       /* wait for link down before proceeding */
+       i = 0;
+       while (phba->link_state != LPFC_LINK_DOWN) {
+               if (i++ > timeout) {
+                       rc = -ETIMEDOUT;
+                       goto loopback_mode_exit;
+               }
+               msleep(10);
+       }
+       /* set up loopback mode */
+       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmboxq) {
+               rc = -ENOMEM;
+               goto loopback_mode_exit;
+       }
+       req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+                  sizeof(struct lpfc_sli4_cfg_mhdr));
+       alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                               LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+                               req_len, LPFC_SLI4_MBX_EMBED);
+       if (alloc_len != req_len) {
+               rc = -ENOMEM;
+               goto loopback_mode_exit;
+       }
+       link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+       bf_set(lpfc_mbx_set_diag_state_link_num,
+              &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+       bf_set(lpfc_mbx_set_diag_state_link_type,
+              &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+       if (link_flags == INTERNAL_LOOP_BACK)
+               bf_set(lpfc_mbx_set_diag_lpbk_type,
+                      &link_diag_loopback->u.req,
+                      LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+       else
+               bf_set(lpfc_mbx_set_diag_lpbk_type,
+                      &link_diag_loopback->u.req,
+                      LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+       mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+       if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+               rc = -ENODEV;
+       else {
+               phba->link_flag |= LS_LOOPBACK_MODE;
+               /* wait for the link attention interrupt */
+               msleep(100);
+               i = 0;
+               while (phba->link_state != LPFC_HBA_READY) {
+                       if (i++ > timeout) {
+                               rc = -ETIMEDOUT;
+                               break;
+                       }
+                       msleep(10);
                }
-               lpfc_destroy_vport_work_array(phba, vports);
-       } else {
-               shost = lpfc_shost_from_vport(phba->pport);
-               scsi_unblock_requests(shost);
        }
 
+loopback_mode_exit:
+       lpfc_bsg_diag_mode_exit(phba);
+
        /*
         * Let SLI layer release mboxq if mbox command completed after timeout.
         */
-       if (mbxstatus != MBX_TIMEOUT)
+       if (pmboxq && (mbxstatus != MBX_TIMEOUT))
                mempool_free(pmboxq, phba->mbox_mem_pool);
 
 job_error:
@@ -1622,6 +1846,234 @@ job_error:
 }
 
 /**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+       struct Scsi_Host *shost;
+       struct lpfc_vport *vport;
+       struct lpfc_hba *phba;
+       int rc;
+
+       shost = job->shost;
+       if (!shost)
+               return -ENODEV;
+       vport = (struct lpfc_vport *)job->shost->hostdata;
+       if (!vport)
+               return -ENODEV;
+       phba = vport->phba;
+       if (!phba)
+               return -ENODEV;
+
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+       else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+                LPFC_SLI_INTF_IF_TYPE_2)
+               rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+       else
+               rc = -ENODEV;
+
+       return rc;
+
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+       struct Scsi_Host *shost;
+       struct lpfc_vport *vport;
+       struct lpfc_hba *phba;
+       int rc;
+
+       shost = job->shost;
+       if (!shost)
+               return -ENODEV;
+       vport = (struct lpfc_vport *)job->shost->hostdata;
+       if (!vport)
+               return -ENODEV;
+       phba = vport->phba;
+       if (!phba)
+               return -ENODEV;
+
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               return -ENODEV;
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+           LPFC_SLI_INTF_IF_TYPE_2)
+               return -ENODEV;
+
+       rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+       if (!rc)
+               rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs an SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+       struct Scsi_Host *shost;
+       struct lpfc_vport *vport;
+       struct lpfc_hba *phba;
+       LPFC_MBOXQ_t *pmboxq;
+       struct sli4_link_diag *link_diag_test_cmd;
+       uint32_t req_len, alloc_len;
+       uint32_t timeout;
+       struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+       union lpfc_sli4_cfg_shdr *shdr;
+       uint32_t shdr_status, shdr_add_status;
+       struct diag_status *diag_status_reply;
+       int mbxstatus, rc = 0;
+
+       shost = job->shost;
+       if (!shost) {
+               rc = -ENODEV;
+               goto job_error;
+       }
+       vport = (struct lpfc_vport *)job->shost->hostdata;
+       if (!vport) {
+               rc = -ENODEV;
+               goto job_error;
+       }
+       phba = vport->phba;
+       if (!phba) {
+               rc = -ENODEV;
+               goto job_error;
+       }
+
+       if (phba->sli_rev < LPFC_SLI_REV4) {
+               rc = -ENODEV;
+               goto job_error;
+       }
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+           LPFC_SLI_INTF_IF_TYPE_2) {
+               rc = -ENODEV;
+               goto job_error;
+       }
+
+       if (job->request_len < sizeof(struct fc_bsg_request) +
+           sizeof(struct sli4_link_diag)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3013 Received LINK DIAG TEST request "
+                               "size:%d below the minimum size:%d\n",
+                               job->request_len,
+                               (int)(sizeof(struct fc_bsg_request) +
+                               sizeof(struct sli4_link_diag)));
+               rc = -EINVAL;
+               goto job_error;
+       }
+
+       rc = lpfc_bsg_diag_mode_enter(phba, job);
+       if (rc)
+               goto job_error;
+
+       link_diag_test_cmd = (struct sli4_link_diag *)
+                        job->request->rqst_data.h_vendor.vendor_cmd;
+       timeout = link_diag_test_cmd->timeout * 100;
+
+       rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+       if (rc)
+               goto job_error;
+
+       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmboxq) {
+               rc = -ENOMEM;
+               goto link_diag_test_exit;
+       }
+
+       req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+                  sizeof(struct lpfc_sli4_cfg_mhdr));
+       alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                                    LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+                                    req_len, LPFC_SLI4_MBX_EMBED);
+       if (alloc_len != req_len) {
+               rc = -ENOMEM;
+               goto link_diag_test_exit;
+       }
+       run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+       bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+              phba->sli4_hba.link_state.number);
+       bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+              phba->sli4_hba.link_state.type);
+       bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+              link_diag_test_cmd->test_id);
+       bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+              link_diag_test_cmd->loops);
+       bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+              link_diag_test_cmd->test_version);
+       bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+              link_diag_test_cmd->error_action);
+
+       mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || mbxstatus) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "3010 Run link diag test mailbox failed with "
+                               "mbx_status x%x status x%x, add_status x%x\n",
+                               mbxstatus, shdr_status, shdr_add_status);
+       }
+
+       diag_status_reply = (struct diag_status *)
+                           job->reply->reply_data.vendor_reply.vendor_rsp;
+
+       if (job->reply_len <
+           sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                               "3012 Received Run link diag test reply "
+                               "below minimum size (%d): reply_len:%d\n",
+                               (int)(sizeof(struct fc_bsg_request) +
+                               sizeof(struct diag_status)),
+                               job->reply_len);
+               rc = -EINVAL;
+               goto job_error;
+       }
+
+       diag_status_reply->mbox_status = mbxstatus;
+       diag_status_reply->shdr_status = shdr_status;
+       diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+       rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+       if (pmboxq)
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+
+       lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+       /* make error code available to userspace */
+       job->reply->result = rc;
+       /* complete the job back to userspace if no error */
+       if (rc == 0)
+               job->job_done(job);
+       return rc;
+}
+
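
The handler above pulls its parameters from the vendor command payload; a
sketch of filling such a request, assuming struct sli4_link_diag is the one
declared for this interface (the values are purely illustrative):

    /* Illustrative values only; the field names are those read by the
     * link diag test handler above. */
    struct sli4_link_diag req = {
            .timeout      = 60,   /* seconds; the driver scales this by 100 */
            .test_id      = 1,    /* which diagnostic test to run */
            .loops        = 10,   /* number of test iterations */
            .test_version = 0,
            .error_action = 0,    /* behaviour on error */
    };
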
+/**
  * lpfcdiag_loop_self_reg - obtains a remote port login id
  * @phba: Pointer to HBA context object
  * @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
 }
 
 /**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+       struct lpfc_dmabuf *dmabuf;
+       struct pci_dev *pcidev = phba->pcidev;
+
+       /* allocate dma buffer struct */
+       dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!dmabuf)
+               return NULL;
+
+       INIT_LIST_HEAD(&dmabuf->list);
+
+       /* now, allocate dma buffer */
+       dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+                                         &(dmabuf->phys), GFP_KERNEL);
+
+       if (!dmabuf->virt) {
+               kfree(dmabuf);
+               return NULL;
+       }
+       memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+       return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine simply frees a dma buffer and its associated buffer
+ * descriptor referred to by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+       struct pci_dev *pcidev = phba->pcidev;
+
+       if (!dmabuf)
+               return;
+
+       if (dmabuf->virt)
+               dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+                                 dmabuf->virt, dmabuf->phys);
+       kfree(dmabuf);
+       return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine simply frees all dma buffers and their associated buffer
+ * descriptors referred to by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+                           struct list_head *dmabuf_list)
+{
+       struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+       if (list_empty(dmabuf_list))
+               return;
+
+       list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+               list_del_init(&dmabuf->list);
+               lpfc_bsg_dma_page_free(phba, dmabuf);
+       }
+       return;
+}
+
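
The three dma page helpers above are meant to compose; a sketch of allocating
several BSG mbox pages onto a caller-initialized list and releasing them with
the list helper (alloc_bsg_pages() is a hypothetical wrapper, and the caller
is assumed to have run INIT_LIST_HEAD on @pages):

    /* Sketch only: build up a list of BSG mbox pages, unwinding on failure. */
    static int alloc_bsg_pages(struct lpfc_hba *phba, struct list_head *pages,
                               int count)
    {
            struct lpfc_dmabuf *dmabuf;
            int i;

            for (i = 0; i < count; i++) {
                    dmabuf = lpfc_bsg_dma_page_alloc(phba);
                    if (!dmabuf) {
                            lpfc_bsg_dma_page_list_free(phba, pages);
                            return -ENOMEM;
                    }
                    list_add_tail(&dmabuf->list, pages);
            }
            return 0;
    }
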
+/**
  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
  * @phba: Pointer to HBA context object
  * @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
 }
 
 /**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
  *
  * This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
  * of loopback mode.
  **/
 static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
 }
 
 /**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
  * @phba: Pointer to HBA context object.
  * @pmboxq: Pointer to mailbox command.
  *
@@ -2422,15 +2954,13 @@ job_error:
  * of the mailbox.
  **/
 void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
-       struct lpfc_mbx_nembed_cmd *nembed_sge;
        uint32_t size;
        unsigned long flags;
-       uint8_t *to;
-       uint8_t *from;
+       uint8_t *pmb, *pmb_buf;
 
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
                return;
        }
 
-       /* build the outgoing buffer to do an sg copy
-        * the format is the response mailbox followed by any extended
-        * mailbox data
+       /*
+        * The outgoing buffer is readily referenced from the dma buffer;
+        * we just need to get the header part from the mailboxq structure.
         */
-       from = (uint8_t *)&pmboxq->u.mb;
-       to = (uint8_t *)dd_data->context_un.mbox.mb;
-       memcpy(to, from, sizeof(MAILBOX_t));
-       if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
-               /* copy the extended data if any, count is in words */
-               if (dd_data->context_un.mbox.outExtWLen) {
-                       from = (uint8_t *)dd_data->context_un.mbox.ext;
-                       to += sizeof(MAILBOX_t);
-                       size = dd_data->context_un.mbox.outExtWLen *
-                                       sizeof(uint32_t);
-                       memcpy(to, from, size);
-               } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
-                       from = (uint8_t *)dd_data->context_un.mbox.
-                                               dmp->dma.virt;
-                       to += sizeof(MAILBOX_t);
-                       size = dd_data->context_un.mbox.dmp->size;
-                       memcpy(to, from, size);
-               } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
-                       (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
-                       from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
-                                               virt;
-                       to += sizeof(MAILBOX_t);
-                       size = pmboxq->u.mb.un.varWords[5];
-                       memcpy(to, from, size);
-               } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
-                       (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
-                       nembed_sge = (struct lpfc_mbx_nembed_cmd *)
-                                       &pmboxq->u.mb.un.varWords[0];
-
-                       from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
-                                               virt;
-                       to += sizeof(MAILBOX_t);
-                       size = nembed_sge->sge[0].length;
-                       memcpy(to, from, size);
-               } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
-                       from = (uint8_t *)dd_data->context_un.
-                                               mbox.dmp->dma.virt;
-                       to += sizeof(MAILBOX_t);
-                       size = dd_data->context_un.mbox.dmp->size;
-                       memcpy(to, from, size);
-               }
-       }
+       pmb = (uint8_t *)&pmboxq->u.mb;
+       pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+       memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
 
-       from = (uint8_t *)dd_data->context_un.mbox.mb;
        job = dd_data->context_un.mbox.set_job;
        if (job) {
                size = job->reply_payload.payload_len;
                job->reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(job->reply_payload.sg_list,
-                                       job->reply_payload.sg_cnt,
-                                       from, size);
-               job->reply->result = 0;
+                                           job->reply_payload.sg_cnt,
+                                           pmb_buf, size);
                /* need to hold the lock until we set job->dd_data to NULL
                 * to hold off the timeout handler returning to the mid-layer
                 * while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
                job->dd_data = NULL;
                dd_data->context_un.mbox.set_job = NULL;
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-               job->job_done(job);
        } else {
                dd_data->context_un.mbox.set_job = NULL;
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        }
 
-       kfree(dd_data->context_un.mbox.mb);
        mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
-       kfree(dd_data->context_un.mbox.ext);
-       if (dd_data->context_un.mbox.dmp) {
-               dma_free_coherent(&phba->pcidev->dev,
-                       dd_data->context_un.mbox.dmp->size,
-                       dd_data->context_un.mbox.dmp->dma.virt,
-                       dd_data->context_un.mbox.dmp->dma.phys);
-               kfree(dd_data->context_un.mbox.dmp);
-       }
-       if (dd_data->context_un.mbox.rxbmp) {
-               lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
-                       dd_data->context_un.mbox.rxbmp->phys);
-               kfree(dd_data->context_un.mbox.rxbmp);
-       }
+       lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
        kfree(dd_data);
+
+       if (job) {
+               job->reply->result = 0;
+               job->job_done(job);
+       }
        return;
 }
 
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer
+ * mbox command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+       if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+               return;
+
+       /* free all memory, including dma buffers */
+       lpfc_bsg_dma_page_list_free(phba,
+                                   &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+       lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+       /* multi-buffer write mailbox command pass-through complete */
+       memset((char *)&phba->mbox_ext_buf_ctx, 0,
+              sizeof(struct lpfc_mbox_ext_buf_ctx));
+       INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+       return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles the BSG job for mailbox command completions with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+       struct bsg_job_data *dd_data;
+       struct fc_bsg_job *job;
+       uint8_t *pmb, *pmb_buf;
+       unsigned long flags;
+       uint32_t size;
+       int rc = 0;
+
+       spin_lock_irqsave(&phba->ct_ev_lock, flags);
+       dd_data = pmboxq->context1;
+       /* has the job already timed out? */
+       if (!dd_data) {
+               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+               job = NULL;
+               goto job_done_out;
+       }
+
+       /*
+        * The outgoing buffer is readily referenced from the dma buffer;
+        * we just need to get the header part from the mailboxq structure.
+        */
+       pmb = (uint8_t *)&pmboxq->u.mb;
+       pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+       memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+       job = dd_data->context_un.mbox.set_job;
+       if (job) {
+               size = job->reply_payload.payload_len;
+               job->reply->reply_payload_rcv_len =
+                       sg_copy_from_buffer(job->reply_payload.sg_list,
+                                           job->reply_payload.sg_cnt,
+                                           pmb_buf, size);
+               /* result for successful */
+               job->reply->result = 0;
+               job->dd_data = NULL;
+               /* need to hold the lock until we set job->dd_data to NULL
+                * to hold off the midlayer's timeout handler from taking
+                * any action.
+                */
+               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2937 SLI_CONFIG ext-buffer mailbox command "
+                               "(x%x/x%x) complete bsg job done, bsize:%d\n",
+                               phba->mbox_ext_buf_ctx.nembType,
+                               phba->mbox_ext_buf_ctx.mboxType, size);
+       } else
+               spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+       if (!job)
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "2938 SLI_CONFIG ext-buffer mailbox "
+                               "command (x%x/x%x) failure, rc:x%x\n",
+                               phba->mbox_ext_buf_ctx.nembType,
+                               phba->mbox_ext_buf_ctx.mboxType, rc);
+       /* state change */
+       phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+       kfree(dd_data);
+
+       return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler function for mailbox read commands with
+ * multiple external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+       struct fc_bsg_job *job;
+
+       /* handle the BSG job with mailbox command */
+       if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+               pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "2939 SLI_CONFIG ext-buffer rd mailbox command "
+                       "complete, ctxState:x%x, mbxStatus:x%x\n",
+                       phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+       job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+       if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+               lpfc_bsg_mbox_ext_session_reset(phba);
+
+       /* free base driver mailbox structure memory */
+       mempool_free(pmboxq, phba->mbox_mem_pool);
+
+       /* complete the bsg job if we have it */
+       if (job)
+               job->job_done(job);
+
+       return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler function for mailbox write commands with
+ * multiple external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+       struct fc_bsg_job *job;
+
+       /* handle the BSG job with the mailbox command */
+       if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+               pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "2940 SLI_CONFIG ext-buffer wr mailbox command "
+                       "complete, ctxState:x%x, mbxStatus:x%x\n",
+                       phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+       job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+       /* free all memory, including dma buffers */
+       mempool_free(pmboxq, phba->mbox_mem_pool);
+       lpfc_bsg_mbox_ext_session_reset(phba);
+
+       /* complete the bsg job if we have it */
+       if (job)
+               job->job_done(job);
+
+       return;
+}
+
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+                               uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+                               struct lpfc_dmabuf *ext_dmabuf)
+{
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+       /* pointer to the start of mailbox command */
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+       if (nemb_tp == nemb_mse) {
+               if (index == 0) {
+                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                               mse[index].pa_hi =
+                               putPaddrHigh(mbx_dmabuf->phys +
+                                            sizeof(MAILBOX_t));
+                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                               mse[index].pa_lo =
+                               putPaddrLow(mbx_dmabuf->phys +
+                                           sizeof(MAILBOX_t));
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2943 SLI_CONFIG(mse)[%d], "
+                                       "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+                                       index,
+                                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[index].buf_len,
+                                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[index].pa_hi,
+                                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[index].pa_lo);
+               } else {
+                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                               mse[index].pa_hi =
+                               putPaddrHigh(ext_dmabuf->phys);
+                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                               mse[index].pa_lo =
+                               putPaddrLow(ext_dmabuf->phys);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2944 SLI_CONFIG(mse)[%d], "
+                                       "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+                                       index,
+                                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[index].buf_len,
+                                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[index].pa_hi,
+                                       sli_cfg_mbx->un.sli_config_emb0_subsys.
+                                       mse[index].pa_lo);
+               }
+       } else {
+               if (index == 0) {
+                       sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_hi =
+                               putPaddrHigh(mbx_dmabuf->phys +
+                                            sizeof(MAILBOX_t));
+                       sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_lo =
+                               putPaddrLow(mbx_dmabuf->phys +
+                                           sizeof(MAILBOX_t));
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "3007 SLI_CONFIG(hbd)[%d], "
+                                       "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+                               index,
+                               bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+                               &sli_cfg_mbx->un.
+                               sli_config_emb1_subsys.hbd[index]),
+                               sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_hi,
+                               sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_lo);
+
+               } else {
+                       sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_hi =
+                               putPaddrHigh(ext_dmabuf->phys);
+                       sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_lo =
+                               putPaddrLow(ext_dmabuf->phys);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "3008 SLI_CONFIG(hbd)[%d], "
+                                       "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+                               index,
+                               bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+                               &sli_cfg_mbx->un.
+                               sli_config_emb1_subsys.hbd[index]),
+                               sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_hi,
+                               sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[index].pa_lo);
+               }
+       }
+       return;
+}
+
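
The descriptor setup above programs each buffer's physical address as a
high/low 32-bit pair via putPaddrHigh()/putPaddrLow(); in standalone form the
split is simply:

    #include <stdint.h>

    /* Illustration of the pa_hi/pa_lo split performed by the setup routine;
     * on configurations with 64-bit dma addressing the driver macros reduce
     * to taking the upper and lower 32 bits of the address. */
    static void split_dma_addr(uint64_t pa, uint32_t *pa_hi, uint32_t *pa_lo)
    {
            *pa_hi = (uint32_t)(pa >> 32);
            *pa_lo = (uint32_t)(pa & 0xffffffffU);
    }
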
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+                             enum nemb_type nemb_tp,
+                             struct lpfc_dmabuf *dmabuf)
+{
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+       struct dfc_mbox_req *mbox_req;
+       struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+       uint32_t ext_buf_cnt, ext_buf_index;
+       struct lpfc_dmabuf *ext_dmabuf = NULL;
+       struct bsg_job_data *dd_data = NULL;
+       LPFC_MBOXQ_t *pmboxq = NULL;
+       MAILBOX_t *pmb;
+       uint8_t *pmbx;
+       int rc, i;
+
+       mbox_req =
+          (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+       /* pointer to the start of mailbox command */
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+       if (nemb_tp == nemb_mse) {
+               ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+                       &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+               if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                                       "2945 Handled SLI_CONFIG(mse) rd, "
+                                       "ext_buf_cnt(%d) out of range(%d)\n",
+                                       ext_buf_cnt,
+                                       LPFC_MBX_SLI_CONFIG_MAX_MSE);
+                       rc = -ERANGE;
+                       goto job_error;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2941 Handled SLI_CONFIG(mse) rd, "
+                               "ext_buf_cnt:%d\n", ext_buf_cnt);
+       } else {
+               /* sanity check on interface type for support */
+               if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+                   LPFC_SLI_INTF_IF_TYPE_2) {
+                       rc = -ENODEV;
+                       goto job_error;
+               }
+               /* nemb_tp == nemb_hbd */
+               ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+               if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                                       "2946 Handled SLI_CONFIG(hbd) rd, "
+                                       "ext_buf_cnt(%d) out of range(%d)\n",
+                                       ext_buf_cnt,
+                                       LPFC_MBX_SLI_CONFIG_MAX_HBD);
+                       rc = -ERANGE;
+                       goto job_error;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2942 Handled SLI_CONFIG(hbd) rd, "
+                               "ext_buf_cnt:%d\n", ext_buf_cnt);
+       }
+
+       /* reject non-embedded mailbox command with no external buffer */
+       if (ext_buf_cnt == 0) {
+               rc = -EPERM;
+               goto job_error;
+       } else if (ext_buf_cnt > 1) {
+               /* additional external read buffers */
+               for (i = 1; i < ext_buf_cnt; i++) {
+                       ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+                       if (!ext_dmabuf) {
+                               rc = -ENOMEM;
+                               goto job_error;
+                       }
+                       list_add_tail(&ext_dmabuf->list,
+                                     &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+               }
+       }
+
+       /* bsg tracking structure */
+       dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+       if (!dd_data) {
+               rc = -ENOMEM;
+               goto job_error;
+       }
+
+       /* mailbox command structure for base driver */
+       pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmboxq) {
+               rc = -ENOMEM;
+               goto job_error;
+       }
+       memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+       /* for the first external buffer */
+       lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+       /* for the rest of external buffer descriptors if any */
+       if (ext_buf_cnt > 1) {
+               ext_buf_index = 1;
+               list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+                               &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+                       lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+                                               ext_buf_index, dmabuf,
+                                               curr_dmabuf);
+                       ext_buf_index++;
+               }
+       }
+
+       /* construct base driver mbox command */
+       pmb = &pmboxq->u.mb;
+       pmbx = (uint8_t *)dmabuf->virt;
+       memcpy(pmb, pmbx, sizeof(*pmb));
+       pmb->mbxOwner = OWN_HOST;
+       pmboxq->vport = phba->pport;
+
+       /* multi-buffer handling context */
+       phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+       phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+       phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+       phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+       phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+       phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+       /* callback for multi-buffer read mailbox command */
+       pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+       /* context fields to callback function */
+       pmboxq->context1 = dd_data;
+       dd_data->type = TYPE_MBOX;
+       dd_data->context_un.mbox.pmboxq = pmboxq;
+       dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+       dd_data->context_un.mbox.set_job = job;
+       job->dd_data = dd_data;
+
+       /* state change */
+       phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+       rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+       if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2947 Issued SLI_CONFIG ext-buffer "
+                               "mailbox command, rc:x%x\n", rc);
+               return 1;
+       }
+       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                       "2948 Failed to issue SLI_CONFIG ext-buffer "
+                       "mailbox command, rc:x%x\n", rc);
+       rc = -EPIPE;
+
+job_error:
+       if (pmboxq)
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+       lpfc_bsg_dma_page_list_free(phba,
+                                   &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+       kfree(dd_data);
+       phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+       return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerated type of the non-embedded mailbox command.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+                              enum nemb_type nemb_tp,
+                              struct lpfc_dmabuf *dmabuf)
+{
+       struct dfc_mbox_req *mbox_req;
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+       uint32_t ext_buf_cnt;
+       struct bsg_job_data *dd_data = NULL;
+       LPFC_MBOXQ_t *pmboxq = NULL;
+       MAILBOX_t *pmb;
+       uint8_t *mbx;
+       int rc = 0, i;
+
+       mbox_req =
+          (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+       /* pointer to the start of mailbox command */
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+       if (nemb_tp == nemb_mse) {
+               ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+                       &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+               if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                                       "2953 Handled SLI_CONFIG(mse) wr, "
+                                       "ext_buf_cnt(%d) out of range(%d)\n",
+                                       ext_buf_cnt,
+                                       LPFC_MBX_SLI_CONFIG_MAX_MSE);
+                       return -ERANGE;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2949 Handled SLI_CONFIG(mse) wr, "
+                               "ext_buf_cnt:%d\n", ext_buf_cnt);
+       } else {
+               /* sanity check on interface type for support */
+               if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+                   LPFC_SLI_INTF_IF_TYPE_2)
+                       return -ENODEV;
+               /* nemb_tp == nemb_hbd */
+               ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+               if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                                       "2954 Handled SLI_CONFIG(hbd) wr, "
+                                       "ext_buf_cnt(%d) out of range(%d)\n",
+                                       ext_buf_cnt,
+                                       LPFC_MBX_SLI_CONFIG_MAX_HBD);
+                       return -ERANGE;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2950 Handled SLI_CONFIG(hbd) wr, "
+                               "ext_buf_cnt:%d\n", ext_buf_cnt);
+       }
+
+       if (ext_buf_cnt == 0)
+               return -EPERM;
+
+       /* for the first external buffer */
+       lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+       /* log the lengths of the additional external buffers */
+       for (i = 1; i < ext_buf_cnt; i++) {
+               if (nemb_tp == nemb_mse)
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+                               i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+                               mse[i].buf_len);
+               else
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+                               i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+                               &sli_cfg_mbx->un.sli_config_emb1_subsys.
+                               hbd[i]));
+       }
+
+       /* multi-buffer handling context */
+       phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+       phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+       phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+       phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+       phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+       phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+       if (ext_buf_cnt == 1) {
+               /* bsg tracking structure */
+               dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+               if (!dd_data) {
+                       rc = -ENOMEM;
+                       goto job_error;
+               }
+
+               /* mailbox command structure for base driver */
+               pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!pmboxq) {
+                       rc = -ENOMEM;
+                       goto job_error;
+               }
+               memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+               pmb = &pmboxq->u.mb;
+               mbx = (uint8_t *)dmabuf->virt;
+               memcpy(pmb, mbx, sizeof(*pmb));
+               pmb->mbxOwner = OWN_HOST;
+               pmboxq->vport = phba->pport;
+
+               /* callback for multi-buffer write mailbox command */
+               pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+               /* context fields to callback function */
+               pmboxq->context1 = dd_data;
+               dd_data->type = TYPE_MBOX;
+               dd_data->context_un.mbox.pmboxq = pmboxq;
+               dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+               dd_data->context_un.mbox.set_job = job;
+               job->dd_data = dd_data;
+
+               /* state change */
+               phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+               rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+               if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2955 Issued SLI_CONFIG ext-buffer "
+                                       "mailbox command, rc:x%x\n", rc);
+                       return 1;
+               }
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "2956 Failed to issue SLI_CONFIG ext-buffer "
+                               "mailbox command, rc:x%x\n", rc);
+               rc = -EPIPE;
+       }
+
+job_error:
+       if (pmboxq)
+               mempool_free(pmboxq, phba->mbox_mem_pool);
+       kfree(dd_data);
+
+       return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
+ * external buffers, covering both 0x9B with non-embedded MSEs and 0x9B with
+ * embedded subsystem 0x1 opcodes that use external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+                            struct lpfc_dmabuf *dmabuf)
+{
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+       uint32_t subsys;
+       uint32_t opcode;
+       int rc = SLI_CONFIG_NOT_HANDLED;
+
+       /* state change */
+       phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+       if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+           &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+               subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+                                   &sli_cfg_mbx->un.sli_config_emb0_subsys);
+               opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+                                   &sli_cfg_mbx->un.sli_config_emb0_subsys);
+               if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+                       switch (opcode) {
+                       case FCOE_OPCODE_READ_FCF:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "2957 Handled SLI_CONFIG "
+                                               "subsys_fcoe, opcode:x%x\n",
+                                               opcode);
+                               rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+                                                       nemb_mse, dmabuf);
+                               break;
+                       case FCOE_OPCODE_ADD_FCF:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "2958 Handled SLI_CONFIG "
+                                               "subsys_fcoe, opcode:x%x\n",
+                                               opcode);
+                               rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+                                                       nemb_mse, dmabuf);
+                               break;
+                       default:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "2959 Not handled SLI_CONFIG "
+                                               "subsys_fcoe, opcode:x%x\n",
+                                               opcode);
+                               rc = SLI_CONFIG_NOT_HANDLED;
+                               break;
+                       }
+               } else {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2977 Handled SLI_CONFIG "
+                                       "subsys:x%d, opcode:x%x\n",
+                                       subsys, opcode);
+                       rc = SLI_CONFIG_NOT_HANDLED;
+               }
+       } else {
+               subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+                                   &sli_cfg_mbx->un.sli_config_emb1_subsys);
+               opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+                                   &sli_cfg_mbx->un.sli_config_emb1_subsys);
+               if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+                       switch (opcode) {
+                       case COMN_OPCODE_READ_OBJECT:
+                       case COMN_OPCODE_READ_OBJECT_LIST:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "2960 Handled SLI_CONFIG "
+                                               "subsys_comn, opcode:x%x\n",
+                                               opcode);
+                               rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+                                                       nemb_hbd, dmabuf);
+                               break;
+                       case COMN_OPCODE_WRITE_OBJECT:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "2961 Handled SLI_CONFIG "
+                                               "subsys_comn, opcode:x%x\n",
+                                               opcode);
+                               rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+                                                       nemb_hbd, dmabuf);
+                               break;
+                       default:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                               "2962 Not handled SLI_CONFIG "
+                                               "subsys_comn, opcode:x%x\n",
+                                               opcode);
+                               rc = SLI_CONFIG_NOT_HANDLED;
+                               break;
+                       }
+               } else {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2978 Handled SLI_CONFIG "
+                                       "subsys:x%d, opcode:x%x\n",
+                                       subsys, opcode);
+                       rc = SLI_CONFIG_NOT_HANDLED;
+               }
+       }
+       return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine requests the abort of a pass-through mailbox command with
+ * multiple external buffers due to an error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+       if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+               phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+       else
+               lpfc_bsg_mbox_ext_session_reset(phba);
+       return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ *
+ * This routine returns the next mailbox read external buffer to user space
+ * through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+       struct lpfc_dmabuf *dmabuf;
+       uint8_t *pbuf;
+       uint32_t size;
+       uint32_t index;
+
+       index = phba->mbox_ext_buf_ctx.seqNum;
+       phba->mbox_ext_buf_ctx.seqNum++;
+
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+                       phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+       if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+               size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+                       &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2963 SLI_CONFIG (mse) ext-buffer rd get "
+                               "buffer[%d], size:%d\n", index, size);
+       } else {
+               size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+                       &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+                               "buffer[%d], size:%d\n", index, size);
+       }
+       if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+               return -EPIPE;
+       dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+                                 struct lpfc_dmabuf, list);
+       list_del_init(&dmabuf->list);
+       pbuf = (uint8_t *)dmabuf->virt;
+       job->reply->reply_payload_rcv_len =
+               sg_copy_from_buffer(job->reply_payload.sg_list,
+                                   job->reply_payload.sg_cnt,
+                                   pbuf, size);
+
+       lpfc_bsg_dma_page_free(phba, dmabuf);
+
+       if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+                               "command session done\n");
+               lpfc_bsg_mbox_ext_session_reset(phba);
+       }
+
+       job->reply->result = 0;
+       job->job_done(job);
+
+       return SLI_CONFIG_HANDLED;
+}
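
A user-space sketch of the one-buffer-per-request drain that lpfc_bsg_read_ebuf_get() performs: the length of each buffer comes from the original command's MSE/HBD descriptor at the current sequence index, and the session resets once the last buffer has been handed back. The struct and helper below are simplified stand-ins for the driver's types, not the driver's implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_EBUF 8

struct read_session {                 /* hypothetical stand-in types  */
	uint32_t num_buf;             /* buffers the command produced */
	uint32_t seq_num;             /* next buffer index to return  */
	uint32_t desc_len[MAX_EBUF];  /* lengths from the descriptors */
	const uint8_t *buf[MAX_EBUF]; /* buffer contents              */
};

/* Copy the next buffer into 'out'; return its length, 0 when drained. */
static uint32_t read_next_ebuf(struct read_session *s, uint8_t *out)
{
	uint32_t idx = s->seq_num, len;

	if (idx >= s->num_buf)
		return 0;
	len = s->desc_len[idx];
	memcpy(out, s->buf[idx], len);
	if (++s->seq_num == s->num_buf) {
		/* last buffer returned: reset the session */
		s->seq_num = 0;
		s->num_buf = 0;
	}
	return len;
}

int main(void)
{
	static const uint8_t b0[4] = "abc", b1[8] = "defghij";
	struct read_session s = {
		.num_buf = 2, .desc_len = { 4, 8 }, .buf = { b0, b1 },
	};
	uint8_t out[16];
	uint32_t l1 = read_next_ebuf(&s, out);
	uint32_t l2 = read_next_ebuf(&s, out);
	uint32_t l3 = read_next_ebuf(&s, out);

	printf("first:%u second:%u third:%u\n", l1, l2, l3);
	return 0;
}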
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+                       struct lpfc_dmabuf *dmabuf)
+{
+       struct lpfc_sli_config_mbox *sli_cfg_mbx;
+       struct bsg_job_data *dd_data = NULL;
+       LPFC_MBOXQ_t *pmboxq = NULL;
+       MAILBOX_t *pmb;
+       enum nemb_type nemb_tp;
+       uint8_t *pbuf;
+       uint32_t size;
+       uint32_t index;
+       int rc;
+
+       index = phba->mbox_ext_buf_ctx.seqNum;
+       phba->mbox_ext_buf_ctx.seqNum++;
+       nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+       sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+                       phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+       dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+       if (!dd_data) {
+               rc = -ENOMEM;
+               goto job_error;
+       }
+
+       pbuf = (uint8_t *)dmabuf->virt;
+       size = job->request_payload.payload_len;
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt,
+                         pbuf, size);
+
+       if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2966 SLI_CONFIG (mse) ext-buffer wr set "
+                               "buffer[%d], size:%d\n",
+                               phba->mbox_ext_buf_ctx.seqNum, size);
+
+       } else {
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+                               "buffer[%d], size:%d\n",
+                               phba->mbox_ext_buf_ctx.seqNum, size);
+
+       }
+
+       /* set up external buffer descriptor and add to external buffer list */
+       lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+                                       phba->mbox_ext_buf_ctx.mbx_dmabuf,
+                                       dmabuf);
+       list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+       if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2968 SLI_CONFIG ext-buffer wr all %d "
+                               "ebuffers received\n",
+                               phba->mbox_ext_buf_ctx.numBuf);
+               /* mailbox command structure for base driver */
+               pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!pmboxq) {
+                       rc = -ENOMEM;
+                       goto job_error;
+               }
+               memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+               pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+               pmb = &pmboxq->u.mb;
+               memcpy(pmb, pbuf, sizeof(*pmb));
+               pmb->mbxOwner = OWN_HOST;
+               pmboxq->vport = phba->pport;
+
+               /* callback for multi-buffer write mailbox command */
+               pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+               /* context fields to callback function */
+               pmboxq->context1 = dd_data;
+               dd_data->type = TYPE_MBOX;
+               dd_data->context_un.mbox.pmboxq = pmboxq;
+               dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+               dd_data->context_un.mbox.set_job = job;
+               job->dd_data = dd_data;
+
+               /* state change */
+               phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+               rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+               if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2969 Issued SLI_CONFIG ext-buffer "
+                                       "mailbox command, rc:x%x\n", rc);
+                       return 1;
+               }
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "2970 Failed to issue SLI_CONFIG ext-buffer "
+                               "mailbox command, rc:x%x\n", rc);
+               rc = -EPIPE;
+               goto job_error;
+       }
+
+       /* wait for additional external buffers */
+       job->reply->result = 0;
+       job->job_done(job);
+       return SLI_CONFIG_HANDLED;
+
+job_error:
+       lpfc_bsg_dma_page_free(phba, dmabuf);
+       kfree(dd_data);
+
+       return rc;
+}
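
On the write side each BSG request contributes one external buffer, and the mailbox command is only pushed to the port once the received count matches the advertised total. A minimal sketch of that accumulation under simplified, hypothetical types, with the issue step reduced to a printf:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct write_session {
	uint32_t num_buf;   /* external buffers the command advertised */
	uint32_t seq_num;   /* buffers received so far                 */
};

/* Returns true once the final buffer triggers command issue. */
static bool write_add_ebuf(struct write_session *s)
{
	s->seq_num++;
	if (s->seq_num < s->num_buf)
		return false;          /* wait for additional buffers */
	printf("all %u buffers received, issuing mailbox command\n",
	       s->num_buf);
	return true;
}

int main(void)
{
	struct write_session s = { .num_buf = 3, .seq_num = 0 };

	for (int i = 0; i < 3; i++)
		write_add_ebuf(&s);
	return 0;
}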
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles an external buffer for an SLI_CONFIG (0x9B) mailbox
+ * command that uses multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+                            struct lpfc_dmabuf *dmabuf)
+{
+       int rc;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "2971 SLI_CONFIG buffer (type:x%x)\n",
+                       phba->mbox_ext_buf_ctx.mboxType);
+
+       if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+               if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                                       "2972 SLI_CONFIG rd buffer state "
+                                       "mismatch:x%x\n",
+                                       phba->mbox_ext_buf_ctx.state);
+                       lpfc_bsg_mbox_ext_abort(phba);
+                       return -EPIPE;
+               }
+               rc = lpfc_bsg_read_ebuf_get(phba, job);
+               if (rc == SLI_CONFIG_HANDLED)
+                       lpfc_bsg_dma_page_free(phba, dmabuf);
+       } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+               if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                                       "2973 SLI_CONFIG wr buffer state "
+                                       "mismatch:x%x\n",
+                                       phba->mbox_ext_buf_ctx.state);
+                       lpfc_bsg_mbox_ext_abort(phba);
+                       return -EPIPE;
+               }
+               rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+       }
+       return rc;
+}
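
A short sketch of the session-state gate applied above: read buffers may only be drained once the port side has completed the command, while write buffers are only accepted while the host is still collecting them. The enum and helper are simplified stand-ins for the driver's LPFC_BSG_MBOX_* states.

#include <stdbool.h>

enum ebuf_session_state { SESSION_IDLE, SESSION_HOST, SESSION_PORT, SESSION_DONE };

static bool ebuf_state_ok(bool is_read, enum ebuf_session_state state)
{
	/* read drain requires the port-side completion to have happened */
	if (is_read)
		return state == SESSION_DONE;
	/* write collection must still be on the host side */
	return state == SESSION_HOST;
}

int main(void)
{
	return (ebuf_state_ok(true, SESSION_DONE) &&
		!ebuf_state_ok(false, SESSION_PORT)) ? 0 : 1;
}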
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks for and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and their external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+                           struct lpfc_dmabuf *dmabuf)
+{
+       struct dfc_mbox_req *mbox_req;
+       int rc;
+
+       mbox_req =
+          (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+       /* mbox command with/without single external buffer */
+       if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+               return SLI_CONFIG_NOT_HANDLED;
+
+       /* mbox command and first external buffer */
+       if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+               if (mbox_req->extSeqNum == 1) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                                       "2974 SLI_CONFIG mailbox: tag:%d, "
+                                       "seq:%d\n", mbox_req->extMboxTag,
+                                       mbox_req->extSeqNum);
+                       rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+                       return rc;
+               } else
+                       goto sli_cfg_ext_error;
+       }
+
+       /*
+        * handle additional external buffers
+        */
+
+       /* check broken pipe conditions */
+       if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+               goto sli_cfg_ext_error;
+       if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+               goto sli_cfg_ext_error;
+       if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+               goto sli_cfg_ext_error;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                       "2975 SLI_CONFIG mailbox external buffer: "
+                       "extSta:x%x, tag:%d, seq:%d\n",
+                       phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+                       mbox_req->extSeqNum);
+       rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+       return rc;
+
+sli_cfg_ext_error:
+       /* all other cases, broken pipe */
+       lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                       "2976 SLI_CONFIG mailbox broken pipe: "
+                       "ctxSta:x%x, ctxNumBuf:%d "
+                       "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+                       phba->mbox_ext_buf_ctx.state,
+                       phba->mbox_ext_buf_ctx.numBuf,
+                       phba->mbox_ext_buf_ctx.mbxTag,
+                       phba->mbox_ext_buf_ctx.seqNum,
+                       mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+       lpfc_bsg_mbox_ext_session_reset(phba);
+
+       return -EPIPE;
+}
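
The additional-buffer path treats any tag mismatch, over-count, or out-of-order sequence number as a broken pipe and resets the session. A small user-space sketch of that continuity check, using hypothetical simplified structures rather than the driver's mbox_ext_buf_ctx and dfc_mbox_req:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ext_buf_session {          /* hypothetical session context  */
	uint32_t tag;             /* tag of the in-progress session */
	uint32_t num_buf;         /* total external buffers expected */
	uint32_t seq_num;         /* last sequence number accepted   */
};

struct ext_buf_request {          /* hypothetical per-request data */
	uint32_t ext_mbox_tag;
	uint32_t ext_seq_num;
};

/* Accept the buffer only if it continues the current session in order. */
static bool ext_buf_request_ok(const struct ext_buf_session *s,
			       const struct ext_buf_request *req)
{
	if (req->ext_mbox_tag != s->tag)
		return false;             /* belongs to another session */
	if (req->ext_seq_num > s->num_buf)
		return false;             /* beyond the advertised count */
	return req->ext_seq_num == s->seq_num + 1;  /* strictly in order */
}

int main(void)
{
	struct ext_buf_session s = { .tag = 7, .num_buf = 3, .seq_num = 1 };
	struct ext_buf_request ok = { .ext_mbox_tag = 7, .ext_seq_num = 2 };
	struct ext_buf_request bad = { .ext_mbox_tag = 7, .ext_seq_num = 3 };

	printf("in-order: %d, out-of-order: %d\n",
	       ext_buf_request_ok(&s, &ok), ext_buf_request_ok(&s, &bad));
	return 0;
}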
+
+/**
  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
  * @phba: Pointer to HBA context object.
  * @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
        LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
        MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
        /* a 4k buffer to hold the mb and extended data from/to the bsg */
-       MAILBOX_t *mb = NULL;
+       uint8_t *pmbx = NULL;
        struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
-       uint32_t size;
-       struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
-       struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
-       struct ulp_bde64 *rxbpl = NULL;
-       struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
-               job->request->rqst_data.h_vendor.vendor_cmd;
+       struct lpfc_dmabuf *dmabuf = NULL;
+       struct dfc_mbox_req *mbox_req;
        struct READ_EVENT_LOG_VAR *rdEventLog;
        uint32_t transmit_length, receive_length, mode;
+       struct lpfc_mbx_sli4_config *sli4_config;
        struct lpfc_mbx_nembed_cmd *nembed_sge;
        struct mbox_header *header;
        struct ulp_bde64 *bde;
        uint8_t *ext = NULL;
        int rc = 0;
        uint8_t *from;
+       uint32_t size;
+
 
        /* in case no data is transferred */
        job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                goto job_done;
        }
 
+       /*
+        * Don't allow mailbox commands to be sent when blocked or when in
+        * the middle of discovery
+        */
+        if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+               rc = -EAGAIN;
+               goto job_done;
+       }
+
+       mbox_req =
+           (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
        /* check if requested extended data lengths are valid */
        if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
            (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                goto job_done;
        }
 
+       dmabuf = lpfc_bsg_dma_page_alloc(phba);
+       if (!dmabuf || !dmabuf->virt) {
+               rc = -ENOMEM;
+               goto job_done;
+       }
+
+       /* Get the mailbox command or external buffer from BSG */
+       pmbx = (uint8_t *)dmabuf->virt;
+       size = job->request_payload.payload_len;
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt, pmbx, size);
+
+       /* Handle possible SLI_CONFIG with non-embedded payloads */
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+               if (rc == SLI_CONFIG_HANDLED)
+                       goto job_cont;
+               if (rc)
+                       goto job_done;
+               /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+       }
+
+       rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+       if (rc != 0)
+               goto job_done; /* must be negative */
+
        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
        if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                goto job_done;
        }
 
-       mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
-       if (!mb) {
-               rc = -ENOMEM;
-               goto job_done;
-       }
-
        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmboxq) {
                rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
        }
        memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 
-       size = job->request_payload.payload_len;
-       sg_copy_to_buffer(job->request_payload.sg_list,
-                       job->request_payload.sg_cnt,
-                       mb, size);
-
-       rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
-       if (rc != 0)
-               goto job_done; /* must be negative */
-
        pmb = &pmboxq->u.mb;
-       memcpy(pmb, mb, sizeof(*pmb));
+       memcpy(pmb, pmbx, sizeof(*pmb));
        pmb->mbxOwner = OWN_HOST;
        pmboxq->vport = vport;
 
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                                "0x%x while in stopped state.\n",
                                pmb->mbxCommand);
 
-       /* Don't allow mailbox commands to be sent when blocked
-        * or when in the middle of discovery
-        */
-       if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
-               rc = -EAGAIN;
-               goto job_done;
-       }
-
        /* extended mailbox commands will need an extended buffer */
        if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
-               ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
-               if (!ext) {
-                       rc = -ENOMEM;
-                       goto job_done;
-               }
-
                /* any data for the device? */
                if (mbox_req->inExtWLen) {
-                       from = (uint8_t *)mb;
-                       from += sizeof(MAILBOX_t);
-                       memcpy((uint8_t *)ext, from,
-                               mbox_req->inExtWLen * sizeof(uint32_t));
+                       from = pmbx;
+                       ext = from + sizeof(MAILBOX_t);
                }
-
                pmboxq->context2 = ext;
                pmboxq->in_ext_byte_len =
                        mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                        rc = -ERANGE;
                        goto job_done;
                }
-
-               rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-               if (!rxbmp) {
-                       rc = -ENOMEM;
-                       goto job_done;
-               }
-
-               rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
-               if (!rxbmp->virt) {
-                       rc = -ENOMEM;
-                       goto job_done;
-               }
-
-               INIT_LIST_HEAD(&rxbmp->list);
-               rxbpl = (struct ulp_bde64 *) rxbmp->virt;
-               dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
-               if (!dmp) {
-                       rc = -ENOMEM;
-                       goto job_done;
-               }
-
-               INIT_LIST_HEAD(&dmp->dma.list);
                pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
-                       putPaddrHigh(dmp->dma.phys);
+                       putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
                pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
-                       putPaddrLow(dmp->dma.phys);
+                       putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
 
                pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
-                       putPaddrHigh(dmp->dma.phys +
-                               pmb->un.varBIUdiag.un.s2.
-                                       xmit_bde64.tus.f.bdeSize);
+                       putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+                         + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
                pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
-                       putPaddrLow(dmp->dma.phys +
-                               pmb->un.varBIUdiag.un.s2.
-                                       xmit_bde64.tus.f.bdeSize);
-
-               /* copy the transmit data found in the mailbox extension area */
-               from = (uint8_t *)mb;
-               from += sizeof(MAILBOX_t);
-               memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+                       putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+                         + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
        } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
                rdEventLog = &pmb->un.varRdEventLog;
                receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 
                /* mode zero uses a bde like biu diags command */
                if (mode == 0) {
-
-                       /* rebuild the command for sli4 using our own buffers
-                       * like we do for biu diags
-                       */
-
-                       rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-                       if (!rxbmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
-                       rxbpl = (struct ulp_bde64 *) rxbmp->virt;
-                       if (rxbpl) {
-                               INIT_LIST_HEAD(&rxbmp->list);
-                               dmp = diag_cmd_data_alloc(phba, rxbpl,
-                                       receive_length, 0);
-                       }
-
-                       if (!dmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       INIT_LIST_HEAD(&dmp->dma.list);
-                       pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
-                       pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+                       pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+                                                       + sizeof(MAILBOX_t));
+                       pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+                                                       + sizeof(MAILBOX_t));
                }
        } else if (phba->sli_rev == LPFC_SLI_REV4) {
                if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                        /* receive length cannot be greater than mailbox
                         * extension size
                         */
-                       if ((receive_length == 0) ||
-                               (receive_length > MAILBOX_EXT_SIZE)) {
+                       if (receive_length == 0) {
                                rc = -ERANGE;
                                goto job_done;
                        }
-
-                       rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-                       if (!rxbmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
-                       if (!rxbmp->virt) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       INIT_LIST_HEAD(&rxbmp->list);
-                       rxbpl = (struct ulp_bde64 *) rxbmp->virt;
-                       dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
-                                               0);
-                       if (!dmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       INIT_LIST_HEAD(&dmp->dma.list);
-                       pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
-                       pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+                       pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+                                               + sizeof(MAILBOX_t));
+                       pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+                                               + sizeof(MAILBOX_t));
                } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
                        pmb->un.varUpdateCfg.co) {
                        bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                                rc = -ERANGE;
                                goto job_done;
                        }
-
-                       rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-                       if (!rxbmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
-                       if (!rxbmp->virt) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       INIT_LIST_HEAD(&rxbmp->list);
-                       rxbpl = (struct ulp_bde64 *) rxbmp->virt;
-                       dmp = diag_cmd_data_alloc(phba, rxbpl,
-                                       bde->tus.f.bdeSize, 0);
-                       if (!dmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       INIT_LIST_HEAD(&dmp->dma.list);
-                       bde->addrHigh = putPaddrHigh(dmp->dma.phys);
-                       bde->addrLow = putPaddrLow(dmp->dma.phys);
-
-                       /* copy the transmit data found in the mailbox
-                        * extension area
-                        */
-                       from = (uint8_t *)mb;
-                       from += sizeof(MAILBOX_t);
-                       memcpy((uint8_t *)dmp->dma.virt, from,
-                               bde->tus.f.bdeSize);
+                       bde->addrHigh = putPaddrHigh(dmabuf->phys
+                                               + sizeof(MAILBOX_t));
+                       bde->addrLow = putPaddrLow(dmabuf->phys
+                                               + sizeof(MAILBOX_t));
                } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
-                       /* rebuild the command for sli4 using our own buffers
-                       * like we do for biu diags
-                       */
-                       header = (struct mbox_header *)&pmb->un.varWords[0];
-                       nembed_sge = (struct lpfc_mbx_nembed_cmd *)
-                               &pmb->un.varWords[0];
-                       receive_length = nembed_sge->sge[0].length;
-
-                       /* receive length cannot be greater than mailbox
-                        * extension size
-                        */
-                       if ((receive_length == 0) ||
-                               (receive_length > MAILBOX_EXT_SIZE)) {
-                               rc = -ERANGE;
-                               goto job_done;
-                       }
-
-                       rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-                       if (!rxbmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
-
-                       rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
-                       if (!rxbmp->virt) {
-                               rc = -ENOMEM;
-                               goto job_done;
-                       }
+                       /* Handling non-embedded SLI_CONFIG mailbox command */
+                       sli4_config = &pmboxq->u.mqe.un.sli4_config;
+                       if (!bf_get(lpfc_mbox_hdr_emb,
+                           &sli4_config->header.cfg_mhdr)) {
+                               /* rebuild the command for sli4 using our
+                                * own buffers like we do for biu diags
+                                */
+                               header = (struct mbox_header *)
+                                               &pmb->un.varWords[0];
+                               nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+                                               &pmb->un.varWords[0];
+                               receive_length = nembed_sge->sge[0].length;
+
+                               /* receive length cannot be greater than
+                                * mailbox extension size
+                                */
+                               if ((receive_length == 0) ||
+                                   (receive_length > MAILBOX_EXT_SIZE)) {
+                                       rc = -ERANGE;
+                                       goto job_done;
+                               }
 
-                       INIT_LIST_HEAD(&rxbmp->list);
-                       rxbpl = (struct ulp_bde64 *) rxbmp->virt;
-                       dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
-                                               0);
-                       if (!dmp) {
-                               rc = -ENOMEM;
-                               goto job_done;
+                               nembed_sge->sge[0].pa_hi =
+                                               putPaddrHigh(dmabuf->phys
+                                                  + sizeof(MAILBOX_t));
+                               nembed_sge->sge[0].pa_lo =
+                                               putPaddrLow(dmabuf->phys
+                                                  + sizeof(MAILBOX_t));
                        }
-
-                       INIT_LIST_HEAD(&dmp->dma.list);
-                       nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
-                       nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
-                       /* copy the transmit data found in the mailbox
-                        * extension area
-                        */
-                       from = (uint8_t *)mb;
-                       from += sizeof(MAILBOX_t);
-                       memcpy((uint8_t *)dmp->dma.virt, from,
-                               header->cfg_mhdr.payload_length);
                }
        }
 
-       dd_data->context_un.mbox.rxbmp = rxbmp;
-       dd_data->context_un.mbox.dmp = dmp;
+       dd_data->context_un.mbox.dmabuffers = dmabuf;
 
        /* setup wake call as IOCB callback */
-       pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+       pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
 
        /* setup context field to pass wait_queue pointer to wake function */
        pmboxq->context1 = dd_data;
        dd_data->type = TYPE_MBOX;
        dd_data->context_un.mbox.pmboxq = pmboxq;
-       dd_data->context_un.mbox.mb = mb;
+       dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
        dd_data->context_un.mbox.set_job = job;
        dd_data->context_un.mbox.ext = ext;
        dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
                }
 
                /* job finished, copy the data */
-               memcpy(mb, pmb, sizeof(*pmb));
+               memcpy(pmbx, pmb, sizeof(*pmb));
                job->reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(job->reply_payload.sg_list,
-                                       job->reply_payload.sg_cnt,
-                                       mb, size);
+                                           job->reply_payload.sg_cnt,
+                                           pmbx, size);
                /* not waiting mbox already done */
                rc = 0;
                goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 
 job_done:
        /* common exit for error or job completed inline */
-       kfree(mb);
        if (pmboxq)
                mempool_free(pmboxq, phba->mbox_mem_pool);
-       kfree(ext);
-       if (dmp) {
-               dma_free_coherent(&phba->pcidev->dev,
-                       dmp->size, dmp->dma.virt,
-                               dmp->dma.phys);
-               kfree(dmp);
-       }
-       if (rxbmp) {
-               lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
-               kfree(rxbmp);
-       }
+       lpfc_bsg_dma_page_free(phba, dmabuf);
        kfree(dd_data);
 
+job_cont:
        return rc;
 }
 
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
+       struct dfc_mbox_req *mbox_req;
        int rc = 0;
 
-       /* in case no data is transferred */
+       /* mix-and-match backward compatibility */
        job->reply->reply_payload_rcv_len = 0;
        if (job->request_len <
            sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-                               "2737 Received MBOX_REQ request below "
-                               "minimum size\n");
-               rc = -EINVAL;
-               goto job_error;
-       }
-
-       if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
-               rc = -EINVAL;
-               goto job_error;
-       }
-
-       if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
-               rc = -EINVAL;
-               goto job_error;
-       }
-
-       if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
-               rc = -EAGAIN;
-               goto job_error;
+               lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+                               "2737 Mix-and-match backward compatibility "
+                               "between MBOX_REQ old size:%d and "
+                               "new request size:%d\n",
+                               (int)(job->request_len -
+                                     sizeof(struct fc_bsg_request)),
+                               (int)sizeof(struct dfc_mbox_req));
+               mbox_req = (struct dfc_mbox_req *)
+                               job->request->rqst_data.h_vendor.vendor_cmd;
+               mbox_req->extMboxTag = 0;
+               mbox_req->extSeqNum = 0;
        }
 
        rc = lpfc_bsg_issue_mbox(phba, job, vport);
 
-job_error:
        if (rc == 0) {
                /* job done */
                job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
                rc = lpfc_bsg_send_mgmt_rsp(job);
                break;
        case LPFC_BSG_VENDOR_DIAG_MODE:
-               rc = lpfc_bsg_diag_mode(job);
+               rc = lpfc_bsg_diag_loopback_mode(job);
+               break;
+       case LPFC_BSG_VENDOR_DIAG_MODE_END:
+               rc = lpfc_sli4_bsg_diag_mode_end(job);
+               break;
+       case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+               rc = lpfc_bsg_diag_loopback_run(job);
                break;
-       case LPFC_BSG_VENDOR_DIAG_TEST:
-               rc = lpfc_bsg_diag_test(job);
+       case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+               rc = lpfc_sli4_bsg_link_diag_test(job);
                break;
        case LPFC_BSG_VENDOR_GET_MGMT_REV:
                rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
                /* the mbox completion handler can now be run */
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                job->job_done(job);
+               if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+                       phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
                break;
        case TYPE_MENLO:
                menlo = &dd_data->context_un.menlo;
index b542aca..c8c2b47 100644 (file)
  * These are the vendor unique structures passed in using the bsg
  * FC_BSG_HST_VENDOR message code type.
  */
-#define LPFC_BSG_VENDOR_SET_CT_EVENT   1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT   2
-#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
-#define LPFC_BSG_VENDOR_DIAG_MODE      4
-#define LPFC_BSG_VENDOR_DIAG_TEST      5
-#define LPFC_BSG_VENDOR_GET_MGMT_REV   6
-#define LPFC_BSG_VENDOR_MBOX           7
-#define LPFC_BSG_VENDOR_MENLO_CMD      8
-#define LPFC_BSG_VENDOR_MENLO_DATA     9
+#define LPFC_BSG_VENDOR_SET_CT_EVENT           1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT           2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP         3
+#define LPFC_BSG_VENDOR_DIAG_MODE              4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK      5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV           6
+#define LPFC_BSG_VENDOR_MBOX                   7
+#define LPFC_BSG_VENDOR_MENLO_CMD              8
+#define LPFC_BSG_VENDOR_MENLO_DATA             9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END          10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST         11
 
 struct set_ct_event {
        uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
        uint32_t timeout;
 };
 
+struct sli4_link_diag {
+       uint32_t command;
+       uint32_t timeout;
+       uint32_t test_id;
+       uint32_t loops;
+       uint32_t test_version;
+       uint32_t error_action;
+};
+
 struct diag_mode_test {
        uint32_t command;
 };
 
+struct diag_status {
+       uint32_t mbox_status;
+       uint32_t shdr_status;
+       uint32_t shdr_add_status;
+};
+
 #define LPFC_WWNN_TYPE         0
 #define LPFC_WWPN_TYPE         1
 
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
 };
 
 #define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
 struct dfc_mbox_req {
        uint32_t command;
        uint32_t mbOffset;
        uint32_t inExtWLen;
        uint32_t outExtWLen;
+       uint32_t extMboxTag;
+       uint32_t extSeqNum;
 };
 
 /* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
 #define lpfc_mbox_sli_config_mse_len_WORD      buf_len
 };
 
-struct lpfc_sli_config_subcmd_hbd {
+struct lpfc_sli_config_hbd {
        uint32_t buf_len;
 #define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT        0
 #define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
        uint32_t reserved5;
 };
 
-struct lpfc_sli_config_generic {
+struct lpfc_sli_config_emb0_subsys {
        struct lpfc_sli_config_hdr      sli_config_hdr;
 #define LPFC_MBX_SLI_CONFIG_MAX_MSE     19
        struct lpfc_sli_config_mse      mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+       uint32_t padding;
+       uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK  0xff
+#define lpfc_emb0_subcmnd_opcode_WORD  word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK  0xff
+#define lpfc_emb0_subcmnd_subsys_WORD  word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE         0x0C
+#define FCOE_OPCODE_READ_FCF           0x08
+#define FCOE_OPCODE_ADD_FCF            0x09
 };
 
-struct lpfc_sli_config_subcmnd {
+struct lpfc_sli_config_emb1_subsys {
        struct lpfc_sli_config_hdr      sli_config_hdr;
        uint32_t word6;
-#define lpfc_subcmnd_opcode_SHIFT      0
-#define lpfc_subcmnd_opcode_MASK       0xff
-#define lpfc_subcmnd_opcode_WORD       word6
-#define lpfc_subcmnd_subsys_SHIFT      8
-#define lpfc_subcmnd_subsys_MASK       0xff
-#define lpfc_subcmnd_subsys_WORD       word6
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK  0xff
+#define lpfc_emb1_subcmnd_opcode_WORD  word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK  0xff
+#define lpfc_emb1_subcmnd_subsys_WORD  word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN         0x01
+#define COMN_OPCODE_READ_OBJECT                0xAB
+#define COMN_OPCODE_WRITE_OBJECT       0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST   0xAD
+#define COMN_OPCODE_DELETE_OBJECT      0xAE
        uint32_t timeout;
        uint32_t request_length;
        uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
        uint32_t rd_offset;
        uint32_t obj_name[26];
        uint32_t hbd_count;
-#define LPFC_MBX_SLI_CONFIG_MAX_HBD    10
-       struct lpfc_sli_config_subcmd_hbd   hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD    8
+       struct lpfc_sli_config_hbd      hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
 };
 
 struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
 #define lpfc_mqe_command_MASK          0x000000FF
 #define lpfc_mqe_command_WORD          word0
        union {
-               struct lpfc_sli_config_generic  sli_config_generic;
-               struct lpfc_sli_config_subcmnd  sli_config_subcmnd;
+               struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+               struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
        } un;
 };
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED         0
+#define SLI_CONFIG_HANDLED             1
index f0b332f..fc20c24 100644 (file)
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_supported_pages(struct lpfcMboxq *);
 void lpfc_pc_sli4_params(struct lpfcMboxq *);
 int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+                          uint16_t, uint16_t, bool);
 int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
 void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
 
 int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
 int lpfc_config_port_post(struct lpfc_hba *);
 int lpfc_hba_down_prep(struct lpfc_hba *);
 int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
        uint32_t, uint32_t);
 extern struct lpfc_hbq_init *lpfc_hbq_defs[];
 
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
 /* externs BlockGuard */
 extern char *_dump_buf_data;
 extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
 void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
        uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
index d9edfd9..779b88e 100644 (file)
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
        icmd->ulpLe = 1;
        icmd->ulpClass = CLASS3;
        icmd->ulpContext = ndlp->nlp_rpi;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 
        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                /* For GEN_REQUEST64_CR, use the RPI */
index c93fca0..ffe82d1 100644 (file)
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
        /* Get fast-path complete queue information */
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                        "Fast-path FCP CQ information:\n");
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+       fcp_qidx = 0;
+       do {
                len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
                                "Associated EQID[%02d]:\n",
                                phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                                phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
                                phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
                                phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-       }
+       } while (++fcp_qidx < phba->cfg_fcp_eq_count);
        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
        /* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                        goto pass_check;
                }
                /* FCP complete queue */
-               for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+               qidx = 0;
+               do {
                        if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
                                /* Sanity check */
                                rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                                                phba->sli4_hba.fcp_cq[qidx];
                                goto pass_check;
                        }
-               }
+               } while (++qidx < phba->cfg_fcp_eq_count);
                goto error_out;
                break;
        case LPFC_IDIAG_MQ:
index e2c4524..32a0845 100644 (file)
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                icmd->un.elsreq64.myID = vport->fc_myDID;
 
                /* For ELS_REQUEST64_CR, use the VPI by default */
-               icmd->ulpContext = vport->vpi + phba->vpi_base;
+               icmd->ulpContext = phba->vpi_ids[vport->vpi];
                icmd->ulpCt_h = 0;
                /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
                if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
                rc = -ENOMEM;
                goto fail_free_dmabuf;
        }
+
        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
                rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
 {
        struct lpfc_vport *vport;
        unsigned long flags;
+       int i;
+
+       /* The physical ports are always vpi 0 - translation is unnecessary. */
+       if (vpi > 0) {
+               /*
+                * Translate the physical vpi to the logical vpi.  The
+                * vport stores the logical vpi.
+                */
+               for (i = 0; i < phba->max_vpi; i++) {
+                       if (vpi == phba->vpi_ids[i])
+                               break;
+               }
+
+               if (i >= phba->max_vpi) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+                                        "2936 Could not find Vport mapped "
+                                        "to vpi %d\n", vpi);
+                       return NULL;
+               }
+       }
 
        spin_lock_irqsave(&phba->hbalock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        vport = phba->pport;
                else
                        vport = lpfc_find_vport_by_vpid(phba,
-                               icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+                                               icmd->unsli3.rcvsli3.vpi);
        }
+
        /* If there are no BDEs associated
         * with this IOCB, there is nothing to do.
         */
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
                elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
                /* Set the ulpContext to the vpi */
-               elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+               elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
        } else {
                /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
                icmd->ulpCt_h = 1;
index 7a35df5..18d0dbf 100644 (file)
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
-               lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+               lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
               sizeof (struct serv_parm));
-       if (phba->cfg_soft_wwnn)
-               u64_to_wwn(phba->cfg_soft_wwnn,
-                          vport->fc_sparam.nodeName.u.wwn);
-       if (phba->cfg_soft_wwpn)
-               u64_to_wwn(phba->cfg_soft_wwpn,
-                          vport->fc_sparam.portName.u.wwn);
-       memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
-              sizeof(vport->fc_nodename));
-       memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-              sizeof(vport->fc_portname));
+       lpfc_update_vport_wwn(vport);
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
                memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                return;
        }
 
-       ndlp->nlp_rpi = mb->un.varWords[0];
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
                return;
        }
 
-       ndlp->nlp_rpi = mb->un.varWords[0];
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
 
-
        if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);
 
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t    *mbox;
        int rc;
+       uint16_t rpi;
 
        if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
-                       lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+                       /* SLI4 ports require the physical rpi value. */
+                       rpi = ndlp->nlp_rpi;
+                       if (phba->sli_rev == LPFC_SLI_REV4)
+                               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+                       lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
                        mbox->vport = vport;
                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
-               lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+               lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+                                mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
-               lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+               lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+                              mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
        if (num_sent)
                return;
 
-       /*
-        * For SLI3, cmpl_reg_vpi will set port_state to READY, and
-        * continue discovery.
-        */
+       /* Register the VPI for SLI3, NON-NPIV only. */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
                if (phba->sli_rev < LPFC_SLI_REV4) {
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                lpfc_issue_reg_vpi(phba, vport);
-                       else  { /* NPIV Not enabled */
+                       else  {
                                lpfc_issue_clear_la(phba, vport);
                                vport->port_state = LPFC_VPORT_READY;
                        }
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        pmb->context1 = NULL;
        pmb->context2 = NULL;
 
-       ndlp->nlp_rpi = mb->un.varWords[0];
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                shost = lpfc_shost_from_vport(vports[i]);
                spin_lock_irq(shost->host_lock);
+               /*
+                * If the CVL_RCVD bit is not set then we have sent the
+                * flogi.  If dev_loss fires while we are waiting, we do
+                * not want to unreg the fcf.
+                */
+               if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+                       spin_unlock_irq(shost->host_lock);
+                       ret =  1;
+                       goto out;
+               }
                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
                        if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
                          (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
index 86b6f7e..9059524 100644 (file)
@@ -64,6 +64,8 @@
 #define SLI3_IOCB_CMD_SIZE     128
 #define SLI3_IOCB_RSP_SIZE     64
 
+#define LPFC_UNREG_ALL_RPIS_VPORT      0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS       0xffffffff
 
 /* vendor ID used in SCSI netlink calls */
 #define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ {                        /* Structure is in Big Endian format */
 #define rrq_rxid_WORD          rrq_exchg
 };
 
+#define LPFC_MAX_VFN_PER_PFN   255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN   0   /* Default VFs due to platform limitation*/
 
 struct RTV_RSP {               /* Structure is in Big Endian format */
        uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
 #define PCI_DEVICE_ID_BALIUS        0xe131
 #define PCI_DEVICE_ID_PROTEUS_PF    0xe180
 #define PCI_DEVICE_ID_LANCER_FC     0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF  0xe208
 #define PCI_DEVICE_ID_LANCER_FCOE   0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
 #define PCI_DEVICE_ID_SAT_SMB       0xf011
 #define PCI_DEVICE_ID_SAT_MID       0xf015
 #define PCI_DEVICE_ID_RFLY          0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
 #define MAILBOX_EXT_SIZE       (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
 #define MAILBOX_HBA_EXT_OFFSET  0x100
 /* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE   PAGE_SIZE
+#define MAILBOX_SYSFS_MAX      4096
 
 typedef union {
        uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
index 4dff668..11e26a2 100644 (file)
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
 #define LPFC_PCI_FUNC3         3
 #define LPFC_PCI_FUNC4         4
 
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET       0x400
+#define LPFC_CTL_PORT_STA_OFFSET       0x404
+#define LPFC_CTL_PORT_CTL_OFFSET       0x408
+#define LPFC_CTL_PORT_ER1_OFFSET       0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET       0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET       0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST         0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST         0x00000002
+#define LPFC_CTL_PDEV_CTL_DD           0x00000004
+#define LPFC_CTL_PDEV_CTL_LC           0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL      0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE  0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC      0x20
+
+#define LPFC_FW_DUMP_REQUEST    (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
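+/*
+ * Illustrative sketch, not part of this change: a firmware dump would
+ * typically be requested by or-ing LPFC_FW_DUMP_REQUEST into the if_type 2
+ * physical device control register, roughly (register base name assumed):
+ *
+ *     reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ *                     LPFC_CTL_PDEV_CTL_OFFSET);
+ *     writel(reg_val | LPFC_FW_DUMP_REQUEST,
+ *            phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+ */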
 /* Active interrupt test count */
 #define LPFC_ACT_INTR_CNT      4
 
@@ -210,9 +229,26 @@ struct ulp_bde64 {
 
 struct lpfc_sli4_flags {
        uint32_t word0;
-#define lpfc_fip_flag_SHIFT 0
-#define lpfc_fip_flag_MASK 0x00000001
-#define lpfc_fip_flag_WORD word0
+#define lpfc_idx_rsrc_rdy_SHIFT                0
+#define lpfc_idx_rsrc_rdy_MASK         0x00000001
+#define lpfc_idx_rsrc_rdy_WORD         word0
+#define LPFC_IDX_RSRC_RDY              1
+#define lpfc_xri_rsrc_rdy_SHIFT                1
+#define lpfc_xri_rsrc_rdy_MASK         0x00000001
+#define lpfc_xri_rsrc_rdy_WORD         word0
+#define LPFC_XRI_RSRC_RDY              1
+#define lpfc_rpi_rsrc_rdy_SHIFT                2
+#define lpfc_rpi_rsrc_rdy_MASK         0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD         word0
+#define LPFC_RPI_RSRC_RDY              1
+#define lpfc_vpi_rsrc_rdy_SHIFT                3
+#define lpfc_vpi_rsrc_rdy_MASK         0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD         word0
+#define LPFC_VPI_RSRC_RDY              1
+#define lpfc_vfi_rsrc_rdy_SHIFT                4
+#define lpfc_vfi_rsrc_rdy_MASK         0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD         word0
+#define LPFC_VFI_RSRC_RDY              1
 };
 
 struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
 #define lpfc_mbox_hdr_version_SHIFT    0
 #define lpfc_mbox_hdr_version_MASK     0x000000FF
 #define lpfc_mbox_hdr_version_WORD     word9
+#define lpfc_mbox_hdr_pf_num_SHIFT     16
+#define lpfc_mbox_hdr_pf_num_MASK      0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD      word9
+#define lpfc_mbox_hdr_vh_num_SHIFT     24
+#define lpfc_mbox_hdr_vh_num_MASK      0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD      word9
 #define LPFC_Q_CREATE_VERSION_2        2
 #define LPFC_Q_CREATE_VERSION_1        1
 #define LPFC_Q_CREATE_VERSION_0        0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
        } response;
 };
 
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
 struct mbox_header {
        struct lpfc_sli4_cfg_mhdr cfg_mhdr;
        union  lpfc_sli4_cfg_shdr cfg_shdr;
 };
 
+#define LPFC_EXTENT_LOCAL              0
+#define LPFC_TIMEOUT_DEFAULT           0
+#define LPFC_EXTENT_VERSION_DEFAULT    0
+
 /* Subsystem Definitions */
 #define LPFC_MBOX_SUBSYSTEM_COMMON     0x1
 #define LPFC_MBOX_SUBSYSTEM_FCOE       0xC
@@ -794,6 +846,13 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG          0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET                0x3D
 #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT         0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO  0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT     0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT   0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG    0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG    0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT          0xAC
 #define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS   0xB5
 
 /* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF               0x0A
 #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE                0x0B
 #define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF           0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE          0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK       0x23
 
 /* Mailbox command structures */
 struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
        } u;
 };
 
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT       0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK                0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD                word4
+               } req;
+               struct {
+                       uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT                0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK         0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD         word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT       16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK                0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD                word4
+               } rsp;
+       } u;
+};
+
+struct lpfc_id_range {
+       uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK  0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD  word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK  0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD  word5
+};
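+
+/*
+ * Illustrative sketch, not part of this change: each lpfc_id_range word packs
+ * two 16-bit resource identifiers, so a response list is unpacked in pairs
+ * with the bf_get() accessor, roughly:
+ *
+ *     id0 = bf_get(lpfc_mbx_rsrc_id_word4_0, &id_array[i]);
+ *     id1 = bf_get(lpfc_mbx_rsrc_id_word4_1, &id_array[i]);
+ *
+ * where id_array is assumed to point at the id[] list in the extent response.
+ */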
+
+struct lpfc_mbx_set_link_diag_state {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT     0
+#define lpfc_mbx_set_diag_state_diag_MASK      0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD      word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK  0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD  word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+               } req;
+               struct {
+                       uint32_t word0;
+               } rsp;
+       } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT      0
+#define lpfc_mbx_set_diag_lpbk_type_MASK       0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD       word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE                0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL       0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL       0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT  16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK   0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD   word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK  0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD  word0
+               } req;
+               struct {
+                       uint32_t word0;
+               } rsp;
+       } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT  16
+#define lpfc_mbx_run_diag_test_link_num_MASK   0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD   word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK  0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD  word0
+                       uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT   0
+#define lpfc_mbx_run_diag_test_test_id_MASK    0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD    word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT     16
+#define lpfc_mbx_run_diag_test_loops_MASK      0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD      word1
+                       uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT  0
+#define lpfc_mbx_run_diag_test_test_ver_MASK   0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD   word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT   16
+#define lpfc_mbx_run_diag_test_err_act_MASK    0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD    word2
+               } req;
+               struct {
+                       uint32_t word0;
+               } rsp;
+       } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mailbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges.  For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK  0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD  word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT  16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK   0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD   word4
+               } req;
+               struct {
+                       uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT        0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+                       struct lpfc_id_range id[53];
+               } rsp;
+       } u;
+};
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in
+ * this structure shares the same SHIFT/MASK/WORD defines as word4 of the
+ * embedded lpfc_mbx_alloc_rsrc_extents structure defined above.  The
+ * non-embedded form provides for the maximum number of extents supported
+ * by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+       union  lpfc_sli4_cfg_shdr cfg_shdr;
+       uint32_t word4;
+       struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+       struct mbox_header header;
+       struct {
+               uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT       0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK                0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD                word4
+       } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
 struct lpfc_mbx_post_hdr_tmpl {
        struct mbox_header header;
        uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
 
        uint32_t word2;
 #define lpfc_sli4_sge_offset_SHIFT     0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK      0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK      0x1FFFFFFF
 #define lpfc_sli4_sge_offset_WORD      word2
 #define lpfc_sli4_sge_last_SHIFT       31 /* Last SEG in the SGL sets
                                                this  flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
 
 struct lpfc_mbx_read_config {
        uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT         0
-#define lpfc_mbx_rd_conf_max_bbc_MASK          0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD          word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT                8
-#define lpfc_mbx_rd_conf_init_bbc_MASK         0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD         word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT    31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK     0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD     word1
        uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT       0
-#define lpfc_mbx_rd_conf_nport_did_MASK                0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD                word2
 #define lpfc_mbx_rd_conf_topology_SHIFT                24
 #define lpfc_mbx_rd_conf_topology_MASK         0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD         word2
-       uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT              0
-#define lpfc_mbx_rd_conf_ao_MASK               0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD               word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT          8
-#define lpfc_mbx_rd_conf_bb_scn_MASK           0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD           word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT         12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK          0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD          word3
-#define lpfc_mbx_rd_conf_mc_SHIFT              29
-#define lpfc_mbx_rd_conf_mc_MASK               0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD               word3
+       uint32_t rsvd_3;
        uint32_t word4;
 #define lpfc_mbx_rd_conf_e_d_tov_SHIFT         0
 #define lpfc_mbx_rd_conf_e_d_tov_MASK          0x0000FFFF
 #define lpfc_mbx_rd_conf_e_d_tov_WORD          word4
-       uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT          0
-#define lpfc_mbx_rd_conf_lp_tov_MASK           0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD           word5
+       uint32_t rsvd_5;
        uint32_t word6;
 #define lpfc_mbx_rd_conf_r_a_tov_SHIFT         0
 #define lpfc_mbx_rd_conf_r_a_tov_MASK          0x0000FFFF
 #define lpfc_mbx_rd_conf_r_a_tov_WORD          word6
-       uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT         0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK          0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD          word7
-       uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT          0
-#define lpfc_mbx_rd_conf_al_tov_MASK           0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD           word8
+       uint32_t rsvd_7;
+       uint32_t rsvd_8;
        uint32_t word9;
 #define lpfc_mbx_rd_conf_lmt_SHIFT             0
 #define lpfc_mbx_rd_conf_lmt_MASK              0x0000FFFF
 #define lpfc_mbx_rd_conf_lmt_WORD              word9
-       uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT                0
-#define lpfc_mbx_rd_conf_max_alpa_MASK         0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD         word10
-       uint32_t word11_rsvd;
+       uint32_t rsvd_10;
+       uint32_t rsvd_11;
        uint32_t word12;
 #define lpfc_mbx_rd_conf_xri_base_SHIFT                0
 #define lpfc_mbx_rd_conf_xri_base_MASK         0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_vfi_count_MASK         0x0000FFFF
 #define lpfc_mbx_rd_conf_vfi_count_WORD         word15
        uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT       0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK                0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD                word16
 #define lpfc_mbx_rd_conf_fcfi_count_SHIFT      16
 #define lpfc_mbx_rd_conf_fcfi_count_MASK       0x0000FFFF
 #define lpfc_mbx_rd_conf_fcfi_count_WORD       word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
 #define cfg_fcoe_SHIFT                         0
 #define cfg_fcoe_MASK                          0x00000001
 #define cfg_fcoe_WORD                          word12
+#define cfg_ext_SHIFT                          1
+#define cfg_ext_MASK                           0x00000001
+#define cfg_ext_WORD                           word12
+#define cfg_hdrr_SHIFT                         2
+#define cfg_hdrr_MASK                          0x00000001
+#define cfg_hdrr_WORD                          word12
 #define cfg_phwq_SHIFT                         15
 #define cfg_phwq_MASK                          0x00000001
 #define cfg_phwq_WORD                          word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
        struct lpfc_sli4_parameters sli4_parameters;
 };
 
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE                   18
+       uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+       uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT         0
+#define lpfc_rsrc_desc_pcie_type_MASK          0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD          word0
+#define LPFC_RSRC_DESC_TYPE_PCIE               0x40
+       uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT                0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK         0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD         word1
+       uint32_t reserved;
+       uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT    0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK     0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD     word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT       8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK                0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD                word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT      16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK       0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD       word3
+       uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT    0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK     0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD     word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+       uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT       0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK                0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD                word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE             0x43
+       uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT      0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK       0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD       word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT      16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK        0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD        word1
+       uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT    0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK     0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD     word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT    16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK     0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD     word2
+       uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT     0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK      0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD      word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT     16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK      0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD      word3
+       uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT     0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK      0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD      word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT    16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK     0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD     word4
+       uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT   0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK    0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD    word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT    16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK     0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD     word5
+       uint32_t word6;
+       uint32_t word7;
+       uint32_t word8;
+       uint32_t word9;
+       uint32_t word10;
+       uint32_t word11;
+       uint32_t word12;
+       uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT     0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK      0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD      word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT      6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK      0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD      word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT                8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK         0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD         word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT                9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK         0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD         word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT     16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK      0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD      word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM                 2
+       uint32_t rsrc_desc_count;
+       struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+       struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE      0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT          0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE           0x2
+       struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM                 2
+       uint32_t rsrc_desc_count;
+       struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+       struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE      0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT          0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE           0x2
+       union {
+               struct {
+                       uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT    0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK     0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD     word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT    8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK     0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD     word10
+               } request;
+               struct {
+                       struct lpfc_prof_cfg prof_cfg;
+               } response;
+       } u;
+};
+
 /* Mailbox Completion Queue Error Messages */
 #define MB_CQE_STATUS_SUCCESS                  0x0
 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES  0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
 #define MB_CEQ_STATUS_QUEUE_FLUSHING           0x4
 #define MB_CQE_STATUS_DMA_FAILED               0x5
 
+#define LPFC_MBX_WR_CONFIG_MAX_BDE             8
+struct lpfc_mbx_wr_object {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT               31
+#define lpfc_wr_object_eof_MASK                        0x00000001
+#define lpfc_wr_object_eof_WORD                        word4
+#define lpfc_wr_object_write_length_SHIFT      0
+#define lpfc_wr_object_write_length_MASK       0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD       word4
+                       uint32_t write_offset;
+                       uint32_t object_name[26];
+                       uint32_t bde_count;
+                       struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+               } request;
+               struct {
+                       uint32_t actual_write_length;
+               } response;
+       } u;
+};
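+
+/*
+ * Illustrative sketch, not part of this change: a WRITE_OBJECT request is
+ * built by filling the request side of the union, roughly:
+ *
+ *     bf_set(lpfc_wr_object_write_length, &wr_object->u.request, byte_cnt);
+ *     bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ *     wr_object->u.request.write_offset = offset;
+ *     wr_object->u.request.bde_count = nbde;
+ *
+ * with up to LPFC_MBX_WR_CONFIG_MAX_BDE BDEs describing the image payload;
+ * wr_object, byte_cnt, offset and nbde are assumed local names.
+ */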
+
 /* mailbox queue entry structure */
 struct lpfc_mqe {
        uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
                struct lpfc_mbx_cq_destroy cq_destroy;
                struct lpfc_mbx_wq_destroy wq_destroy;
                struct lpfc_mbx_rq_destroy rq_destroy;
+               struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+               struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+               struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
                struct lpfc_mbx_post_sgl_pages post_sgl_pages;
                struct lpfc_mbx_nembed_cmd nembed_cmd;
                struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
                struct lpfc_mbx_supp_pages supp_pages;
                struct lpfc_mbx_pc_sli4_params sli4_params;
                struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+               struct lpfc_mbx_set_link_diag_state link_diag_state;
+               struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+               struct lpfc_mbx_run_link_diag_test link_diag_test;
+               struct lpfc_mbx_get_func_cfg get_func_cfg;
+               struct lpfc_mbx_get_prof_cfg get_prof_cfg;
                struct lpfc_mbx_nop nop;
+               struct lpfc_mbx_wr_object wr_object;
        } un;
 };
 
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
 #define SGL_ALIGN_SZ 64
 #define SGL_PAGE_SIZE 4096
 /* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI  ((uint16_t)-1)
+#define NO_XRI  0xffff
 
 struct wqe_common {
        uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
        struct gen_req64_wqe gen_req;
 };
 
+#define LPFC_GROUP_OJECT_MAGIC_NUM             0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP                   0xf7
+#define LPFC_FILE_ID_GROUP                     0xa2
+struct lpfc_grp_hdr {
+       uint32_t size;
+       uint32_t magic_number;
+       uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT   24
+#define lpfc_grp_hdr_file_type_MASK    0x000000FF
+#define lpfc_grp_hdr_file_type_WORD    word2
+#define lpfc_grp_hdr_id_SHIFT          16
+#define lpfc_grp_hdr_id_MASK           0x000000FF
+#define lpfc_grp_hdr_id_WORD           word2
+       uint8_t rev_name[128];
+};
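+
+/*
+ * struct lpfc_grp_hdr describes the header expected at the start of a
+ * firmware image obtained via request_firmware(); the magic number, file
+ * type and id fields above give the driver a way to validate an image
+ * before its payload is written to the port with the WRITE_OBJECT mailbox
+ * command (see lpfc_wr_object()).
+ */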
+
 #define FCP_COMMAND 0x0
 #define FCP_COMMAND_DATA_OUT 0x1
 #define ELS_COMMAND_NON_FIP 0xC
 #define ELS_COMMAND_FIP 0xD
 #define OTHER_COMMAND 0x8
 
+#define LPFC_FW_DUMP   1
+#define LPFC_FW_RESET  2
+#define LPFC_DV_RESET  3
index 7dda036..148b98d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/ctype.h>
 #include <linux/aer.h>
 #include <linux/slab.h>
+#include <linux/firmware.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
-
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 }
 
 /**
+ * lpfc_update_vport_wwn - Update the vport's fc_nodename and fc_portname
+ *     from the service parameters, honoring cfg_soft_wwnn and cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * Return codes
+ *   None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+       /* If a soft name exists then update the service params with it */
+       if (vport->phba->cfg_soft_wwnn)
+               u64_to_wwn(vport->phba->cfg_soft_wwnn,
+                          vport->fc_sparam.nodeName.u.wwn);
+       if (vport->phba->cfg_soft_wwpn)
+               u64_to_wwn(vport->phba->cfg_soft_wwpn,
+                          vport->fc_sparam.portName.u.wwn);
+
+       /*
+        * If the name is empty or there exists a soft name
+        * then copy the service params name, otherwise use the fc name
+        */
+       if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+               memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+                       sizeof(struct lpfc_name));
+       else
+               memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+                       sizeof(struct lpfc_name));
+
+       if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+               memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+                       sizeof(struct lpfc_name));
+       else
+               memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+                       sizeof(struct lpfc_name));
+}
+
+/**
  * lpfc_config_port_post - Perform lpfc initialization after config port
  * @phba: pointer to lpfc hba data structure.
  *
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->context1 = NULL;
-
-       if (phba->cfg_soft_wwnn)
-               u64_to_wwn(phba->cfg_soft_wwnn,
-                          vport->fc_sparam.nodeName.u.wwn);
-       if (phba->cfg_soft_wwpn)
-               u64_to_wwn(phba->cfg_soft_wwpn,
-                          vport->fc_sparam.portName.u.wwn);
-       memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
-              sizeof (struct lpfc_name));
-       memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-              sizeof (struct lpfc_name));
+       lpfc_update_vport_wwn(vport);
 
        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
-
                        phba->link_state = LPFC_HBA_ERROR;
                        if (rc != MBX_BUSY)
                                mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
                && descp && descp[0] != '\0')
                return;
 
-       if (phba->lmt & LMT_10Gb)
+       if (phba->lmt & LMT_16Gb)
+               max_speed = 16;
+       else if (phba->lmt & LMT_10Gb)
                max_speed = 10;
        else if (phba->lmt & LMT_8Gb)
                max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
                                "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LANCER_FC:
-               oneConnect = 1;
-               m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+       case PCI_DEVICE_ID_LANCER_FC_VF:
+               m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
                break;
        case PCI_DEVICE_ID_LANCER_FCOE:
+       case PCI_DEVICE_ID_LANCER_FCOE_VF:
                oneConnect = 1;
-               m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+               m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
                break;
        default:
                m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
        if (mdp && mdp[0] == '\0')
                snprintf(mdp, 79,"%s", m.name);
-       /* oneConnect hba requires special processing, they are all initiators
+       /*
+        * oneConnect HBAs require special processing; they are all initiators
         * and we put the port number on the end
         */
        if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
                kfree(io);
                phba->total_iocbq_bufs--;
        }
+
        spin_unlock_irq(&phba->hbalock);
        return 0;
 }
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2718 Clear Virtual Link Received for VPI 0x%x"
                        " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
                vport = lpfc_find_vport_by_vpid(phba,
                                acqe_fip->index - phba->vpi_base);
                ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
        pci_try_set_mwi(pdev);
        pci_save_state(pdev);
 
+       /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+       if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+               pdev->needs_freset = 1;
+
        return 0;
 
 out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables @nr_vfn PCI SR-IOV virtual functions on the physical
+ * function by invoking the PCI SR-IOV API, pci_enable_sriov().  Because not
+ * all devices support SR-IOV, a failure returned by pci_enable_sriov() is not
+ * considered an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+       struct pci_dev *pdev = phba->pcidev;
+       int rc;
+
+       rc = pci_enable_sriov(pdev, nr_vfn);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2806 Failed to enable sriov on this device "
+                               "with vfn number nr_vf:%d, rc:%d\n",
+                               nr_vfn, rc);
+       } else
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2807 Successful enable sriov on this device "
+                               "with vfn number nr_vf:%d\n", nr_vfn);
+       return rc;
+}
+
+/**
  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -4011,6 +4079,7 @@ static int
 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 {
        struct lpfc_sli *psli;
+       int rc;
 
        /*
         * Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
        if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
                return -ENOMEM;
 
+       /*
+        * Enable sr-iov virtual functions if supported and configured
+        * through the module parameter.
+        */
+       if (phba->cfg_sriov_nr_virtfn > 0) {
+               rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+                                                phba->cfg_sriov_nr_virtfn);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "2808 Requested number of SR-IOV "
+                                       "virtual functions (%d) is not "
+                                       "supported\n",
+                                       phba->cfg_sriov_nr_virtfn);
+                       phba->cfg_sriov_nr_virtfn = 0;
+               }
+       }
+
        return 0;
 }
 
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        phba->fcf.redisc_wait.data = (unsigned long)phba;
 
        /*
+        * Control structure for handling external multi-buffer mailbox
+        * command pass-through.
+        */
+       memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+               sizeof(struct lpfc_mbox_ext_buf_ctx));
+       INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+       /*
         * We need to do a READ_CONFIG mailbox command here before
         * calling lpfc_get_cfgparam. For VFs this will report the
         * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
 
        /*
-        * Initialize dirver internal slow-path work queues
+        * Initialize driver internal slow-path work queues
         */
 
        /* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* Receive queue CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
 
+       /* Initialize extent block lists. */
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+       INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
        /* Initialize the driver internal SLI layer lists. */
        lpfc_sli_setup(phba);
        lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        }
        /*
         * Get sli4 parameters that override parameters from Port capabilities.
-        * If this call fails it is not a critical error so continue loading.
+        * If this call fails, it isn't critical unless the SLI4 parameters come
+        * back in conflict.
         */
-       lpfc_get_sli4_parameters(phba, mboxq);
+       rc = lpfc_get_sli4_parameters(phba, mboxq);
+       if (rc) {
+               if (phba->sli4_hba.extents_in_use &&
+                   phba->sli4_hba.rpi_hdrs_in_use) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2999 Unsupported SLI4 Parameters "
+                               "Extents and RPI headers enabled.\n");
+                       goto out_free_bsmbx;
+               }
+       }
        mempool_free(mboxq, phba->mbox_mem_pool);
        /* Create all the SLI4 queues */
        rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                                "1430 Failed to initialize sgl list.\n");
                goto out_free_sgl_list;
        }
-
        rc = lpfc_sli4_init_rpi_hdrs(phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2759 Failed allocate memory for FCF round "
                                "robin failover bmask\n");
+               rc = -ENOMEM;
                goto out_remove_rpi_hdrs;
        }
 
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2572 Failed allocate memory for fast-path "
                                "per-EQ handle array\n");
+               rc = -ENOMEM;
                goto out_free_fcf_rr_bmask;
        }
 
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2573 Failed allocate memory for msi-x "
                                "interrupt vector entries\n");
+               rc = -ENOMEM;
                goto out_free_fcp_eq_hdl;
        }
 
+       /*
+        * Enable sr-iov virtual functions if supported and configured
+        * through the module parameter.
+        */
+       if (phba->cfg_sriov_nr_virtfn > 0) {
+               rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+                                                phba->cfg_sriov_nr_virtfn);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "3020 Requested number of SR-IOV "
+                                       "virtual functions (%d) is not "
+                                       "supported\n",
+                                       phba->cfg_sriov_nr_virtfn);
+                       phba->cfg_sriov_nr_virtfn = 0;
+               }
+       }
+
        return rc;
 
 out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
        lpfc_sli4_cq_event_release_all(phba);
        lpfc_sli4_cq_event_pool_destroy(phba);
 
+       /* Release resource identifiers. */
+       lpfc_sli4_dealloc_resource_identifiers(phba);
+
        /* Free the bsmbx region. */
        lpfc_destroy_bootstrap_mbox(phba);
 
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
                                "Unloading driver.\n", __func__);
                        goto out_free_iocbq;
                }
+               iocbq_entry->sli4_lxritag = NO_XRI;
                iocbq_entry->sli4_xritag = NO_XRI;
 
                spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 
        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                               "2400 lpfc_init_sgl_list els %d.\n",
+                               "2400 ELS XRI count %d.\n",
                                els_xri_cnt);
        /* Initialize and populate the sglq list per host/VF. */
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
        phba->sli4_hba.scsi_xri_max =
                        phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
        phba->sli4_hba.scsi_xri_cnt = 0;
-
        phba->sli4_hba.lpfc_scsi_psb_array =
                        kzalloc((sizeof(struct lpfc_scsi_buf *) *
                        phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
                        goto out_free_mem;
                }
 
-               sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
-               if (sglq_entry->sli4_xritag == NO_XRI) {
-                       kfree(sglq_entry);
-                       printk(KERN_ERR "%s: failed to allocate XRI.\n"
-                               "Unloading driver.\n", __func__);
-                       goto out_free_mem;
-               }
                sglq_entry->buff_type = GEN_BUFF_TYPE;
                sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
                if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
 {
        int rc = 0;
-       int longs;
-       uint16_t rpi_count;
        struct lpfc_rpi_hdr *rpi_hdr;
 
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
        /*
-        * Provision an rpi bitmask range for discovery. The total count
-        * is the difference between max and base + 1.
+        * If the SLI4 port supports extents, posting the rpi header isn't
+        * required.  Set the expected maximum count and let the actual value
+        * get set when extents are fully allocated.
         */
-       rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
-                   phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
-       longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
-       phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
-                                          GFP_KERNEL);
-       if (!phba->sli4_hba.rpi_bmask)
-               return -ENOMEM;
+       if (!phba->sli4_hba.rpi_hdrs_in_use) {
+               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+               return rc;
+       }
+       if (phba->sli4_hba.extents_in_use)
+               return -EIO;
 
        rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
        if (!rpi_hdr) {
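
With this hunk, lpfc_sli4_init_rpi_hdrs() posts RPI header pages only when the port actually uses them, and treats a port that reports extents here as an error. A compact, self-contained illustration of just that branch logic; the names and the max_rpi value are placeholders:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative branch logic: a port that does not post rpi headers simply
 * takes max_rpi as next_rpi; a port that reports extents here is treated
 * as an error. */
static int init_rpi_hdrs(bool rpi_hdrs_in_use, bool extents_in_use,
                         unsigned int max_rpi, unsigned int *next_rpi)
{
        if (!rpi_hdrs_in_use) {
                *next_rpi = max_rpi;    /* no header posting required */
                return 0;
        }
        if (extents_in_use)
                return -EIO;            /* extents conflict with header posting */
        /* ...the real driver would post the first rpi header page here... */
        return 0;
}

int main(void)
{
        unsigned int next_rpi = 0;
        int rc = init_rpi_hdrs(false, false, 64, &next_rpi);

        printf("rc=%d next_rpi=%u\n", rc, next_rpi);    /* rc=0 next_rpi=64 */
        return 0;
}
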
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        struct lpfc_rpi_hdr *rpi_hdr;
        uint32_t rpi_count;
 
+       /*
+        * If the SLI4 port supports extents, or if rpi headers are not in
+        * use, no rpi header page is posted, so there is nothing to create
+        * here.
+        */
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
+               return NULL;
+       if (phba->sli4_hba.extents_in_use)
+               return NULL;
+
+       /* The limit on the logical index is just the max_rpi count. */
        rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
-                   phba->sli4_hba.max_cfg_param.max_rpi - 1;
+                   phba->sli4_hba.max_cfg_param.max_rpi - 1;
 
        spin_lock_irq(&phba->hbalock);
-       curr_rpi_range = phba->sli4_hba.next_rpi;
+       /*
+        * Establish the starting RPI in this header block.  The starting
+        * rpi is normalized to a zero base because the physical rpi is
+        * port based.
+        */
+       curr_rpi_range = phba->sli4_hba.next_rpi -
+               phba->sli4_hba.max_cfg_param.rpi_base;
        spin_unlock_irq(&phba->hbalock);
 
        /*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        else
                rpi_count = LPFC_RPI_HDR_COUNT;
 
+       if (!rpi_count)
+               return NULL;
        /*
         * First allocate the protocol header region for the port.  The
         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
        rpi_hdr->page_count = 1;
        spin_lock_irq(&phba->hbalock);
-       rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+       /* The rpi_hdr stores the logical index only. */
+       rpi_hdr->start_rpi = curr_rpi_range;
        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
 
        /*
-        * The next_rpi stores the next module-64 rpi value to post
-        * in any subsequent rpi memory region postings.
+        * The next_rpi stores the next logical modulo-64 rpi value used
+        * to post physical rpis in subsequent rpi postings.
         */
        phba->sli4_hba.next_rpi += rpi_count;
        spin_unlock_irq(&phba->hbalock);
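
The normalization above records a zero-based (logical) start_rpi in each header block while next_rpi keeps advancing by the block size. A toy model of that bookkeeping, assuming an invented rpi_base of 128 and 64-RPI blocks:

#include <stdio.h>

/* Minimal model of the logical-index bookkeeping: the header block records
 * a zero-based starting RPI, while next_rpi advances by the block size for
 * the following posting.  Field names are illustrative. */
struct rpi_state {
        unsigned int rpi_base;  /* physical base reported by the port */
        unsigned int next_rpi;  /* next physical RPI to cover */
};

static unsigned int post_rpi_hdr(struct rpi_state *s, unsigned int rpi_count)
{
        /* Normalize to zero base because the physical RPI is port relative. */
        unsigned int start_rpi = s->next_rpi - s->rpi_base;

        s->next_rpi += rpi_count;       /* next block starts after this one */
        return start_rpi;               /* what rpi_hdr->start_rpi would hold */
}

int main(void)
{
        struct rpi_state s = { .rpi_base = 128, .next_rpi = 128 };

        printf("hdr0 start_rpi=%u\n", post_rpi_hdr(&s, 64));   /* 0 */
        printf("hdr1 start_rpi=%u\n", post_rpi_hdr(&s, 64));   /* 64 */
        return 0;
}
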
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
  **/
 void
 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
 {
        struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
 
+       if (!phba->sli4_hba.rpi_hdrs_in_use)
+               goto exit;
+
        list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
                                 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
                list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
                kfree(rpi_hdr->dmabuf);
                kfree(rpi_hdr);
        }
-
-       phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
-       memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+       /* There are no rpis available to the port now. */
+       phba->sli4_hba.next_rpi = 0;
 }
 
 /**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
                        /* Final checks.  The port status should be clean. */
                        if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                                &reg_data.word0) ||
-                               bf_get(lpfc_sliport_status_err, &reg_data)) {
+                               (bf_get(lpfc_sliport_status_err, &reg_data) &&
+                                !bf_get(lpfc_sliport_status_rn, &reg_data))) {
                                phba->work_status[0] =
                                        readl(phba->sli4_hba.u.if_type2.
                                              ERR1regaddr);
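
The widened status check above treats the error bit as fatal only when the port is not also flagging a reset-needed condition. A tiny predicate sketch of that logic, with invented bit positions:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the status test: an error indication is only fatal when the
 * port is not simply asking for a reset (the "RN" bit). */
#define STATUS_ERR      (1u << 0)
#define STATUS_RN       (1u << 1)

static bool port_status_bad(bool read_failed, unsigned int status)
{
        return read_failed ||
               ((status & STATUS_ERR) && !(status & STATUS_RN));
}

int main(void)
{
        printf("%d\n", port_status_bad(false, STATUS_ERR));             /* 1 */
        printf("%d\n", port_status_bad(false, STATUS_ERR | STATUS_RN)); /* 0 */
        return 0;
}
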
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 {
        LPFC_MBOXQ_t *pmb;
        struct lpfc_mbx_read_config *rd_config;
-       uint32_t rc = 0;
+       union  lpfc_sli4_cfg_shdr *shdr;
+       uint32_t shdr_status, shdr_add_status;
+       struct lpfc_mbx_get_func_cfg *get_func_cfg;
+       struct lpfc_rsrc_desc_fcfcoe *desc;
+       uint32_t desc_count;
+       int length, i, rc = 0;
 
        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                rc = -EIO;
        } else {
                rd_config = &pmb->u.mqe.un.rd_config;
+               phba->sli4_hba.extents_in_use =
+                       bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
                phba->sli4_hba.max_cfg_param.max_xri =
                        bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
                phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                        bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
                phba->sli4_hba.max_cfg_param.max_fcfi =
                        bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
-               phba->sli4_hba.max_cfg_param.fcfi_base =
-                       bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
                phba->sli4_hba.max_cfg_param.max_eq =
                        bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
                phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
                phba->max_vports = phba->max_vpi;
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                               "2003 cfg params XRI(B:%d M:%d), "
+                               "2003 cfg params Extents? %d "
+                               "XRI(B:%d M:%d), "
                                "VPI(B:%d M:%d) "
                                "VFI(B:%d M:%d) "
                                "RPI(B:%d M:%d) "
-                               "FCFI(B:%d M:%d)\n",
+                               "FCFI(Count:%d)\n",
+                               phba->sli4_hba.extents_in_use,
                                phba->sli4_hba.max_cfg_param.xri_base,
                                phba->sli4_hba.max_cfg_param.max_xri,
                                phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                phba->sli4_hba.max_cfg_param.max_vfi,
                                phba->sli4_hba.max_cfg_param.rpi_base,
                                phba->sli4_hba.max_cfg_param.max_rpi,
-                               phba->sli4_hba.max_cfg_param.fcfi_base,
                                phba->sli4_hba.max_cfg_param.max_fcfi);
        }
-       mempool_free(pmb, phba->mbox_mem_pool);
+
+       if (rc)
+               goto read_cfg_out;
 
        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                phba->cfg_hba_queue_depth =
                        phba->sli4_hba.max_cfg_param.max_xri -
                                lpfc_sli4_get_els_iocb_cnt(phba);
+
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+           LPFC_SLI_INTF_IF_TYPE_2)
+               goto read_cfg_out;
+
+       /* get the pf# and vf# for SLI4 if_type 2 port */
+       length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+                        length, LPFC_SLI4_MBX_EMBED);
+
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *)
+                               &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc || shdr_status || shdr_add_status) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "3026 Mailbox failed , mbxCmd x%x "
+                               "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+                               bf_get(lpfc_mqe_command, &pmb->u.mqe),
+                               bf_get(lpfc_mqe_status, &pmb->u.mqe));
+               rc = -EIO;
+               goto read_cfg_out;
+       }
+
+       /* search for fc_fcoe resource descriptor */
+       get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+       desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+       for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+               desc = (struct lpfc_rsrc_desc_fcfcoe *)
+                       &get_func_cfg->func_cfg.desc[i];
+               if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+                   bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+                       phba->sli4_hba.iov.pf_number =
+                               bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+                       phba->sli4_hba.iov.vf_number =
+                               bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+                       break;
+               }
+       }
+
+       if (i < LPFC_RSRC_DESC_MAX_NUM)
+               lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                               "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+                               "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+                               phba->sli4_hba.iov.vf_number);
+       else {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "3028 GET_FUNCTION_CONFIG: failed to find "
+                               "Resrouce Descriptor:x%x\n",
+                               LPFC_RSRC_DESC_TYPE_FCFCOE);
+               rc = -EIO;
+       }
+
+read_cfg_out:
+       mempool_free(pmb, phba->mbox_mem_pool);
        return rc;
 }
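
The GET_FUNCTION_CONFIG handling added above scans the returned resource descriptors for the FC/FCoE entry and extracts the PF and VF numbers from it. A simplified, self-contained model of that scan; the struct layout and type value below are stand-ins, not the SLI4 wire format:

#include <stdio.h>

#define DESC_MAX         2
#define DESC_TYPE_FCFCOE 0x43

struct rsrc_desc {
        unsigned int type;
        unsigned int pf_number;
        unsigned int vf_number;
};

/* Walk a fixed-size descriptor array and stop at the first FC/FCoE entry. */
static int find_fcfcoe_desc(const struct rsrc_desc *desc, int count,
                            unsigned int *pf, unsigned int *vf)
{
        for (int i = 0; i < count; i++) {
                if (desc[i].type == DESC_TYPE_FCFCOE) {
                        *pf = desc[i].pf_number;
                        *vf = desc[i].vf_number;
                        return 0;
                }
        }
        return -1;      /* no FC/FCoE descriptor found */
}

int main(void)
{
        struct rsrc_desc descs[DESC_MAX] = {
                { .type = 0x50, .pf_number = 0, .vf_number = 0 },
                { .type = DESC_TYPE_FCFCOE, .pf_number = 1, .vf_number = 3 },
        };
        unsigned int pf, vf;

        if (!find_fcfcoe_desc(descs, DESC_MAX, &pf, &vf))
                printf("pf=%u vf=%u\n", pf, vf);
        return 0;
}
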
 
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        phba->sli4_hba.mbx_cq = NULL;
 
        /* Release FCP response complete queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+       fcp_qidx = 0;
+       do
                lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+       while (++fcp_qidx < phba->cfg_fcp_eq_count);
        kfree(phba->sli4_hba.fcp_cq);
        phba->sli4_hba.fcp_cq = NULL;
 
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.sp_eq->queue_id);
 
        /* Set up fast-path FCP Response Complete Queue */
-       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+       fcp_cqidx = 0;
+       do {
                if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0526 Fast-path FCP CQ (%d) not "
                                        "allocated\n", fcp_cqidx);
                        goto out_destroy_fcp_cq;
                }
-               rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
-                                   phba->sli4_hba.fp_eq[fcp_cqidx],
-                                   LPFC_WCQ, LPFC_FCP);
+               if (phba->cfg_fcp_eq_count)
+                       rc = lpfc_cq_create(phba,
+                                           phba->sli4_hba.fcp_cq[fcp_cqidx],
+                                           phba->sli4_hba.fp_eq[fcp_cqidx],
+                                           LPFC_WCQ, LPFC_FCP);
+               else
+                       rc = lpfc_cq_create(phba,
+                                           phba->sli4_hba.fcp_cq[fcp_cqidx],
+                                           phba->sli4_hba.sp_eq,
+                                           LPFC_WCQ, LPFC_FCP);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                }
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "2588 FCP CQ setup: cq[%d]-id=%d, "
-                               "parent eq[%d]-id=%d\n",
+                               "parent %seq[%d]-id=%d\n",
                                fcp_cqidx,
                                phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+                               (phba->cfg_fcp_eq_count) ? "" : "sp_",
                                fcp_cqidx,
-                               phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
-       }
+                               (phba->cfg_fcp_eq_count) ?
+                                  phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+                                  phba->sli4_hba.sp_eq->queue_id);
+       } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
 
        /*
         * Set up all the Work Queues (WQs)
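
When cfg_fcp_eq_count is zero there are no dedicated fast-path event queues, so the hunks above create every FCP completion queue against the slow-path EQ instead. A one-function illustration of that parent-EQ choice, using arbitrary queue ids:

#include <stdio.h>

/* With no fast-path EQs every FCP completion queue binds to the slow-path
 * EQ.  The ids below are arbitrary placeholders. */
static int parent_eq_id(int cfg_fcp_eq_count, int cq_index,
                        const int *fp_eq_ids, int sp_eq_id)
{
        return cfg_fcp_eq_count ? fp_eq_ids[cq_index] : sp_eq_id;
}

int main(void)
{
        int fp_eq_ids[2] = { 10, 11 };

        printf("%d\n", parent_eq_id(2, 1, fp_eq_ids, 5));       /* 11 */
        printf("%d\n", parent_eq_id(0, 0, fp_eq_ids, 5));       /* 5  */
        return 0;
}
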
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                                fcp_cq_index,
                                phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
                /* Round robin FCP Work Queue's Completion Queue assignment */
-               fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+               if (phba->cfg_fcp_eq_count)
+                       fcp_cq_index = ((fcp_cq_index + 1) %
+                                       phba->cfg_fcp_eq_count);
        }
 
        /*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
                        if (rdy_chk < 1000)
                                break;
                }
+               /* delay driver action following IF_TYPE_2 function reset */
+               msleep(100);
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
        /*
         * Assign MSI-X vectors to interrupt handlers
         */
-
-       /* The first vector must associated to slow-path handler for MQ */
-       rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
-                        &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
-                        LPFC_SP_DRIVER_HANDLER_NAME, phba);
+       if (vectors > 1)
+               rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+                                &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+                                LPFC_SP_DRIVER_HANDLER_NAME, phba);
+       else
+               /* All Interrupts need to be handled by one EQ */
+               rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+                                &lpfc_sli4_intr_handler, IRQF_SHARED,
+                                LPFC_DRIVER_NAME, phba);
        if (rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 {
        int wait_cnt = 0;
        LPFC_MBOXQ_t *mboxq;
+       struct pci_dev *pdev = phba->pcidev;
 
        lpfc_stop_hba_timers(phba);
        phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        /* Disable PCI subsystem interrupt */
        lpfc_sli4_disable_intr(phba);
 
+       /* Disable SR-IOV if enabled */
+       if (phba->cfg_sriov_nr_virtfn)
+               pci_disable_sriov(pdev);
+
        /* Stop kthread signal shall trigger work_done one more time */
        kthread_stop(phba->worker_thread);
 
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
        sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
        sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+       /* Make sure that sge_supp_len can be handled by the driver */
+       if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+               sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
        return rc;
 }
 
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        int length;
        struct lpfc_sli4_parameters *mbx_sli4_parameters;
 
+       /*
+        * By default, the driver assumes the SLI4 port requires RPI
+        * header postings.  The SLI4_PARAM response will correct this
+        * assumption.
+        */
+       phba->sli4_hba.rpi_hdrs_in_use = 1;
+
        /* Read the port's SLI4 Config Parameters */
        length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                                            mbx_sli4_parameters);
        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
                                           mbx_sli4_parameters);
+       phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+       phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+       /* Make sure that sge_supp_len can be handled by the driver */
+       if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+               sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
        return 0;
 }
 
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 
        lpfc_debugfs_terminate(vport);
 
+       /* Disable SR-IOV if enabled */
+       if (phba->cfg_sriov_nr_virtfn)
+               pci_disable_sriov(pdev);
+
        /* Disable interrupt */
        lpfc_sli_disable_intr(phba);
 
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+       char fwrev[32];
+       struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+       struct list_head dma_buffer_list;
+       int i, rc = 0;
+       struct lpfc_dmabuf *dmabuf, *next;
+       uint32_t offset = 0, temp_offset = 0;
+
+       INIT_LIST_HEAD(&dma_buffer_list);
+       if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+           (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+           (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+           (image->size != fw->size)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3022 Invalid FW image found. "
+                               "Magic:%d Type:%x ID:%x\n",
+                               image->magic_number,
+                               bf_get(lpfc_grp_hdr_file_type, image),
+                               bf_get(lpfc_grp_hdr_id, image));
+               return -EINVAL;
+       }
+       lpfc_decode_firmware_rev(phba, fwrev, 1);
+       if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3023 Updating Firmware. Current Version:%s "
+                               "New Version:%s\n",
+                               fwrev, image->rev_name);
+               for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+                       dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+                                        GFP_KERNEL);
+                       if (!dmabuf) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
+                       dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                                         SLI4_PAGE_SIZE,
+                                                         &dmabuf->phys,
+                                                         GFP_KERNEL);
+                       if (!dmabuf->virt) {
+                               kfree(dmabuf);
+                               rc = -ENOMEM;
+                               goto out;
+                       }
+                       list_add_tail(&dmabuf->list, &dma_buffer_list);
+               }
+               while (offset < fw->size) {
+                       temp_offset = offset;
+                       list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+                               if (offset + SLI4_PAGE_SIZE > fw->size) {
+                                       temp_offset += fw->size - offset;
+                                       memcpy(dmabuf->virt,
+                                              fw->data + temp_offset,
+                                              fw->size - offset);
+                                       break;
+                               }
+                               temp_offset += SLI4_PAGE_SIZE;
+                               memcpy(dmabuf->virt, fw->data + temp_offset,
+                                      SLI4_PAGE_SIZE);
+                       }
+                       rc = lpfc_wr_object(phba, &dma_buffer_list,
+                                   (fw->size - offset), &offset);
+                       if (rc) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "3024 Firmware update failed. "
+                                               "%d\n", rc);
+                               goto out;
+                       }
+               }
+               rc = offset;
+       }
+out:
+       list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+               list_del(&dmabuf->list);
+               dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+                                 dmabuf->virt, dmabuf->phys);
+               kfree(dmabuf);
+       }
+       return rc;
+}
+
+/**
  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
  * @pdev: pointer to PCI device
  * @pid: pointer to PCI device identifier
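
lpfc_write_firmware(), added above, streams the image through a short list of fixed-size DMA buffers, writing one batch per iteration until the whole size has been consumed. The stand-alone sketch below models only the chunking arithmetic with plain arrays and tiny sizes; it is not the driver's DMA or mailbox path, and the constants are invented:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4       /* tiny stand-in for SLI4_PAGE_SIZE */
#define NPAGES  2       /* stand-in for LPFC_MBX_WR_CONFIG_MAX_BDE */

/* Fill up to NPAGES fixed-size buffers from the current offset and report
 * how many bytes were staged; the caller advances offset and repeats. */
static size_t fill_pages(char pages[NPAGES][PAGE_SZ], const char *img,
                         size_t img_len, size_t offset)
{
        size_t copied = 0;

        for (int i = 0; i < NPAGES && offset + copied < img_len; i++) {
                size_t n = img_len - (offset + copied);

                if (n > PAGE_SZ)
                        n = PAGE_SZ;
                memcpy(pages[i], img + offset + copied, n);
                copied += n;
        }
        return copied;
}

int main(void)
{
        const char img[] = "0123456789";        /* 10-byte fake image */
        char pages[NPAGES][PAGE_SZ];
        size_t offset = 0;

        while (offset < sizeof(img) - 1)
                offset += fill_pages(pages, img, sizeof(img) - 1, offset);
        printf("wrote %zu bytes\n", offset);
        return 0;
}
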
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        int error;
        uint32_t cfg_mode, intr_mode;
        int mcnt;
+       int adjusted_fcp_eq_count;
+       int fcp_qidx;
+       const struct firmware *fw;
+       uint8_t file_name[16];
 
        /* Allocate memory for HBA structure */
        phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                        error = -ENODEV;
                        goto out_free_sysfs_attr;
                }
-               /* Default to single FCP EQ for non-MSI-X */
+               /* Default to single EQ for non-MSI-X */
                if (phba->intr_type != MSIX)
-                       phba->cfg_fcp_eq_count = 1;
-               else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
-                       phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+                       adjusted_fcp_eq_count = 0;
+               else if (phba->sli4_hba.msix_vec_nr <
+                                       phba->cfg_fcp_eq_count + 1)
+                       adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+               else
+                       adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+               /* Free unused EQs */
+               for (fcp_qidx = adjusted_fcp_eq_count;
+                    fcp_qidx < phba->cfg_fcp_eq_count;
+                    fcp_qidx++) {
+                       lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+                       /* do not delete the first fcp_cq */
+                       if (fcp_qidx)
+                               lpfc_sli4_queue_free(
+                                       phba->sli4_hba.fcp_cq[fcp_qidx]);
+               }
+               phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
                /* Set up SLI-4 HBA */
                if (lpfc_sli4_hba_setup(phba)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
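
The probe-time adjustment above caps the number of fast-path EQs by the MSI-X vectors actually granted (one vector stays reserved for the slow path) and drops to zero fast-path EQs when MSI-X is unavailable. The arithmetic, as a small sketch:

#include <stdio.h>

static int adjust_fcp_eq_count(int using_msix, int msix_vec_nr,
                               int cfg_fcp_eq_count)
{
        if (!using_msix)
                return 0;                       /* INTx/MSI: slow-path EQ only */
        if (msix_vec_nr < cfg_fcp_eq_count + 1)
                return msix_vec_nr - 1;         /* limited by granted vectors */
        return cfg_fcp_eq_count;                /* enough vectors: unchanged */
}

int main(void)
{
        printf("%d\n", adjust_fcp_eq_count(0, 0, 4));   /* 0 */
        printf("%d\n", adjust_fcp_eq_count(1, 3, 4));   /* 2 */
        printf("%d\n", adjust_fcp_eq_count(1, 8, 4));   /* 4 */
        return 0;
}
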
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);
 
+       /* check for firmware upgrade or downgrade */
+       snprintf(file_name, 16, "%s.grp", phba->ModelName);
+       error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+       if (!error) {
+               lpfc_write_firmware(phba, fw);
+               release_firmware(fw);
+       }
+
        /* Check if there are static vports to be created. */
        lpfc_create_static_vport(phba);
 
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
                PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+               PCI_ANY_ID, PCI_ANY_ID, },
        { 0 }
 };
 
index e6ce903..5567670 100644 (file)
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
        mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
        mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
        mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
-       mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
+       if (phba->sli_rev >= LPFC_SLI_REV3)
+               mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
 
        /* save address for completion */
        pmb->context1 = mp;
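
This and the following lpfc_mbox.c hunks replace the old "logical + base" arithmetic with a lookup through per-adapter id tables (phba->vpi_ids[], phba->sli4_hba.rpi_ids[]) that are filled when resources are allocated. A toy contrast of the two mappings, with made-up table contents:

#include <stdio.h>

static const unsigned short vpi_ids[] = { 100, 101, 102, 103 };

static unsigned short old_physical_vpi(unsigned short vpi, unsigned short base)
{
        return vpi + base;              /* pre-extents arithmetic mapping */
}

static unsigned short new_physical_vpi(unsigned short vpi)
{
        return vpi_ids[vpi];            /* table filled when ids are allocated */
}

int main(void)
{
        printf("old: %u\n", old_physical_vpi(2, 8));    /* 10  */
        printf("new: %u\n", new_physical_vpi(2));       /* 102 */
        return 0;
}
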
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->un.varUnregDID.did = did;
-       if (vpi != 0xffff)
-               vpi += phba->vpi_base;
        mb->un.varUnregDID.vpi = vpi;
+       if ((vpi != 0xffff) &&
+           (phba->sli_rev == LPFC_SLI_REV4))
+               mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
 
        mb->mbxCommand = MBX_UNREG_D_ID;
        mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->un.varRegLogin.rpi = 0;
-       if (phba->sli_rev == LPFC_SLI_REV4) {
-               mb->un.varRegLogin.rpi = rpi;
-               if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
-                       return 1;
-       }
-       mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+       if (phba->sli_rev >= LPFC_SLI_REV3)
+               mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
        mb->un.varRegLogin.did = did;
        mb->mbxOwner = OWN_HOST;
        /* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                                "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
                                "rpi x%x\n", vpi, did, rpi);
-               return (1);
+               return 1;
        }
        INIT_LIST_HEAD(&mp->list);
        sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
        mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
        mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
 
-       return (0);
+       return 0;
 }
 
 /**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
  *
  * This routine prepares the mailbox command for unregistering remote port
  * login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
  **/
 void
 lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
        mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-       mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+       mb->un.varUnregLogin.rpi = rpi;
        mb->un.varUnregLogin.rsvd1 = 0;
-       mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
+       if (phba->sli_rev >= LPFC_SLI_REV3)
+               mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
 
        mb->mbxCommand = MBX_UNREG_LOGIN;
        mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
-               lpfc_unreg_login(phba, vport->vpi,
-                       vport->vpi + phba->vpi_base, mbox);
-               mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
+               /*
+                * For SLI4 functions, the rpi field is overloaded for
+                * the vport context unreg all.  This routine passes
+                * 0 for the rpi field in lpfc_unreg_login for compatibility
+                * with SLI3 and then overrides the rpi field with the
+                * expected value for SLI4.
+                */
+               lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+                                mbox);
+               mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
        if ((phba->sli_rev == LPFC_SLI_REV4) &&