Merge branch 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
Linus Torvalds [Wed, 6 Jul 2011 03:57:45 +0000 (20:57 -0700)]
* 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6:
  USB: fix regression occurring during device removal
  USB: fsl_udc_core: fix build breakage when building for ARM arch

115 files changed:
CREDITS
Documentation/hwmon/f71882fg
Documentation/hwmon/k10temp
Documentation/power/runtime_pm.txt
Makefile
README
arch/arm/mach-at91/at91cap9.c
arch/arm/mach-at91/at91cap9_devices.c
arch/arm/mach-at91/at91rm9200.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/at91sam9rl.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-at91/board-cap9adk.c
arch/arm/mach-at91/board-sam9260ek.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/board-sam9263ek.c
arch/arm/mach-at91/board-sam9g20ek.c
arch/arm/mach-at91/board-sam9m10g45ek.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/x86/pci/xen.c
arch/x86/xen/mmu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/nid.h
drivers/hwmon/Kconfig
drivers/hwmon/adm1275.c
drivers/hwmon/emc6w201.c
drivers/hwmon/f71882fg.c
drivers/hwmon/hwmon-vid.c
drivers/hwmon/pmbus.c
drivers/hwmon/pmbus_core.c
drivers/hwmon/sch5627.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/uverbs_main.c
drivers/input/keyboard/pmic8xxx-keypad.c
drivers/input/misc/pmic8xxx-pwrkey.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/omap-usb-host.c
drivers/mfd/tps65911-comparator.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/isci/Makefile [new file with mode: 0644]
drivers/scsi/isci/firmware/Makefile [new file with mode: 0644]
drivers/scsi/isci/firmware/README [new file with mode: 0644]
drivers/scsi/isci/firmware/create_fw.c [new file with mode: 0644]
drivers/scsi/isci/firmware/create_fw.h [new file with mode: 0644]
drivers/scsi/isci/host.c [new file with mode: 0644]
drivers/scsi/isci/host.h [new file with mode: 0644]
drivers/scsi/isci/init.c [new file with mode: 0644]
drivers/scsi/isci/isci.h [new file with mode: 0644]
drivers/scsi/isci/phy.c [new file with mode: 0644]
drivers/scsi/isci/phy.h [new file with mode: 0644]
drivers/scsi/isci/port.c [new file with mode: 0644]
drivers/scsi/isci/port.h [new file with mode: 0644]
drivers/scsi/isci/port_config.c [new file with mode: 0644]
drivers/scsi/isci/probe_roms.c [new file with mode: 0644]
drivers/scsi/isci/probe_roms.h [new file with mode: 0644]
drivers/scsi/isci/registers.h [new file with mode: 0644]
drivers/scsi/isci/remote_device.c [new file with mode: 0644]
drivers/scsi/isci/remote_device.h [new file with mode: 0644]
drivers/scsi/isci/remote_node_context.c [new file with mode: 0644]
drivers/scsi/isci/remote_node_context.h [new file with mode: 0644]
drivers/scsi/isci/remote_node_table.c [new file with mode: 0644]
drivers/scsi/isci/remote_node_table.h [new file with mode: 0644]
drivers/scsi/isci/request.c [new file with mode: 0644]
drivers/scsi/isci/request.h [new file with mode: 0644]
drivers/scsi/isci/sas.h [new file with mode: 0644]
drivers/scsi/isci/scu_completion_codes.h [new file with mode: 0644]
drivers/scsi/isci/scu_event_codes.h [new file with mode: 0644]
drivers/scsi/isci/scu_remote_node_context.h [new file with mode: 0644]
drivers/scsi/isci/scu_task_context.h [new file with mode: 0644]
drivers/scsi/isci/task.c [new file with mode: 0644]
drivers/scsi/isci/task.h [new file with mode: 0644]
drivers/scsi/isci/unsolicited_frame_control.c [new file with mode: 0644]
drivers/scsi/isci/unsolicited_frame_control.h [new file with mode: 0644]
drivers/tty/serial/atmel_serial.c
drivers/video/amba-clcd.c
drivers/video/fsl-diu-fb.c
drivers/video/geode/gx1fb_core.c
drivers/video/hecubafb.c
drivers/video/sh_mobile_meram.c
drivers/video/sm501fb.c
drivers/video/udlfb.c
drivers/video/vesafb.c
firmware/Makefile
firmware/isci/isci_firmware.bin.ihex [new file with mode: 0644]
fs/ceph/file.c
fs/cifs/connect.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
include/linux/fsl-diu-fb.h
include/sound/sb16_csp.h
net/ceph/osd_client.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/pci/cs5535audio/cs5535audio_pcm.c
sound/pci/hda/hda_eld.c
sound/pci/hda/patch_conexant.c
sound/pci/rme9652/hdspm.c
sound/spi/at73c213.c

diff --git a/CREDITS b/CREDITS
index d78359f..1deb331 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -518,7 +518,7 @@ N: Zach Brown
 E: zab@zabbo.net
 D: maestro pci sound
 
-M: David Brownell
+N: David Brownell
 D: Kernel engineer, mentor, and friend.  Maintained USB EHCI and
 D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
 D: device drivers.  His encouragement also helped many engineers get
index 84d2623..de91c0d 100644 (file)
@@ -22,6 +22,10 @@ Supported chips:
     Prefix: 'f71869'
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
+  * Fintek F71869A
+    Prefix: 'f71869a'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Not public
   * Fintek F71882FG and F71883FG
     Prefix: 'f71882fg'
     Addresses scanned: none, address read from Super I/O config space
index 0393c89..a10f736 100644 (file)
@@ -9,8 +9,8 @@ Supported chips:
   Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
-* AMD Family 12h processors: "Llano"
-* AMD Family 14h processors: "Brazos" (C/E/G-Series)
+* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
 * AMD Family 15h processors: "Bulldozer"
 
   Prefix: 'k10temp'
@@ -20,12 +20,16 @@ Supported chips:
     http://support.amd.com/us/Processor_TechDocs/31116.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41256.pdf
+  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 12h Processors:
+    http://support.amd.com/us/Processor_TechDocs/41131.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
     http://support.amd.com/us/Processor_TechDocs/43170.pdf
   Revision Guide for AMD Family 10h Processors:
     http://support.amd.com/us/Processor_TechDocs/41322.pdf
   Revision Guide for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41788.pdf
+  Revision Guide for AMD Family 12h Processors:
+    http://support.amd.com/us/Processor_TechDocs/44739.pdf
   Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
     http://support.amd.com/us/Processor_TechDocs/47534.pdf
   AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
index 22accb3..b24875b 100644 (file)
@@ -501,13 +501,29 @@ helper functions described in Section 4.  In that case, pm_runtime_resume()
 should be used.  Of course, for this purpose the device's run-time PM has to be
 enabled earlier by calling pm_runtime_enable().
 
-If the device bus type's or driver's ->probe() or ->remove() callback runs
+If the device bus type's or driver's ->probe() callback runs
 pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts,
 they will fail returning -EAGAIN, because the device's usage counter is
-incremented by the core before executing ->probe() and ->remove().  Still, it
-may be desirable to suspend the device as soon as ->probe() or ->remove() has
-finished, so the PM core uses pm_runtime_idle_sync() to invoke the
-subsystem-level idle callback for the device at that time.
+incremented by the driver core before executing ->probe().  Still, it may be
+desirable to suspend the device as soon as ->probe() has finished, so the driver
+core uses pm_runtime_put_sync() to invoke the subsystem-level idle callback for
+the device at that time.
+
+Moreover, the driver core prevents runtime PM callbacks from racing with the bus
+notifier callback in __device_release_driver(), which is necessary, because the
+notifier is used by some subsystems to carry out operations affecting the
+runtime PM functionality.  It does so by calling pm_runtime_get_sync() before
+driver_sysfs_remove() and the BUS_NOTIFY_UNBIND_DRIVER notifications.  This
+resumes the device if it's in the suspended state and prevents it from
+being suspended again while those routines are being executed.
+
+To allow bus types and drivers to put devices into the suspended state by
+calling pm_runtime_suspend() from their ->remove() routines, the driver core
+executes pm_runtime_put_sync() after running the BUS_NOTIFY_UNBIND_DRIVER
+notifications in __device_release_driver().  This requires bus types and
+drivers to make their ->remove() callbacks avoid races with runtime PM directly,
+but also it allows of more flexibility in the handling of devices during the
+removal of their drivers.
 
 The user space can effectively disallow the driver of the device to power manage
 it at run time by changing the value of its /sys/devices/.../power/control
index dc67046..86f47a0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
diff --git a/README b/README
index 8510017..0d5a7dd 100644 (file)
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
-       Linux kernel release 2.6.xx <http://kernel.org/>
+       Linux kernel release 3.x <http://kernel.org/>
 
-These are the release notes for Linux version 2.6.  Read them carefully,
+These are the release notes for Linux version 3.  Read them carefully,
 as they tell you what this is all about, explain how to install the
 kernel, and what to do if something goes wrong. 
 
@@ -62,10 +62,10 @@ INSTALLING the kernel source:
    directory where you have permissions (eg. your home directory) and
    unpack it:
 
-               gzip -cd linux-2.6.XX.tar.gz | tar xvf -
+               gzip -cd linux-3.X.tar.gz | tar xvf -
 
    or
-               bzip2 -dc linux-2.6.XX.tar.bz2 | tar xvf -
+               bzip2 -dc linux-3.X.tar.bz2 | tar xvf -
 
 
    Replace "XX" with the version number of the latest kernel.
@@ -75,15 +75,15 @@ INSTALLING the kernel source:
    files.  They should match the library, and not get messed up by
    whatever the kernel-du-jour happens to be.
 
- - You can also upgrade between 2.6.xx releases by patching.  Patches are
+ - You can also upgrade between 3.x releases by patching.  Patches are
    distributed in the traditional gzip and the newer bzip2 format.  To
    install by patching, get all the newer patch files, enter the
-   top level directory of the kernel source (linux-2.6.xx) and execute:
+   top level directory of the kernel source (linux-3.x) and execute:
 
-               gzip -cd ../patch-2.6.xx.gz | patch -p1
+               gzip -cd ../patch-3.x.gz | patch -p1
 
    or
-               bzip2 -dc ../patch-2.6.xx.bz2 | patch -p1
+               bzip2 -dc ../patch-3.x.bz2 | patch -p1
 
    (repeat xx for all versions bigger than the version of your current
    source tree, _in_order_) and you should be ok.  You may want to remove
@@ -91,9 +91,9 @@ INSTALLING the kernel source:
    failed patches (xxx# or xxx.rej). If there are, either you or me has
    made a mistake.
 
-   Unlike patches for the 2.6.x kernels, patches for the 2.6.x.y kernels
+   Unlike patches for the 3.x kernels, patches for the 3.x.y kernels
    (also known as the -stable kernels) are not incremental but instead apply
-   directly to the base 2.6.x kernel.  Please read
+   directly to the base 3.x kernel.  Please read
    Documentation/applying-patches.txt for more information.
 
    Alternatively, the script patch-kernel can be used to automate this
@@ -107,14 +107,14 @@ INSTALLING the kernel source:
    an alternative directory can be specified as the second argument.
 
  - If you are upgrading between releases using the stable series patches
-   (for example, patch-2.6.xx.y), note that these "dot-releases" are
-   not incremental and must be applied to the 2.6.xx base tree. For
-   example, if your base kernel is 2.6.12 and you want to apply the
-   2.6.12.3 patch, you do not and indeed must not first apply the
-   2.6.12.1 and 2.6.12.2 patches. Similarly, if you are running kernel
-   version 2.6.12.2 and want to jump to 2.6.12.3, you must first
-   reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
-   the 2.6.12.3 patch.
+   (for example, patch-3.x.y), note that these "dot-releases" are
+   not incremental and must be applied to the 3.x base tree. For
+   example, if your base kernel is 3.0 and you want to apply the
+   3.0.3 patch, you do not and indeed must not first apply the
+   3.0.1 and 3.0.2 patches. Similarly, if you are running kernel
+   version 3.0.2 and want to jump to 3.0.3, you must first
+   reverse the 3.0.2 patch (that is, patch -R) _before_ applying
+   the 3.0.3 patch.
    You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
@@ -126,7 +126,7 @@ INSTALLING the kernel source:
 
 SOFTWARE REQUIREMENTS
 
-   Compiling and running the 2.6.xx kernels requires up-to-date
+   Compiling and running the 3.x kernels requires up-to-date
    versions of various software packages.  Consult
    Documentation/Changes for the minimum version numbers required
    and how to get updates for these packages.  Beware that using
@@ -142,11 +142,11 @@ BUILD directory for the kernel:
    Using the option "make O=output/dir" allow you to specify an alternate
    place for the output files (including .config).
    Example:
-     kernel source code:       /usr/src/linux-2.6.N
+     kernel source code:       /usr/src/linux-3.N
      build directory:          /home/name/build/kernel
 
    To configure and build the kernel use:
-   cd /usr/src/linux-2.6.N
+   cd /usr/src/linux-3.N
    make O=/home/name/build/kernel menuconfig
    make O=/home/name/build/kernel
    sudo make O=/home/name/build/kernel modules_install install
index 17fae4a..f1013d0 100644 (file)
@@ -223,15 +223,15 @@ static struct clk *periph_clocks[] __initdata = {
 };
 
 static struct clk_lookup periph_clocks_lookups[] = {
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
 };
 
 static struct clk_lookup usart_clocks_lookups[] = {
index cd850ed..dba0d8d 100644 (file)
@@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91cap9_set_console_clock(portnr);
+               at91cap9_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index b228ce9..83a1a3f 100644 (file)
@@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
 };
 
 static struct clk_lookup usart_clocks_lookups[] = {
index a0ba475..7227755 100644 (file)
@@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91rm9200_set_console_clock(portnr);
+               at91rm9200_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1fdeb90..39f81f4 100644 (file)
@@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9260_set_console_clock(portnr);
+               at91sam9260_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 3eb4538..5004bf0 100644 (file)
@@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9261_set_console_clock(portnr);
+               at91sam9261_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index ffe081b..a050f41 100644 (file)
@@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9263_set_console_clock(portnr);
+               at91sam9263_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 2bb6ff9..11e2141 100644 (file)
@@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = {
 static struct clk_lookup periph_clocks_lookups[] = {
        /* One additional fake clock for ohci */
        CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
-       CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
-       CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
-       CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+       CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
+       CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk),
+       CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
index 0567486..600bffb 100644 (file)
@@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9g45_set_console_clock(portnr);
+               at91sam9g45_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1a40f16..29dff18 100644 (file)
@@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = {
 };
 
 static struct clk_lookup periph_clocks_lookups[] = {
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
index c296045..aacb19d 100644 (file)
@@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9rl_set_console_clock(portnr);
+               at91sam9rl_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1904fdf..cdb65d4 100644 (file)
@@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void)
        csa = at91_sys_read(AT91_MATRIX_EBICSA);
        at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
 
-       cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
+       cap9adk_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (cap9adk_nand_data.bus_width_16)
                cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
index d600dc1..5c24074 100644 (file)
@@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index f897f84..b60c22b 100644 (file)
@@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 605b26f..9bbdc92 100644 (file)
@@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 7624cf0..1325a50 100644 (file)
@@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 063c95d..33eaa13 100644 (file)
@@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index b855ee7..8f48660 100644 (file)
  * the 16-31 bit are reserved for at91 generic information
  *
  * bit 31:
- *     0 => nand 16 bit
- *     1 => nand 8 bit
+ *     0 => nand 8 bit
+ *     1 => nand 16 bit
  */
-#define BOARD_HAVE_NAND_8BIT   (1 << 31)
-static int inline board_have_nand_8bit(void)
+#define BOARD_HAVE_NAND_16BIT  (1 << 31)
+static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_8BIT;
+       return system_rev & BOARD_HAVE_NAND_16BIT;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index 8214724..fe00830 100644 (file)
@@ -333,6 +333,7 @@ static int xen_register_pirq(u32 gsi, int triggering)
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;
+       bool gsi_override = false;
 
        if (!xen_pv_domain())
                return -1;
@@ -349,11 +350,32 @@ static int xen_register_pirq(u32 gsi, int triggering)
        if (pirq < 0)
                goto out;
 
-       irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
+       /* Before we bind the GSI to a Linux IRQ, check whether
+        * we need to override it with bus_irq (IRQ) value. Usually for
+        * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+        * but there are oddballs where the IRQ != GSI:
+        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+        * which ends up being: gsi_to_irq[9] == 20
+        * (which is what acpi_gsi_to_irq ends up calling when starting the
+        * the ACPI interpreter and keels over since IRQ 9 has not been
+        * setup as we had setup IRQ 20 for it).
+        */
+       if (gsi == acpi_sci_override_gsi) {
+               /* Check whether the GSI != IRQ */
+               acpi_gsi_to_irq(gsi, &irq);
+               if (irq != gsi)
+                       /* Bugger, we MUST have that IRQ. */
+                       gsi_override = true;
+       }
+       if (gsi_override)
+               irq = xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
+       else
+               irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
        if (irq < 0)
                goto out;
 
-       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
 
        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
index 673e968..0ccccb6 100644 (file)
@@ -1232,7 +1232,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
        struct {
                struct mmuext_op op;
+#ifdef CONFIG_SMP
                DECLARE_BITMAP(mask, num_processors);
+#else
+               DECLARE_BITMAP(mask, NR_CPUS);
+#endif
        } *args;
        struct multicall_space mcs;
 
index 4d46441..0a893f7 100644 (file)
@@ -1207,13 +1207,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       seq_printf(m, "power context ");
-       describe_obj(m, dev_priv->pwrctx);
-       seq_printf(m, "\n");
+       if (dev_priv->pwrctx) {
+               seq_printf(m, "power context ");
+               describe_obj(m, dev_priv->pwrctx);
+               seq_printf(m, "\n");
+       }
 
-       seq_printf(m, "render context ");
-       describe_obj(m, dev_priv->renderctx);
-       seq_printf(m, "\n");
+       if (dev_priv->renderctx) {
+               seq_printf(m, "render context ");
+               describe_obj(m, dev_priv->renderctx);
+               seq_printf(m, "\n");
+       }
 
        mutex_unlock(&dev->mode_config.mutex);
 
index 2b79588..e178702 100644 (file)
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_modeset_gem_init(dev);
 
-       if (IS_IVYBRIDGE(dev)) {
-               /* Share pre & uninstall handlers with ILK/SNB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
-       } else if (HAS_PCH_SPLIT(dev)) {
-               dev->driver->irq_handler = ironlake_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ironlake_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ironlake_enable_vblank;
-               dev->driver->disable_vblank = ironlake_disable_vblank;
-       } else {
-               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
-               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
-               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
-               dev->driver->irq_handler = i915_driver_irq_handler;
-               dev->driver->enable_vblank = i915_enable_vblank;
-               dev->driver->disable_vblank = i915_disable_vblank;
-       }
-
        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;
@@ -2017,12 +1993,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        /* enable GEM by default */
        dev_priv->has_gem = 1;
 
-       dev->driver->get_vblank_counter = i915_get_vblank_counter;
-       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
-               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
-               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
-       }
+       intel_irq_init(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
index 609358f..013d304 100644 (file)
@@ -765,14 +765,6 @@ static struct drm_driver driver = {
        .resume = i915_resume,
 
        .device_is_agp = i915_driver_device_is_agp,
-       .enable_vblank = i915_enable_vblank,
-       .disable_vblank = i915_disable_vblank,
-       .get_vblank_timestamp = i915_get_vblank_timestamp,
-       .get_scanout_position = i915_get_crtc_scanoutpos,
-       .irq_preinstall = i915_driver_irq_preinstall,
-       .irq_postinstall = i915_driver_irq_postinstall,
-       .irq_uninstall = i915_driver_irq_uninstall,
-       .irq_handler = i915_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
index eddabf6..f245c58 100644 (file)
@@ -997,8 +997,6 @@ extern unsigned int i915_enable_fbc;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -1033,33 +1031,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
 
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 
@@ -1070,13 +1047,6 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
-                             int *max_error,
-                             struct timeval *vblank_time,
-                             unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-                            int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
index ae2b499..3b03f85 100644 (file)
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        return (high1 << 8) | low;
 }
 
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
        return I915_READ(reg);
 }
 
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
@@ -462,7 +462,7 @@ static void pch_irq_handler(struct drm_device *dev)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +550,7 @@ done:
        return ret;
 }
 
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1209,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        }
 }
 
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1454,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1478,7 +1478,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1494,7 +1494,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1513,7 +1513,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1529,7 +1529,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1540,7 +1540,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1728,7 +1728,7 @@ repeat:
 
 /* drm_dma.h hooks
 */
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -1740,7 +1740,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
        I915_WRITE(HWSTAM, 0xeffe);
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
                /* Workaround stalls observed on Sandy Bridge GPUs by
                 * making the blitter command streamer generate a
                 * write to the Hardware Status Page for
@@ -1769,7 +1769,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
        POSTING_READ(SDEIER);
 }
 
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
@@ -1841,7 +1841,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
@@ -1891,7 +1891,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -1918,7 +1918,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
  * Must be called after intel_modeset_init or hotplug interrupts won't be
  * enabled correctly.
  */
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1994,7 +1994,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -2014,7 +2014,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
 }
 
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -2040,3 +2040,41 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
 }
+
+void intel_irq_init(struct drm_device *dev)
+{
+       dev->driver->get_vblank_counter = i915_get_vblank_counter;
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+       }
+
+
+       dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+       if (IS_IVYBRIDGE(dev)) {
+               /* Share pre & uninstall handlers with ILK/SNB */
+               dev->driver->irq_handler = ivybridge_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ivybridge_enable_vblank;
+               dev->driver->disable_vblank = ivybridge_disable_vblank;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               dev->driver->irq_handler = ironlake_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ironlake_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ironlake_enable_vblank;
+               dev->driver->disable_vblank = ironlake_disable_vblank;
+       } else {
+               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+               dev->driver->irq_handler = i915_driver_irq_handler;
+               dev->driver->enable_vblank = i915_enable_vblank;
+               dev->driver->disable_vblank = i915_disable_vblank;
+       }
+}
index e8152d2..5257cfc 100644 (file)
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        return;
 }
 
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -678,7 +678,6 @@ void i915_save_display(struct drm_device *dev)
        }
 
        /* VGA state */
-       mutex_lock(&dev->struct_mutex);
        dev_priv->saveVGA0 = I915_READ(VGA0);
        dev_priv->saveVGA1 = I915_READ(VGA1);
        dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -688,10 +687,9 @@ void i915_save_display(struct drm_device *dev)
                dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
 
        i915_save_vga(dev);
-       mutex_unlock(&dev->struct_mutex);
 }
 
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -783,7 +781,6 @@ void i915_restore_display(struct drm_device *dev)
        else
                I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
 
-       mutex_lock(&dev->struct_mutex);
        I915_WRITE(VGA0, dev_priv->saveVGA0);
        I915_WRITE(VGA1, dev_priv->saveVGA1);
        I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -791,7 +788,6 @@ void i915_restore_display(struct drm_device *dev)
        udelay(150);
 
        i915_restore_vga(dev);
-       mutex_unlock(&dev->struct_mutex);
 }
 
 int i915_save_state(struct drm_device *dev)
@@ -801,6 +797,8 @@ int i915_save_state(struct drm_device *dev)
 
        pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Hardware status page */
        dev_priv->saveHWS = I915_READ(HWS_PGA);
 
@@ -840,6 +838,8 @@ int i915_save_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -850,6 +850,8 @@ int i915_restore_state(struct drm_device *dev)
 
        pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Hardware status page */
        I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
@@ -867,6 +869,7 @@ int i915_restore_state(struct drm_device *dev)
                I915_WRITE(IER, dev_priv->saveIER);
                I915_WRITE(IMR, dev_priv->saveIMR);
        }
+       mutex_unlock(&dev->struct_mutex);
 
        intel_init_clock_gating(dev);
 
@@ -878,6 +881,8 @@ int i915_restore_state(struct drm_device *dev)
        if (IS_GEN6(dev))
                gen6_enable_rps(dev_priv);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Cache mode state */
        I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
@@ -891,6 +896,8 @@ int i915_restore_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+       mutex_unlock(&dev->struct_mutex);
+
        intel_i2c_reset(dev);
 
        return 0;
index 56a8e2a..9e2959b 100644 (file)
@@ -1409,6 +1409,11 @@ void intel_setup_overlay(struct drm_device *dev)
        overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
        if (!overlay)
                return;
+
+       mutex_lock(&dev->struct_mutex);
+       if (WARN_ON(dev_priv->overlay))
+               goto out_free;
+
        overlay->dev = dev;
 
        reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1416,8 +1421,6 @@ void intel_setup_overlay(struct drm_device *dev)
                goto out_free;
        overlay->reg_bo = reg_bo;
 
-       mutex_lock(&dev->struct_mutex);
-
        if (OVERLAY_NEEDS_PHYSICAL(dev)) {
                ret = i915_gem_attach_phys_object(dev, reg_bo,
                                                  I915_GEM_PHYS_OVERLAY_REGS,
@@ -1442,8 +1445,6 @@ void intel_setup_overlay(struct drm_device *dev)
                 }
        }
 
-       mutex_unlock(&dev->struct_mutex);
-
        /* init all values */
        overlay->color_key = 0x0101fe;
        overlay->brightness = -19;
@@ -1452,7 +1453,7 @@ void intel_setup_overlay(struct drm_device *dev)
 
        regs = intel_overlay_map_regs(overlay);
        if (!regs)
-               goto out_free_bo;
+               goto out_unpin_bo;
 
        memset(regs, 0, sizeof(struct overlay_registers));
        update_polyphase_filter(regs);
@@ -1461,15 +1462,17 @@ void intel_setup_overlay(struct drm_device *dev)
        intel_overlay_unmap_regs(overlay, regs);
 
        dev_priv->overlay = overlay;
+       mutex_unlock(&dev->struct_mutex);
        DRM_INFO("initialized overlay support\n");
        return;
 
 out_unpin_bo:
-       i915_gem_object_unpin(reg_bo);
+       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+               i915_gem_object_unpin(reg_bo);
 out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
-       mutex_unlock(&dev->struct_mutex);
 out_free:
+       mutex_unlock(&dev->struct_mutex);
        kfree(overlay);
        return;
 }
index 144f79a..731acea 100644 (file)
@@ -371,7 +371,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->vram.flags_valid        = nv50_vram_flags_valid;
                break;
        case 0xC0:
-       case 0xD0:
                engine->instmem.init            = nvc0_instmem_init;
                engine->instmem.takedown        = nvc0_instmem_takedown;
                engine->instmem.suspend         = nvc0_instmem_suspend;
@@ -923,7 +922,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
                dev_priv->card_type = NV_50;
                break;
        case 0xc0:
-       case 0xd0:
                dev_priv->card_type = NV_C0;
                break;
        default:
index 12d2fdc..e8a5ffb 100644 (file)
@@ -2248,7 +2248,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
 
        /* Get VRAM informations */
        rdev->mc.vram_is_ddr = true;
-       tmp = RREG32(MC_ARB_RAMCFG);
+       if (rdev->flags & RADEON_IS_IGP)
+               tmp = RREG32(FUS_MC_ARB_RAMCFG);
+       else
+               tmp = RREG32(MC_ARB_RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
                chansize = 16;
        } else if (tmp & CHANSIZE_MASK) {
index 9736746..4672869 100644 (file)
 #define        CGTS_USER_TCC_DISABLE                           0x914C
 #define                TCC_DISABLE_MASK                                0xFFFF0000
 #define                TCC_DISABLE_SHIFT                               16
-#define        CGTS_SM_CTRL_REG                                0x915C
+#define        CGTS_SM_CTRL_REG                                0x9150
 #define                OVERRIDE                                (1 << 21)
 
 #define        TA_CNTL_AUX                                     0x9508
index 16db83c..5f888f7 100644 (file)
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
            F71858FG
            F71862FG
            F71863FG
-           F71869F/E
+           F71869F/E/A
            F71882FG
            F71883FG
            F71889FG/ED/A
index c2ee204..b9b7caf 100644 (file)
@@ -32,6 +32,7 @@ static int adm1275_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        int config;
+       int ret;
        struct pmbus_driver_info *info;
 
        if (!i2c_check_functionality(client->adapter,
@@ -43,8 +44,10 @@ static int adm1275_probe(struct i2c_client *client,
                return -ENOMEM;
 
        config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
-       if (config < 0)
-               return config;
+       if (config < 0) {
+               ret = config;
+               goto err_mem;
+       }
 
        info->pages = 1;
        info->direct[PSC_VOLTAGE_IN] = true;
@@ -76,7 +79,14 @@ static int adm1275_probe(struct i2c_client *client,
        else
                info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
 
-       return pmbus_do_probe(client, id, info);
+       ret = pmbus_do_probe(client, id, info);
+       if (ret)
+               goto err_mem;
+       return 0;
+
+err_mem:
+       kfree(info);
+       return ret;
 }
 
 static int adm1275_remove(struct i2c_client *client)
index e0ef323..0064432 100644 (file)
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
 
        lsb = i2c_smbus_read_byte_data(client, reg);
        msb = i2c_smbus_read_byte_data(client, reg + 1);
-       if (lsb < 0 || msb < 0) {
-               dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+       if (unlikely(lsb < 0 || msb < 0)) {
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       16, "read", reg);
                return 0xFFFF;  /* Arbitrary value */
        }
 
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
        int err;
 
        err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
-       if (!err)
+       if (likely(!err))
                err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
-       if (err < 0)
-               dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+       if (unlikely(err < 0))
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       16, "write", reg);
+
+       return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+       int val;
+
+       val = i2c_smbus_read_byte_data(client, reg);
+       if (unlikely(val < 0)) {
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       8, "read", reg);
+               return 0x00;    /* Arbitrary value */
+       }
+
+       return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+       int err;
+
+       err = i2c_smbus_write_byte_data(client, reg, val);
+       if (unlikely(err < 0))
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       8, "write", reg);
 
        return err;
 }
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
                for (nr = 0; nr < 6; nr++) {
                        data->in[input][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN(nr));
                        data->in[min][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN_LOW(nr));
                        data->in[max][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN_HIGH(nr));
                }
 
                for (nr = 0; nr < 6; nr++) {
                        data->temp[input][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP(nr));
                        data->temp[min][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP_LOW(nr));
                        data->temp[max][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP_HIGH(nr));
                }
 
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
 
        mutex_lock(&data->update_lock);
        data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
-       err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+       err = emc6w201_write8(client, reg, data->in[sf][nr]);
        mutex_unlock(&data->update_lock);
 
        return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
 
        mutex_lock(&data->update_lock);
        data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
-       err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+       err = emc6w201_write8(client, reg, data->temp[sf][nr]);
        mutex_unlock(&data->update_lock);
 
        return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
 
        /* Check configuration */
        config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
-       if ((config & 0xF4) != 0x04)
+       if (config < 0 || (config & 0xF4) != 0x04)
                return -ENODEV;
        if (!(config & 0x01)) {
                dev_err(&client->dev, "Monitoring not enabled\n");
index a4a94a0..2d96ed2 100644 (file)
@@ -52,6 +52,7 @@
 #define SIO_F71858_ID          0x0507  /* Chipset ID */
 #define SIO_F71862_ID          0x0601  /* Chipset ID */
 #define SIO_F71869_ID          0x0814  /* Chipset ID */
+#define SIO_F71869A_ID         0x1007  /* Chipset ID */
 #define SIO_F71882_ID          0x0541  /* Chipset ID */
 #define SIO_F71889_ID          0x0723  /* Chipset ID */
 #define SIO_F71889E_ID         0x0909  /* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
-            f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+            f71889fg, f71889ed, f71889a, f8000, f81865f };
 
 static const char *f71882fg_names[] = {
        "f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
        "f71858fg",
        "f71862fg",
        "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+       "f71869a",
        "f71882fg",
        "f71889fg", /* f81801u too, same id */
        "f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
        [f71858fg]      = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
        [f71862fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71869]        = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+       [f71869a]       = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71882fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71889fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71889ed]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 0,
        [f71869]        = 0,
+       [f71869a]       = 0,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 1,
        [f71869]        = 1,
+       [f71869a]       = 1,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
        [f71858fg]      = 3,
        [f71862fg]      = 3,
        [f71869]        = 3,
+       [f71869a]       = 3,
        [f71882fg]      = 4,
        [f71889fg]      = 3,
        [f71889ed]      = 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 1,
        [f71869]        = 1,
+       [f71869a]       = 1,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
        [f71858fg]      = 3,
        [f71862fg]      = 3,
        [f71869]        = 3,
+       [f71869a]       = 3,
        [f71882fg]      = 3,
        [f71889fg]      = 3,
        [f71889ed]      = 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71808e:
                case f71808a:
                case f71869:
+               case f71869a:
                        /* These always have signed auto point temps */
                        data->auto_point_temp_signed = 1;
                        /* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71808e:
                case f71808a:
                case f71869:
+               case f71869a:
                case f71889fg:
                case f71889ed:
                case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
        case SIO_F71869_ID:
                sio_data->type = f71869;
                break;
+       case SIO_F71869A_ID:
+               sio_data->type = f71869a;
+               break;
        case SIO_F71882_ID:
                sio_data->type = f71882fg;
                break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
 }
 
 MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(f71882fg_init);
index 2582bfe..c8195a0 100644 (file)
@@ -202,7 +202,7 @@ static struct vrm_model vrm_models[] = {
 
        {X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85},        /* Eden ESP/Ezra */
        {X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85},        /* Ezra T */
-       {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},        /* Nemiah */
+       {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},        /* Nehemiah */
        {X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17},        /* C3-M, Eden-N */
        {X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0},         /* No information */
        {X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13},        /* C7, Esther */
index 98e2e28..931d940 100644 (file)
@@ -47,12 +47,14 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
        if (info->func[0]
            && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
                info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+       if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+           pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
                info->func[0] |= PMBUS_HAVE_FAN12;
                if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
                        info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
        }
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+       if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+           pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
                info->func[0] |= PMBUS_HAVE_FAN34;
                if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
                        info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
@@ -63,6 +65,10 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
                                              PMBUS_STATUS_TEMPERATURE))
                        info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
        }
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+               info->func[0] |= PMBUS_HAVE_TEMP2;
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+               info->func[0] |= PMBUS_HAVE_TEMP3;
 
        /* Sensors detected on all pages */
        for (page = 0; page < info->pages; page++) {
index 354770e..744672c 100644 (file)
@@ -1430,14 +1430,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
-       /*
-        * Bail out if status register or PMBus revision register
-        * does not exist.
-        */
-       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
-           || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
-               dev_err(&client->dev,
-                       "Status or revision register not found\n");
+       /* Bail out if PMBus status register does not exist. */
+       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+               dev_err(&client->dev, "PMBus status register not found\n");
                ret = -ENODEV;
                goto out_data;
        }
index 020c872..3494a4c 100644 (file)
@@ -887,7 +887,7 @@ static void __exit sch5627_exit(void)
 }
 
 MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(sch5627_init);
index f62f52f..fc0f2bd 100644 (file)
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
 
 static char *cm_devnode(struct device *dev, mode_t *mode)
 {
-       *mode = 0666;
+       if (mode)
+               *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
index e49a85f..56898b6 100644 (file)
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
 
 static char *uverbs_devnode(struct device *dev, mode_t *mode)
 {
-       *mode = 0666;
+       if (mode)
+               *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
index 40b02ae..6229c3e 100644 (file)
@@ -520,7 +520,8 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
  */
 static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
 {
-       const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+       const struct pm8xxx_keypad_platform_data *pdata =
+                                       dev_get_platdata(&pdev->dev);
        const struct matrix_keymap_data *keymap_data;
        struct pmic8xxx_kp *kp;
        int rc;
index 97e07e7..b3cfb9c 100644 (file)
@@ -90,7 +90,8 @@ static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
        unsigned int delay;
        u8 pon_cntl;
        struct pmic8xxx_pwrkey *pwrkey;
-       const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+       const struct pm8xxx_pwrkey_platform_data *pdata =
+                                       dev_get_platdata(&pdev->dev);
 
        if (!pdata) {
                dev_err(&pdev->dev, "power key platform data not supplied\n");
index 0f09c05..6ca938a 100644 (file)
@@ -728,6 +728,9 @@ config MFD_TPS65910
          if you say yes here you get support for the TPS65910 series of
          Power Management chips.
 
+config TPS65911_COMPARATOR
+       tristate
+
 endif # MFD_SUPPORT
 
 menu "Multimedia Capabilities Port drivers"
index efe3cc3..d7d47d2 100644 (file)
@@ -94,3 +94,4 @@ obj-$(CONFIG_MFD_OMAP_USB_HOST)       += omap-usb-host.o
 obj-$(CONFIG_MFD_PM8921_CORE)  += pm8921-core.o
 obj-$(CONFIG_MFD_PM8XXX_IRQ)   += pm8xxx-irq.o
 obj-$(CONFIG_MFD_TPS65910)     += tps65910.o tps65910-irq.o
+obj-$(CONFIG_TPS65911_COMPARATOR)      += tps65911-comparator.o
index 8552195..1717144 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/spinlock.h>
 #include <linux/gpio.h>
 #include <plat/usb.h>
-#include <linux/pm_runtime.h>
 
 #define USBHS_DRIVER_NAME      "usbhs-omap"
 #define OMAP_EHCI_DEVICE       "ehci-omap"
 
 
 struct usbhs_hcd_omap {
+       struct clk                      *usbhost_ick;
+       struct clk                      *usbhost_hs_fck;
+       struct clk                      *usbhost_fs_fck;
        struct clk                      *xclk60mhsp1_ck;
        struct clk                      *xclk60mhsp2_ck;
        struct clk                      *utmi_p1_fck;
@@ -156,6 +158,8 @@ struct usbhs_hcd_omap {
        struct clk                      *usbhost_p2_fck;
        struct clk                      *usbtll_p2_fck;
        struct clk                      *init_60m_fclk;
+       struct clk                      *usbtll_fck;
+       struct clk                      *usbtll_ick;
 
        void __iomem                    *uhh_base;
        void __iomem                    *tll_base;
@@ -349,13 +353,46 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        omap->platdata.ehci_data = pdata->ehci_data;
        omap->platdata.ohci_data = pdata->ohci_data;
 
-       pm_runtime_enable(&pdev->dev);
+       omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+       if (IS_ERR(omap->usbhost_ick)) {
+               ret =  PTR_ERR(omap->usbhost_ick);
+               dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+               goto err_end;
+       }
+
+       omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+       if (IS_ERR(omap->usbhost_hs_fck)) {
+               ret = PTR_ERR(omap->usbhost_hs_fck);
+               dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+               goto err_usbhost_ick;
+       }
+
+       omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+       if (IS_ERR(omap->usbhost_fs_fck)) {
+               ret = PTR_ERR(omap->usbhost_fs_fck);
+               dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+               goto err_usbhost_hs_fck;
+       }
+
+       omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+       if (IS_ERR(omap->usbtll_fck)) {
+               ret = PTR_ERR(omap->usbtll_fck);
+               dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+               goto err_usbhost_fs_fck;
+       }
+
+       omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+       if (IS_ERR(omap->usbtll_ick)) {
+               ret = PTR_ERR(omap->usbtll_ick);
+               dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+               goto err_usbtll_fck;
+       }
 
        omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
        if (IS_ERR(omap->utmi_p1_fck)) {
                ret = PTR_ERR(omap->utmi_p1_fck);
                dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
-               goto err_end;
+               goto err_usbtll_ick;
        }
 
        omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -485,8 +522,22 @@ err_xclk60mhsp1_ck:
 err_utmi_p1_fck:
        clk_put(omap->utmi_p1_fck);
 
+err_usbtll_ick:
+       clk_put(omap->usbtll_ick);
+
+err_usbtll_fck:
+       clk_put(omap->usbtll_fck);
+
+err_usbhost_fs_fck:
+       clk_put(omap->usbhost_fs_fck);
+
+err_usbhost_hs_fck:
+       clk_put(omap->usbhost_hs_fck);
+
+err_usbhost_ick:
+       clk_put(omap->usbhost_ick);
+
 err_end:
-       pm_runtime_disable(&pdev->dev);
        kfree(omap);
 
 end_probe:
@@ -520,7 +571,11 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
        clk_put(omap->utmi_p2_fck);
        clk_put(omap->xclk60mhsp1_ck);
        clk_put(omap->utmi_p1_fck);
-       pm_runtime_disable(&pdev->dev);
+       clk_put(omap->usbtll_ick);
+       clk_put(omap->usbtll_fck);
+       clk_put(omap->usbhost_fs_fck);
+       clk_put(omap->usbhost_hs_fck);
+       clk_put(omap->usbhost_ick);
        kfree(omap);
 
        return 0;
@@ -640,6 +695,7 @@ static int usbhs_enable(struct device *dev)
        struct usbhs_omap_platform_data *pdata = &omap->platdata;
        unsigned long                   flags = 0;
        int                             ret = 0;
+       unsigned long                   timeout;
        unsigned                        reg;
 
        dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -652,7 +708,11 @@ static int usbhs_enable(struct device *dev)
        if (omap->count > 0)
                goto end_count;
 
-       pm_runtime_get_sync(dev);
+       clk_enable(omap->usbhost_ick);
+       clk_enable(omap->usbhost_hs_fck);
+       clk_enable(omap->usbhost_fs_fck);
+       clk_enable(omap->usbtll_fck);
+       clk_enable(omap->usbtll_ick);
 
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -676,6 +736,50 @@ static int usbhs_enable(struct device *dev)
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
+       /* perform TLL soft reset, and wait until reset is complete */
+       usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+                       OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+       /* Wait for TLL reset to complete */
+       timeout = jiffies + msecs_to_jiffies(1000);
+       while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+                       & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+               cpu_relax();
+
+               if (time_after(jiffies, timeout)) {
+                       dev_dbg(dev, "operation timed out\n");
+                       ret = -EINVAL;
+                       goto err_tll;
+               }
+       }
+
+       dev_dbg(dev, "TLL RESET DONE\n");
+
+       /* (1<<3) = no idle mode only for initial debugging */
+       usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+                       OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+                       OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+                       OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+
+       /* Put UHH in NoIdle/NoStandby mode */
+       reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+       if (is_omap_usbhs_rev1(omap)) {
+               reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+                               | OMAP_UHH_SYSCONFIG_SIDLEMODE
+                               | OMAP_UHH_SYSCONFIG_CACTIVITY
+                               | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+               reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+
+       } else if (is_omap_usbhs_rev2(omap)) {
+               reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+               reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+               reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+               reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+       }
+
+       usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
        reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
        /* setup ULPI bypass and burst configurations */
        reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -815,8 +919,6 @@ end_count:
        return 0;
 
 err_tll:
-       pm_runtime_put_sync(dev);
-       spin_unlock_irqrestore(&omap->lock, flags);
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -824,6 +926,13 @@ err_tll:
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[1]);
        }
+
+       clk_disable(omap->usbtll_ick);
+       clk_disable(omap->usbtll_fck);
+       clk_disable(omap->usbhost_fs_fck);
+       clk_disable(omap->usbhost_hs_fck);
+       clk_disable(omap->usbhost_ick);
+       spin_unlock_irqrestore(&omap->lock, flags);
        return ret;
 }
 
@@ -896,7 +1005,11 @@ static void usbhs_disable(struct device *dev)
                clk_disable(omap->utmi_p1_fck);
        }
 
-       pm_runtime_put_sync(dev);
+       clk_disable(omap->usbtll_ick);
+       clk_disable(omap->usbtll_fck);
+       clk_disable(omap->usbhost_fs_fck);
+       clk_disable(omap->usbhost_hs_fck);
+       clk_disable(omap->usbhost_ick);
 
        /* The gpio_free migh sleep; so unlock the spinlock */
        spin_unlock_irqrestore(&omap->lock, flags);
index 3d2dc56..283ac67 100644 (file)
@@ -125,7 +125,7 @@ static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
 static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
 {
        struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
-       struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+       struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
        int ret;
 
        ret = comp_threshold_set(tps65910, COMP1,  pdata->vmbch_threshold);
index 4a1f029..8d9dae8 100644 (file)
@@ -830,6 +830,19 @@ config SCSI_GDTH
          To compile this driver as a module, choose M here: the
          module will be called gdth.
 
+config SCSI_ISCI
+       tristate "Intel(R) C600 Series Chipset SAS Controller"
+       depends on PCI && SCSI
+       depends on X86
+       # (temporary): known alpha quality driver
+       depends on EXPERIMENTAL
+       select SCSI_SAS_LIBSAS
+       ---help---
+         This driver supports the 6Gb/s SAS capabilities of the storage
+         control unit found in the Intel(R) C600 series chipset.
+
+         The experimental tag will be removed after the driver exits alpha
+
 config SCSI_GENERIC_NCR5380
        tristate "Generic NCR5380/53c400 SCSI PIO support"
        depends on ISA && SCSI
index 7ad0b8a..3c08f53 100644 (file)
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID)    += aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)     += aic94xx/
 obj-$(CONFIG_SCSI_PM8001)      += pm8001/
+obj-$(CONFIG_SCSI_ISCI)                += isci/
 obj-$(CONFIG_SCSI_IPS)         += ips.o
 obj-$(CONFIG_SCSI_FD_MCS)      += fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
index c6c0434..6bba23a 100644 (file)
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
        unsigned char sense_key;
        unsigned char asc;      /* additional sense code */
        unsigned char ascq;     /* additional sense code qualifier */
+       unsigned long sense_data_size;
 
        ei = cp->err_info;
        cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
        cmd->result |= ei->ScsiStatus;
 
        /* copy the sense data whether we need to or not. */
-       memcpy(cmd->sense_buffer, ei->SenseInfo,
-               ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
-                       SCSI_SENSE_BUFFERSIZE :
-                       ei->SenseLen);
+       if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+               sense_data_size = SCSI_SENSE_BUFFERSIZE;
+       else
+               sense_data_size = sizeof(ei->SenseInfo);
+       if (ei->SenseLen < sense_data_size)
+               sense_data_size = ei->SenseLen;
+
+       memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
        scsi_set_resid(cmd, ei->ResidualCnt);
 
        if (ei->CommandStatus == 0) {
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                c->SG[0].Ext = 0; /* we are not chaining*/
        }
        hpsa_scsi_do_simple_cmd_core(h, c);
-       hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+       if (iocommand.buf_size > 0)
+               hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
 
        /* Copy the error information out */
index b765061..bdfa223 100644 (file)
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
                spin_lock_irqsave(vhost->host->host_lock, flags);
                if (rc == H_CLOSED)
                        vio_enable_interrupts(to_vio_dev(vhost->dev));
-               else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
-                        (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                        dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
                }
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644 (file)
index 0000000..3359e10
--- /dev/null
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+            remote_device.o port.o \
+            host.o task.o probe_roms.o \
+            remote_node_context.o \
+            remote_node_table.o \
+            unsolicited_frame_control.o \
+            port_config.o \
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644 (file)
index 0000000..5f54461
--- /dev/null
@@ -0,0 +1,19 @@
+# Makefile for create_fw
+#
+CC=gcc
+CFLAGS=-c -Wall -O2 -g
+LDFLAGS=
+SOURCES=create_fw.c
+OBJECTS=$(SOURCES:.c=.o)
+EXECUTABLE=create_fw
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+	$(CC) $(LDFLAGS) $(OBJECTS) -o $@
+
+.c.o:
+	$(CC) $(CFLAGS) $< -o $@
+
+clean:
+	rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644 (file)
index 0000000..8056d2b
--- /dev/null
@@ -0,0 +1,36 @@
+This defines the temporary binary blob we are to pass to the SCU
+driver to emulate the binary firmware that we will eventually be
+able to access via NVRAM on the SCU controller.
+
+The current size of the binary blob is expected to be 149 bytes or larger
+
+Header Types:
+0x1: Phy Masks
+0x2: Phy Gens
+0x3: SAS Addrs
+0xff: End of Data
+
+ID string - u8[12]: "#SCU MAGIC#\0"
+Version - u8: 1
+SubVersion - u8: 0
+
+Header Type - u8: 0x1
+Size - u8: 8
+Phy Mask - u32[8]
+
+Header Type - u8: 0x2
+Size - u8: 8
+Phy Gen - u32[8]
+
+Header Type - u8: 0x3
+Size - u8: 8
+Sas Addr - u64[8]
+
+Header Type - u8: 0xff
+
+
+==============================================================================
+
+Place isci_firmware.bin in /lib/firmware
+Be sure to recreate the initramfs image to include the firmware.
+
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644 (file)
index 0000000..c7a2887
--- /dev/null
@@ -0,0 +1,99 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <asm/types.h>
+#include <strings.h>
+#include <stdint.h>
+
+#include "create_fw.h"
+#include "../probe_roms.h"
+
+int write_blob(struct isci_orom *isci_orom)
+{
+       FILE *fd;
+       int err;
+       size_t count;
+
+       fd = fopen(blob_name, "w+");
+       if (!fd) {
+               perror("Open file for write failed");
+               fclose(fd);
+               return -EIO;
+       }
+
+       count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
+       if (count != 1) {
+               perror("Write data failed");
+               fclose(fd);
+               return -EIO;
+       }
+
+       fclose(fd);
+
+       return 0;
+}
+
+void set_binary_values(struct isci_orom *isci_orom)
+{
+       int ctrl_idx, phy_idx, port_idx;
+
+       /* setting OROM signature */
+       strncpy(isci_orom->hdr.signature, sig, strlen(sig));
+       isci_orom->hdr.version = version;
+       isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
+       isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
+       isci_orom->hdr.num_elements = num_elements;
+
+       for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
+               isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
+               isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
+                       max_num_concurrent_dev_spin_up;
+               isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
+                       enable_ssc;
+
+               for (port_idx = 0; port_idx < 4; port_idx++)
+                       isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
+                               phy_mask[ctrl_idx][port_idx];
+
+               for (phy_idx = 0; phy_idx < 4; phy_idx++) {
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
+                               (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
+                               (__u32)(sas_addr[ctrl_idx][phy_idx]);
+
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
+                               afe_tx_amp_control0;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
+                               afe_tx_amp_control1;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
+                               afe_tx_amp_control2;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
+                               afe_tx_amp_control3;
+               }
+       }
+}
+
+/* Build the OROM image in memory and write it out as the firmware blob. */
+int main(void)
+{
+	int err;
+	struct isci_orom *isci_orom;
+
+	/* calloc zero-initializes and must be checked before use */
+	isci_orom = calloc(1, sizeof(struct isci_orom));
+	if (!isci_orom)
+		return -ENOMEM;
+
+	set_binary_values(isci_orom);
+
+	err = write_blob(isci_orom);
+	if (err < 0) {
+		free(isci_orom);
+		return err;
+	}
+
+	free(isci_orom);
+	return 0;
+}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644 (file)
index 0000000..5f29882
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef _CREATE_FW_H_
+#define _CREATE_FW_H_
+#include "../probe_roms.h"
+
+
+/* we are configuring for 2 SCUs */
+static const int num_elements = 2;
+
+/*
+ * For all defined arrays:
+ * elements 0-3 are for SCU0, ports 0-3
+ * elements 4-7 are for SCU1, ports 0-3
+ *
+ * valid configurations for one SCU are:
+ *  P0  P1  P2  P3
+ * ----------------
+ * 0xF,0x0,0x0,0x0 # 1 x4 port
+ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
+ *                 # ports
+ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
+ *                 # port
+ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
+ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
+ *
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value assigned to UNINIT_PARAM (255).
+ */
+
+/* discovery mode type (port auto config mode by default ) */
+
+/*
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value "0000000000000000". SAS address of zero's is
+ * considered invalid and will not be used.
+ */
+#ifdef MPC
+static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
+                                    {1, 2, 4, 8} };
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
+                                                    0x5FCFFFFFF0000002ULL,
+                                                    0x5FCFFFFFF0000003ULL,
+                                                    0x5FCFFFFFF0000004ULL },
+                                                  { 0x5FCFFFFFF0000005ULL,
+                                                    0x5FCFFFFFF0000006ULL,
+                                                    0x5FCFFFFFF0000007ULL,
+                                                    0x5FCFFFFFF0000008ULL } };
+#else  /* APC (default) */
+static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4];
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL },
+                                                  { 0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL } };
+#endif
+
+/* Maximum number of concurrent device spin up */
+static const int max_num_concurrent_dev_spin_up = 1;
+
+/* enable of ssc operation */
+static const int enable_ssc;
+
+/* AFE_TX_AMP_CONTROL */
+static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
+static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
+static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
+static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
+
+static const char blob_name[] = "isci_firmware.bin";
+static const char sig[] = "ISCUOEMB";
+static const unsigned char version = 0x10;
+
+#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644 (file)
index 0000000..26072f1
--- /dev/null
@@ -0,0 +1,2751 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "host.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
+
+#define smu_max_ports(dcc_value) \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+       )
+
+#define smu_max_task_contexts(dcc_value)       \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+       )
+
+#define smu_max_rncs(dcc_value) \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+       )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
+
+/**
+ *
+ *
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array inde
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+       ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+       (\
+               ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+               >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+       )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+       ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+       ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+                const struct sci_base_state *state_table, u32 initial_state)
+{
+       sci_state_transition_t handler;
+
+       sm->initial_state_id    = initial_state;
+       sm->previous_state_id   = initial_state;
+       sm->current_state_id    = initial_state;
+       sm->state_table         = state_table;
+
+       handler = sm->state_table[initial_state].enter_state;
+       if (handler)
+               handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+       sci_state_transition_t handler;
+
+       handler = sm->state_table[sm->current_state_id].exit_state;
+       if (handler)
+               handler(sm);
+
+       sm->previous_state_id = sm->current_state_id;
+       sm->current_state_id = next_state;
+
+       handler = sm->state_table[sm->current_state_id].enter_state;
+       if (handler)
+               handler(sm);
+}
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+       u32 get_value = ihost->completion_queue_get;
+       u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+       if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+           COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+               return true;
+
+       return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+       if (sci_controller_completion_queue_has_entries(ihost)) {
+               return true;
+       } else {
+               /*
+                * we have a spurious interrupt it could be that we have already
+                * emptied the completion queue from a previous interrupt */
+               writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+               /*
+                * There is a race in the hardware that could cause us not to be notified
+                * of an interrupt completion if we do not take this step.  We will mask
+                * then unmask the interrupts so if there is another interrupt pending
+                * the clearing of the interrupt source we get the next interrupt message. */
+               writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+               writel(0, &ihost->smu_registers->interrupt_mask);
+       }
+
+       return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_isr(ihost))
+               tasklet_schedule(&ihost->completion_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * sci_controller_error_isr() - check for pending queue error interrupts
+ * @ihost: host whose SMU interrupt status register is examined
+ *
+ * Return: true when a queue error or queue suspend condition is
+ * latched (the caller is expected to handle it); false otherwise, in
+ * which case the error mask is pulsed to work around the hardware
+ * notification race noted below.
+ */
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+       u32 interrupt_status;
+
+       interrupt_status =
+               readl(&ihost->smu_registers->interrupt_status);
+       interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+       if (interrupt_status != 0) {
+               /*
+                * There is an error interrupt pending so let it through and handle
+                * in the callback */
+               return true;
+       }
+
+       /*
+        * There is a race in the hardware that could cause us not to be notified
+        * of an interrupt completion if we do not take this step.  We will mask
+        * then unmask the error interrupts so if there was another interrupt
+        * pending we will be notified.
+        * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+       writel(0xff, &ihost->smu_registers->interrupt_mask);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+
+       return false;
+}
+
+/*
+ * Dispatch a task-context completion to its io request, but only after
+ * confirming the request slot still refers to a live, matching request.
+ */
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+       u32 tci = SCU_GET_COMPLETION_INDEX(ent);
+       struct isci_request *ireq = ihost->reqs[tci];
+
+       /* Make sure that we really want to process this IO request */
+       if (!test_bit(IREQ_ACTIVE, &ireq->flags))
+               return;
+       if (ireq->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
+               return;
+       if (ISCI_TAG_SEQ(ireq->io_tag) != ihost->io_request_sequence[tci])
+               return;
+
+       /* Valid, in-flight request: hand it to the io request handler. */
+       sci_io_request_tc_completion(ireq, ent);
+}
+
+/*
+ * Handle an SDMA-type completion.  Currently this only logs the
+ * affected request or device; actually failing the request/device for
+ * these conditions remains a todo.
+ */
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+       u32 index = SCU_GET_COMPLETION_INDEX(ent);
+
+       switch (scu_get_command_request_type(ent)) {
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: {
+               struct isci_request *ireq = ihost->reqs[index];
+
+               dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+                        __func__, ent, ireq);
+               /* @todo For a post TC operation we need to fail the IO
+                * request
+                */
+               break;
+       }
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: {
+               struct isci_remote_device *idev = ihost->device_table[index];
+
+               dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+                        __func__, ent, idev);
+               /* @todo For a port RNC operation we need to fail the
+                * device
+                */
+               break;
+       }
+       default:
+               dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+                        __func__, ent);
+               break;
+       }
+}
+
+/**
+ * sci_controller_unsolicited_frame() - route one unsolicited frame
+ * @ihost: host that received the frame
+ * @ent: raw completion queue entry describing the frame
+ *
+ * Marks the frame buffer in use, then forwards the frame to the phy
+ * (for address frames, and for frames whose remote node index is
+ * invalid — e.g. a signature FIS from a not-yet-created direct
+ * attached SATA device) or to the matching remote device's frame
+ * handler.  Errored frames and frames with no owning device are
+ * released straight back to the hardware.
+ */
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+       u32 index;
+       u32 frame_index;
+
+       struct scu_unsolicited_frame_header *frame_header;
+       struct isci_phy *iphy;
+       struct isci_remote_device *idev;
+
+       enum sci_status result = SCI_FAILURE;
+
+       frame_index = SCU_GET_FRAME_INDEX(ent);
+
+       frame_header = ihost->uf_control.buffers.array[frame_index].header;
+       ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+       if (SCU_GET_FRAME_ERROR(ent)) {
+               /*
+                * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
+                * /       this cause a problem? We expect the phy initialization will
+                * /       fail if there is an error in the frame. */
+               sci_controller_release_frame(ihost, frame_index);
+               return;
+       }
+
+       if (frame_header->is_address_frame) {
+               index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+               iphy = &ihost->phys[index];
+               result = sci_phy_frame_handler(iphy, frame_index);
+       } else {
+
+               index = SCU_GET_COMPLETION_INDEX(ent);
+
+               if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       /*
+                        * This is a signature fis or a frame from a direct attached SATA
+                        * device that has not yet been created.  In either case forwared
+                        * the frame to the PE and let it take care of the frame data. */
+                       index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+                       iphy = &ihost->phys[index];
+                       result = sci_phy_frame_handler(iphy, frame_index);
+               } else {
+                       if (index < ihost->remote_node_entries)
+                               idev = ihost->device_table[index];
+                       else
+                               idev = NULL;
+
+                       if (idev != NULL)
+                               result = sci_remote_device_frame_handler(idev, frame_index);
+                       else
+                               sci_controller_release_frame(ihost, frame_index);
+               }
+       }
+
+       if (result != SCI_SUCCESS) {
+               /*
+                * / @todo Is there any reason to report some additional error message
+                * /       when we get this failure notifiction? */
+       }
+}
+
+/**
+ * sci_controller_event_completion() - dispatch one event-type completion
+ * @ihost: host that received the event
+ * @ent: raw completion queue entry
+ *
+ * Decodes the event type (and, for PTX schedule events, the event
+ * specifier) and routes the event to the matching io request, remote
+ * device, or phy handler.  SMU command errors and fatal controller
+ * events are only logged here; unknown event codes get a warning.
+ */
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+       struct isci_remote_device *idev;
+       struct isci_request *ireq;
+       struct isci_phy *iphy;
+       u32 index;
+
+       index = SCU_GET_COMPLETION_INDEX(ent);
+
+       switch (scu_get_event_type(ent)) {
+       case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+               /* / @todo The driver did something wrong and we need to fix the condtion. */
+               dev_err(&ihost->pdev->dev,
+                       "%s: SCIC Controller 0x%p received SMU command error "
+                       "0x%x\n",
+                       __func__,
+                       ihost,
+                       ent);
+               break;
+
+       case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+       case SCU_EVENT_TYPE_SMU_ERROR:
+       case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+               /*
+                * / @todo This is a hardware failure and its likely that we want to
+                * /       reset the controller. */
+               dev_err(&ihost->pdev->dev,
+                       "%s: SCIC Controller 0x%p received fatal controller "
+                       "event  0x%x\n",
+                       __func__,
+                       ihost,
+                       ent);
+               break;
+
+       case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+               ireq = ihost->reqs[index];
+               sci_io_request_event_handler(ireq, ent);
+               break;
+
+       case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+               switch (scu_get_event_specifier(ent)) {
+               case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+               case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+                       ireq = ihost->reqs[index];
+                       if (ireq != NULL)
+                               sci_io_request_event_handler(ireq, ent);
+                       else
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: SCIC Controller 0x%p received "
+                                        "event 0x%x for io request object "
+                                        "that doesnt exist.\n",
+                                        __func__,
+                                        ihost,
+                                        ent);
+
+                       break;
+
+               case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+                       idev = ihost->device_table[index];
+                       if (idev != NULL)
+                               sci_remote_device_event_handler(idev, ent);
+                       else
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: SCIC Controller 0x%p received "
+                                        "event 0x%x for remote device object "
+                                        "that doesnt exist.\n",
+                                        __func__,
+                                        ihost,
+                                        ent);
+
+                       break;
+               }
+               break;
+
+       case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+       /*
+        * direct the broadcast change event to the phy first and then let
+        * the phy redirect the broadcast change to the port object */
+       case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+       /*
+        * direct error counter event to the phy object since that is where
+        * we get the event notification.  This is a type 4 event. */
+       case SCU_EVENT_TYPE_OSSP_EVENT:
+               index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+               iphy = &ihost->phys[index];
+               sci_phy_event_handler(iphy, ent);
+               break;
+
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+       case SCU_EVENT_TYPE_RNC_OPS_MISC:
+               if (index < ihost->remote_node_entries) {
+                       idev = ihost->device_table[index];
+
+                       if (idev != NULL)
+                               sci_remote_device_event_handler(idev, ent);
+               } else
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC Controller 0x%p received event 0x%x "
+                               "for remote device object 0x%0x that doesnt "
+                               "exist.\n",
+                               __func__,
+                               ihost,
+                               ent,
+                               index);
+
+               break;
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC Controller received unknown event code %x\n",
+                        __func__,
+                        ent);
+               break;
+       }
+}
+
+/**
+ * sci_controller_process_completions() - drain the hardware completion queue
+ * @ihost: host whose completion queue is processed
+ *
+ * Walks completion entries for as long as each entry's cycle bit
+ * matches the cached get pointer's cycle bit, dispatching every entry
+ * by completion type.  Event/notify entries additionally advance the
+ * separate event get pointer and its cycle bit.  The hardware
+ * completion_queue_get register is rewritten only when at least one
+ * entry was consumed.
+ */
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+       u32 completion_count = 0;
+       u32 ent;
+       u32 get_index;
+       u32 get_cycle;
+       u32 event_get;
+       u32 event_cycle;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completion queue begining get:0x%08x\n",
+               __func__,
+               ihost->completion_queue_get);
+
+       /* Get the component parts of the completion queue */
+       get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+       get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+       event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+       event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+       while (
+               NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+               == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+               ) {
+               completion_count++;
+
+               ent = ihost->completion_queue[get_index];
+
+               /* increment the get pointer and check for rollover to toggle the cycle bit */
+               get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+                            (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+               get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: completion queue entry:0x%08x\n",
+                       __func__,
+                       ent);
+
+               switch (SCU_GET_COMPLETION_TYPE(ent)) {
+               case SCU_COMPLETION_TYPE_TASK:
+                       sci_controller_task_completion(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_SDMA:
+                       sci_controller_sdma_completion(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_UFI:
+                       sci_controller_unsolicited_frame(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_EVENT:
+               case SCU_COMPLETION_TYPE_NOTIFY: {
+                       /* events consume a slot in the separate event queue;
+                        * advance its pointer and toggle its cycle bit on wrap */
+                       event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+                                      (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+                       event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+                       sci_controller_event_completion(ihost, ent);
+                       break;
+               }
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: SCIC Controller received unknown "
+                                "completion type %x\n",
+                                __func__,
+                                ent);
+                       break;
+               }
+       }
+
+       /* Update the get register if we completed one or more entries */
+       if (completion_count > 0) {
+               ihost->completion_queue_get =
+                       SMU_CQGR_GEN_BIT(ENABLE) |
+                       SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+                       event_cycle |
+                       SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+                       get_cycle |
+                       SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+               writel(ihost->completion_queue_get,
+                      &ihost->smu_registers->completion_queue_get);
+
+       }
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completion queue ending get:0x%08x\n",
+               __func__,
+               ihost->completion_queue_get);
+
+}
+
+/**
+ * sci_controller_error_handler() - service a latched error interrupt
+ * @ihost: host whose SMU status is examined
+ *
+ * A QUEUE_SUSPEND condition with completions still pending is treated
+ * as recoverable: the queue is drained and the status bit acked, then
+ * interrupts are unmasked.  Any other latched error transitions the
+ * controller state machine to SCIC_FAILED and leaves the mask alone.
+ */
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+       u32 interrupt_status;
+
+       interrupt_status =
+               readl(&ihost->smu_registers->interrupt_status);
+
+       if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+           sci_controller_completion_queue_has_entries(ihost)) {
+
+               sci_controller_process_completions(ihost);
+               writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+       } else {
+               dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+                       interrupt_status);
+
+               sci_change_state(&ihost->sm, SCIC_FAILED);
+
+               return;
+       }
+
+       /* If we dont process any completions I am not sure that we want to do this.
+        * We are in the middle of a hardware fault and should probably be reset.
+        */
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/*
+ * Legacy INTx handler: completions are acked here and drained by the
+ * tasklet; latched errors are handled inline under the scic_lock.
+ */
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_isr(ihost)) {
+               /* Ack the completion interrupt before deferring to the
+                * tasklet for the actual queue processing.
+                */
+               writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+               tasklet_schedule(&ihost->completion_tasklet);
+               return IRQ_HANDLED;
+       }
+
+       if (sci_controller_error_isr(ihost)) {
+               spin_lock(&ihost->scic_lock);
+               sci_controller_error_handler(ihost);
+               spin_unlock(&ihost->scic_lock);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_error_isr(ihost))
+               sci_controller_error_handler(ihost);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - record completion of a controller start
+ * @ihost: the ISCI host object that finished (or timed out) starting
+ * @completion_status: completion status reported by the core library
+ *
+ * Marks the host ready, clears the start-pending flag and wakes any
+ * waiters on eventq.  A non-success status is only logged; bring-up
+ * continues regardless.
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+       if (completion_status != SCI_SUCCESS)
+               dev_info(&ihost->pdev->dev,
+                       "controller start timed out, continuing...\n");
+       isci_host_change_state(ihost, isci_ready);
+       clear_bit(IHOST_START_PENDING, &ihost->flags);
+       wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_host_scan_finished() - scsi_host_template scan_finished callback
+ * @shost: SCSI host being scanned
+ * @time: time elapsed since the scan was started, in jiffies
+ *
+ * Return: 0 while controller start is still pending (the midlayer must
+ * keep polling), 1 once the host has started and queued discovery work
+ * has been flushed.
+ */
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+       if (test_bit(IHOST_START_PENDING, &ihost->flags))
+               return 0;
+
+       /* todo: use sas_flush_discovery once it is upstream */
+       scsi_flush_work(shost);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: ihost->status = %d, time = %ld\n",
+                __func__, isci_host_get_state(ihost), time);
+
+       return 1;
+
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - suggested start timeout
+ * @ihost: the controller for which the suggestion is computed
+ *
+ * The caller is free to use any timeout with sci_controller_start();
+ * this value is the empirically determined suggested minimum.
+ *
+ * Return: suggested timeout in milliseconds, or 0 when @ihost is NULL.
+ *
+ * NOTE: one phy is currently assumed to spin up per power control
+ * interval; once OEM parameters are supported, the interval count
+ * becomes the phy count divided by phys allowed per interval, minus 1.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+       u32 timeout;
+
+       /* Validate the user supplied parameters. */
+       if (!ihost)
+               return 0;
+
+       /*
+        * Suggested minimum = signature FIS timeout + phy start timeout
+        * + one power control interval for each remaining phy.
+        */
+       timeout  = SCIC_SDS_SIGNATURE_FIS_TIMEOUT;
+       timeout += SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT;
+       timeout += (SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL;
+
+       return timeout;
+}
+
+/* Unmask every SMU interrupt source (mask register cleared to zero). */
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+       BUG_ON(ihost->smu_registers == NULL);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/* Mask every SMU interrupt source (all mask bits set). */
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+       BUG_ON(ihost->smu_registers == NULL);
+       writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+}
+
+/* Turn on the port task scheduler via a read-modify-write of the PTSG
+ * control register, preserving the register's other fields.
+ */
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+       u32 ptsg_ctrl = readl(&ihost->scu_registers->peg0.ptsg.control);
+
+       ptsg_ctrl |= SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+                    SCU_PTSGCR_GEN_BIT(PTSG_ENABLE);
+       writel(ptsg_ctrl, &ihost->scu_registers->peg0.ptsg.control);
+}
+
+/*
+ * Assign all the TCs to function 0: program the full task context
+ * range [0, task_context_entries - 1] with range checking enabled.
+ * TODO: Do we actually need to read this register to write it back?
+ */
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+       u32 tca = readl(&ihost->smu_registers->task_context_assignment[0]);
+
+       tca |= SMU_TCA_GEN_VAL(STARTING, 0) |
+              SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1) |
+              SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE);
+
+       writel(tca, &ihost->smu_registers->task_context_assignment[0]);
+}
+
+/**
+ * sci_controller_initialize_completion_queue() - program the SMU
+ *    completion queue registers and reset the queue contents
+ * @ihost: host whose completion queue is (re)initialized
+ *
+ * Sets the queue and event limits, resets both get and put pointers to
+ * slot zero with the queue enabled, and seeds every queue entry with
+ * an inverted cycle bit so all entries start out invalid.
+ */
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+       u32 index;
+       u32 completion_queue_control_value;
+       u32 completion_queue_get_value;
+       u32 completion_queue_put_value;
+
+       ihost->completion_queue_get = 0;
+
+       completion_queue_control_value =
+               (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+                SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+       writel(completion_queue_control_value,
+              &ihost->smu_registers->completion_queue_control);
+
+
+       /* Set the completion queue get pointer and enable the queue */
+       completion_queue_get_value = (
+               (SMU_CQGR_GEN_VAL(POINTER, 0))
+               | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+               | (SMU_CQGR_GEN_BIT(ENABLE))
+               | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+               );
+
+       writel(completion_queue_get_value,
+              &ihost->smu_registers->completion_queue_get);
+
+       /* Set the completion queue put pointer */
+       completion_queue_put_value = (
+               (SMU_CQPR_GEN_VAL(POINTER, 0))
+               | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+               );
+
+       writel(completion_queue_put_value,
+              &ihost->smu_registers->completion_queue_put);
+
+       /* Initialize the cycle bit of the completion queue entries */
+       for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+               /*
+                * If get.cycle_bit != completion_queue.cycle_bit
+                * its not a valid completion queue entry
+                * so at system start all entries are invalid */
+               ihost->completion_queue[index] = 0x80000000;
+       }
+}
+
+/* Program the SDMA unsolicited frame queue: queue depth, then get and
+ * put pointers both reset to slot zero with the queue enabled.
+ */
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+       /* Write the queue size */
+       writel(SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES),
+              &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+       /* Setup the get pointer for the unsolicited frame queue */
+       writel(SCU_UFQGP_GEN_VAL(POINTER, 0) | SCU_UFQGP_GEN_BIT(ENABLE_BIT),
+              &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+
+       /* Setup the put pointer for the unsolicited frame queue */
+       writel(SCU_UFQPP_GEN_VAL(POINTER, 0),
+              &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+/* Move a STARTING controller to READY and report @status to the host
+ * layer; a no-op in any other controller state.
+ */
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+       if (ihost->sm.current_state_id != SCIC_STARTING)
+               return;
+
+       /*
+        * We move into the ready state, because some of the phys/ports
+        * may be up and operational.
+        */
+       sci_change_state(&ihost->sm, SCIC_READY);
+       isci_host_start_complete(ihost, status);
+}
+
+/* Return true while @iphy is in the STARTING state or any of its
+ * substates; all other states report false.
+ */
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+       switch (iphy->sm.current_state_id) {
+       case SCI_PHY_STARTING:
+       case SCI_PHY_SUB_INITIAL:
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_IAF_UF:
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+       case SCI_PHY_SUB_FINAL:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/**
+ * sci_controller_start_next_phy - start the next phy in sequence
+ * @ihost: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).  Otherwise start the next phy
+ * and arm the phy startup timer; in manual port configuration mode a
+ * phy with no assigned port is skipped recursively.
+ *
+ * Return: SCI_SUCCESS, or the failure code from sci_phy_start().
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+       struct sci_oem_params *oem = &ihost->oem_parameters;
+       struct isci_phy *iphy;
+       enum sci_status status;
+
+       status = SCI_SUCCESS;
+
+       /* A phy start is already in flight; wait for its timer. */
+       if (ihost->phy_startup_timer_pending)
+               return status;
+
+       if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+               bool is_controller_start_complete = true;
+               u32 state;
+               u8 index;
+
+               for (index = 0; index < SCI_MAX_PHYS; index++) {
+                       iphy = &ihost->phys[index];
+                       state = iphy->sm.current_state_id;
+
+                       /* phys without a real port do not gate completion */
+                       if (!phy_get_non_dummy_port(iphy))
+                               continue;
+
+                       /* The controller start operation is complete iff:
+                        * - all links have been given an opportunity to start
+                        * - have no indication of a connected device
+                        * - have an indication of a connected device and it has
+                        *   finished the link training process.
+                        */
+                       if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+                           (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+                           (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+                               is_controller_start_complete = false;
+                               break;
+                       }
+               }
+
+               /*
+                * The controller has successfully finished the start process.
+                * Inform the SCI Core user and transition to the READY state. */
+               if (is_controller_start_complete == true) {
+                       sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+                       sci_del_timer(&ihost->phy_timer);
+                       ihost->phy_startup_timer_pending = false;
+               }
+       } else {
+               iphy = &ihost->phys[ihost->next_phy_to_start];
+
+               if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+                       if (phy_get_non_dummy_port(iphy) == NULL) {
+                               ihost->next_phy_to_start++;
+
+                               /* Caution recursion ahead be forwarned
+                                *
+                                * The PHY was never added to a PORT in MPC mode
+                                * so start the next phy in sequence This phy
+                                * will never go link up and will not draw power
+                                * the OEM parameters either configured the phy
+                                * incorrectly for the PORT or it was never
+                                * assigned to a PORT
+                                */
+                               return sci_controller_start_next_phy(ihost);
+                       }
+               }
+
+               status = sci_phy_start(iphy);
+
+               if (status == SCI_SUCCESS) {
+                       sci_mod_timer(&ihost->phy_timer,
+                                     SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+                       ihost->phy_startup_timer_pending = true;
+               } else {
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller stop operation failed "
+                                "to stop phy %d because of status "
+                                "%d.\n",
+                                __func__,
+                                ihost->phys[ihost->next_phy_to_start].phy_index,
+                                status);
+               }
+
+               ihost->next_phy_to_start++;
+       }
+
+       return status;
+}
+
+/* Phy startup timer callback: clears the pending flag and retries
+ * starting the next phy until sci_controller_start_next_phy() reports
+ * SCI_SUCCESS.
+ * NOTE(review): the do/while assumes phy start failures are transient;
+ * confirm this cannot spin indefinitely under scic_lock.
+ */
+static void phy_startup_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+       unsigned long flags;
+       enum sci_status status;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* the timer was cancelled while this callback was queued */
+       if (tmr->cancel)
+               goto done;
+
+       ihost->phy_startup_timer_pending = false;
+
+       do {
+               status = sci_controller_start_next_phy(ihost);
+       } while (status != SCI_SUCCESS);
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* Number of task context indices currently checked out of the TCi
+ * free pool (circular buffer occupancy between head and tail). */
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+       return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+/**
+ * sci_controller_start() - take the controller from INITIALIZED to STARTING
+ * @ihost: controller to start
+ * @timeout: timeout handed to the controller timer (units per sci_mod_timer)
+ *
+ * Rebuilds the TCi and RNi free pools, then — with interrupts disabled
+ * — enables the port task scheduler, assigns task entries, and
+ * initializes the completion and unsolicited frame queues.  Every
+ * logical port is started, the first phy start is kicked off, the
+ * timeout timer is armed, and the state machine enters SCIC_STARTING.
+ *
+ * Return: SCI_SUCCESS, SCI_FAILURE_INVALID_STATE when the controller
+ * is not in the INITIALIZED state, or the first failing port's status.
+ */
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+                                            u32 timeout)
+{
+       enum sci_status result;
+       u16 index;
+
+       if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller start operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       /* Build the TCi free pool */
+       BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+       ihost->tci_head = 0;
+       ihost->tci_tail = 0;
+       for (index = 0; index < ihost->task_context_entries; index++)
+               isci_tci_free(ihost, index);
+
+       /* Build the RNi free pool */
+       sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+                                        ihost->remote_node_entries);
+
+       /*
+        * Before anything else lets make sure we will not be
+        * interrupted by the hardware.
+        */
+       sci_controller_disable_interrupts(ihost);
+
+       /* Enable the port task scheduler */
+       sci_controller_enable_port_task_scheduler(ihost);
+
+       /* Assign all the task entries to ihost physical function */
+       sci_controller_assign_task_entries(ihost);
+
+       /* Now initialize the completion queue */
+       sci_controller_initialize_completion_queue(ihost);
+
+       /* Initialize the unsolicited frame queue for use */
+       sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+       /* Start all of the ports on this controller */
+       for (index = 0; index < ihost->logical_port_entries; index++) {
+               struct isci_port *iport = &ihost->ports[index];
+
+               result = sci_port_start(iport);
+               if (result)
+                       return result;
+       }
+
+       sci_controller_start_next_phy(ihost);
+
+       sci_mod_timer(&ihost->timer, timeout);
+
+       sci_change_state(&ihost->sm, SCIC_STARTING);
+
+       return SCI_SUCCESS;
+}
+
+/* scsi_host_template scan_start callback: mark start as pending, then
+ * start the controller and enable interrupts under the host lock.
+ * isci_host_scan_finished() polls for completion of the start.
+ */
+void isci_host_scan_start(struct Scsi_Host *shost)
+{
+       struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+       unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+       set_bit(IHOST_START_PENDING, &ihost->flags);
+
+       spin_lock_irq(&ihost->scic_lock);
+       sci_controller_start(ihost, tmo);
+       sci_controller_enable_interrupts(ihost);
+       spin_unlock_irq(&ihost->scic_lock);
+}
+
+/* Record controller stop completion: mark the host stopped, quiesce
+ * interrupts, clear the stop-pending flag and wake eventq waiters.
+ * Note: @completion_status is currently unused.
+ */
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+       isci_host_change_state(ihost, isci_stopped);
+       sci_controller_disable_interrupts(ihost);
+       clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+       wake_up(&ihost->eventq);
+}
+
+/* Drain any pending completions, ack the completion interrupt, then
+ * pulse the interrupt mask to re-arm notifications (same hardware race
+ * workaround as in sci_controller_isr()).
+ */
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+       /* Empty out the completion queue */
+       if (sci_controller_completion_queue_has_entries(ihost))
+               sci_controller_process_completions(ihost);
+
+       /* Clear the interrupt and enable all interrupts again */
+       writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+       /* Could we write the value of SMU_ISR_COMPLETION? */
+       writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ *    routine that calls the sci core library's completion handler. It's
+ *    scheduled as a tasklet from the interrupt service routine when interrupts
+ *    in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ * Completed and errored requests are spliced onto local lists while
+ * holding ihost->scic_lock, then processed with the lock dropped so the
+ * task_done()/sas_task_abort() callbacks into libsas never run under the
+ * driver lock.
+ */
+static void isci_host_completion_routine(unsigned long data)
+{
+       struct isci_host *ihost = (struct isci_host *)data;
+       struct list_head    completed_request_list;
+       struct list_head    errored_request_list;
+       struct list_head    *current_position;
+       struct list_head    *next_position;
+       struct isci_request *request;
+       struct isci_request *next_request;
+       struct sas_task     *task;
+
+       INIT_LIST_HEAD(&completed_request_list);
+       INIT_LIST_HEAD(&errored_request_list);
+
+       spin_lock_irq(&ihost->scic_lock);
+
+       sci_controller_completion_handler(ihost);
+
+       /* Take the lists of completed I/Os from the host. */
+
+       list_splice_init(&ihost->requests_to_complete,
+                        &completed_request_list);
+
+       /* Take the list of errored I/Os from the host. */
+       list_splice_init(&ihost->requests_to_errorback,
+                        &errored_request_list);
+
+       spin_unlock_irq(&ihost->scic_lock);
+
+       /* Process any completions in the lists. */
+       list_for_each_safe(current_position, next_position,
+                          &completed_request_list) {
+
+               request = list_entry(current_position, struct isci_request,
+                                    completed_node);
+               task = isci_request_access_task(request);
+
+               /* Normal notification (task_done) */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Normal - request/task = %p/%p\n",
+                       __func__,
+                       request,
+                       task);
+
+               /* Return the task to libsas */
+               if (task != NULL) {
+
+                       task->lldd_task = NULL;
+                       if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+                               /* If the task is already in the abort path,
+                               * the task_done callback cannot be called.
+                               */
+                               task->task_done(task);
+                       }
+               }
+
+               /* The io_tag is only released after libsas has been told
+                * about the completion; the lock must be re-taken for the
+                * tag bookkeeping.
+                */
+               spin_lock_irq(&ihost->scic_lock);
+               isci_free_tag(ihost, request->io_tag);
+               spin_unlock_irq(&ihost->scic_lock);
+       }
+       list_for_each_entry_safe(request, next_request, &errored_request_list,
+                                completed_node) {
+
+               task = isci_request_access_task(request);
+
+               /* Use sas_task_abort */
+               dev_warn(&ihost->pdev->dev,
+                        "%s: Error - request/task = %p/%p\n",
+                        __func__,
+                        request,
+                        task);
+
+               if (task != NULL) {
+
+                       /* Put the task into the abort path if it's not there
+                        * already.
+                        */
+                       if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+                               sas_task_abort(task);
+
+               } else {
+                       /* This is a case where the request has completed with a
+                        * status such that it needed further target servicing,
+                        * but the sas_task reference has already been removed
+                        * from the request.  Since it was errored, it was not
+                        * being aborted, so there is nothing to do except free
+                        * it.
+                        */
+
+                       spin_lock_irq(&ihost->scic_lock);
+                       /* Remove the request from the remote device's list
+                       * of pending requests.
+                       */
+                       list_del_init(&request->dev_node);
+                       isci_free_tag(ihost, request->io_tag);
+                       spin_unlock_irq(&ihost->scic_lock);
+               }
+       }
+
+}
+
+/**
+ * sci_controller_stop() - Begin a graceful stop of the controller.
+ *    Entering the STOPPING state quiesces IO and stops the phys, ports and
+ *    devices (see sci_controller_stopping_state_enter()); the controller
+ *    timer bounds the whole operation (see controller_timeout()).
+ * @ihost: the controller object to stop.
+ * @timeout: timeout handed to sci_mod_timer() (presumably milliseconds, as
+ *    with SCIC_CONTROLLER_STOP_TIMEOUT -- confirm against sci_mod_timer()).
+ *
+ * Return: SCI_SUCCESS if the stop operation successfully began;
+ * SCI_FAILURE_INVALID_STATE otherwise.  NOTE(review): an earlier version
+ * of this comment claimed STARTED/STOPPED were also accepted, but the
+ * code only accepts the READY state.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller stop operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_mod_timer(&ihost->timer, timeout);
+       sci_change_state(&ihost->sm, SCIC_STOPPING);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - Force the controller into the RESETTING state
+ *    regardless of its current (non-transitional) state.  This is a
+ *    destructive operation: outstanding IO is neither completed nor
+ *    aborted at the remote devices.
+ * @ihost: the controller object to reset.
+ *
+ * Return: SCI_SUCCESS if the reset transition was made;
+ * SCI_FAILURE_INVALID_STATE if the controller is mid-transition
+ * (initializing, starting, stopping or already resetting).
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+       u32 state = ihost->sm.current_state_id;
+
+       if (state == SCIC_RESET || state == SCIC_READY ||
+           state == SCIC_STOPPED || state == SCIC_FAILED) {
+               /*
+                * The reset operation is not a graceful cleanup, just
+                * perform the state transition.
+                */
+               sci_change_state(&ihost->sm, SCIC_RESETTING);
+               return SCI_SUCCESS;
+       }
+
+       dev_warn(&ihost->pdev->dev,
+                "SCIC Controller reset operation requested in "
+                "invalid state\n");
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+/* Tear down a host that is going away: stop its remote devices, stop and
+ * reset the controller, then cancel every outstanding timer so none can
+ * fire after the surrounding structures are released.
+ */
+void isci_host_deinit(struct isci_host *ihost)
+{
+       int i;
+
+       isci_host_change_state(ihost, isci_stopping);
+       /* Stop every remote device that was actually allocated, across all
+        * ports.
+        */
+       for (i = 0; i < SCI_MAX_PORTS; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               struct isci_remote_device *idev, *d;
+
+               list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+                       if (test_bit(IDEV_ALLOCATED, &idev->flags))
+                               isci_remote_device_stop(ihost, idev);
+               }
+       }
+
+       set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+       spin_lock_irq(&ihost->scic_lock);
+       sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+       spin_unlock_irq(&ihost->scic_lock);
+
+       /* wait_for_stop() presumably blocks until IHOST_STOP_PENDING is
+        * cleared by isci_host_stop_complete() -- confirm against its
+        * definition.  Afterwards force a hardware reset.
+        */
+       wait_for_stop(ihost);
+       sci_controller_reset(ihost);
+
+       /* Cancel any/all outstanding port timers */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               del_timer_sync(&iport->timer.timer);
+       }
+
+       /* Cancel any/all outstanding phy timers */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               struct isci_phy *iphy = &ihost->phys[i];
+               del_timer_sync(&iphy->sata_timer.timer);
+       }
+
+       del_timer_sync(&ihost->port_agent.timer.timer);
+
+       del_timer_sync(&ihost->power_control.timer.timer);
+
+       del_timer_sync(&ihost->timer.timer);
+
+       del_timer_sync(&ihost->phy_timer.timer);
+}
+
+/* Return this controller instance's slice of the SCU register BAR. */
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+       void __iomem *bar = pcim_iomap_table(isci_host->pdev)[SCI_SCU_BAR * 2];
+
+       /* Each controller owns a fixed-size window within the BAR. */
+       return bar + SCI_SCU_BAR_SIZE * isci_host->id;
+}
+
+/* Return this controller instance's slice of the SMU register BAR. */
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+       void __iomem *bar = pcim_iomap_table(isci_host->pdev)[SCI_SMU_BAR * 2];
+
+       /* Each controller owns a fixed-size window within the BAR. */
+       return bar + SCI_SMU_BAR_SIZE * isci_host->id;
+}
+
+/* Seed @u from the module-parameter driven user settings.  The per-phy
+ * insertion frequencies are fixed values that are not (yet) exported as
+ * parameters.
+ */
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+       int phy_idx;
+
+       for (phy_idx = 0; phy_idx < SCI_MAX_PHYS; phy_idx++) {
+               struct sci_phy_user_params *p = &u->phys[phy_idx];
+
+               p->max_speed_generation = phy_gen;
+
+               /* we are not exporting these for now */
+               p->align_insertion_frequency = 0x7f;
+               p->in_connection_align_insertion_frequency = 0xff;
+               p->notify_enable_spin_up_insertion_frequency = 0x33;
+       }
+
+       u->stp_inactivity_timeout = stp_inactive_to;
+       u->ssp_inactivity_timeout = ssp_inactive_to;
+       u->stp_max_occupancy_timeout = stp_max_occ_to;
+       u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+       u->no_outbound_task_timeout = no_outbound_task_to;
+       u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       /* INITIAL is purely transient: move straight on to RESET. */
+       sci_change_state(sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, struct isci_host, sm);
+
+       /* The start phase is over; stop the start/stop watchdog timer. */
+       sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX                        256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ *    configure the interrupt coalescence.
+ * @controller: This parameter represents the handle to the controller object
+ *    for which its interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ *    Queue before an interrupt is generated. If the number of entries exceed
+ *    this number, an interrupt will be generated. The valid range of the input
+ *    is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ *    input is [0, 2700000] . A setting of 0 is allowed and results in no
+ *    interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+                                        u32 coalesce_number,
+                                        u32 coalesce_timeout)
+{
+       u8 timeout_encode = 0;
+       u32 min = 0;
+       u32 max = 0;
+
+       /* Check if the input parameters fall in the range. */
+       if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+       /*
+        *  Defined encoding for interrupt coalescing timeout:
+        *              Value   Min      Max     Units
+        *              -----   ---      ---     -----
+        *              0       -        -       Disabled
+        *              1       13.3     20.0    ns
+        *              2       26.7     40.0
+        *              3       53.3     80.0
+        *              4       106.7    160.0
+        *              5       213.3    320.0
+        *              6       426.7    640.0
+        *              7       853.3    1280.0
+        *              8       1.7      2.6     us
+        *              9       3.4      5.1
+        *              10      6.8      10.2
+        *              11      13.7     20.5
+        *              12      27.3     41.0
+        *              13      54.6     81.9
+        *              14      109.2    163.8
+        *              15      218.5    327.7
+        *              16      436.9    655.4
+        *              17      873.8    1310.7
+        *              18      1.7      2.6     ms
+        *              19      3.5      5.2
+        *              20      7.0      10.5
+        *              21      14.0     21.0
+        *              22      28.0     41.9
+        *              23      55.9     83.9
+        *              24      111.8    167.8
+        *              25      223.7    335.5
+        *              26      447.4    671.1
+        *              27      894.8    1342.2
+        *              28      1.8      2.7     s
+        *              Others Undefined */
+
+       /*
+        * Use the table above to decide the encode of interrupt coalescing timeout
+        * value for register writing. */
+       if (coalesce_timeout == 0)
+               timeout_encode = 0;
+       else{
+               /* make the timeout value in unit of (10 ns). */
+               coalesce_timeout = coalesce_timeout * 100;
+               min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+               max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+               /* get the encode of timeout for register writing: walk the
+                * table above, doubling the [min, max) window each step,
+                * until the requested timeout falls inside a range or lands
+                * between two ranges (in which case the nearer encode wins).
+                */
+               for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+                     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+                     timeout_encode++) {
+                       if (min <= coalesce_timeout &&  max > coalesce_timeout)
+                               break;
+                       else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+                                && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+                               if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+                                       break;
+                               else{
+                                       timeout_encode++;
+                                       break;
+                               }
+                       } else {
+                               max = max * 2;
+                               min = min * 2;
+                       }
+               }
+
+               if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+                       /* the value is out of range. */
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+       }
+
+       writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+              SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+              &ihost->smu_registers->interrupt_coalesce_control);
+
+
+       /* Cache the effective settings (timeout back in microseconds). */
+       ihost->interrupt_coalesce_number = (u16)coalesce_number;
+       ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+       return SCI_SUCCESS;
+}
+
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* set the default interrupt coalescence number and timeout value:
+        * 0x10 completions / 250 us (see
+        * sci_controller_set_interrupt_coalescence()).
+        */
+       sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* disable interrupt coalescence (number 0 / timeout 0) whenever the
+        * controller leaves the READY state.
+        */
+       sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+/* Ask every phy to stop.  A phy that is already stopped
+ * (SCI_FAILURE_INVALID_STATE) is tolerated; any other failure is logged
+ * and the function returns SCI_FAILURE.
+ */
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+       enum sci_status status = SCI_SUCCESS;
+       u32 i;
+
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               enum sci_status phy_status = sci_phy_stop(&ihost->phys[i]);
+
+               if (phy_status == SCI_SUCCESS ||
+                   phy_status == SCI_FAILURE_INVALID_STATE)
+                       continue;
+
+               status = SCI_FAILURE;
+               dev_warn(&ihost->pdev->dev,
+                        "%s: Controller stop operation failed to stop "
+                        "phy %d because of status %d.\n",
+                        __func__,
+                        ihost->phys[i].phy_index, phy_status);
+       }
+
+       return status;
+}
+
+/* Ask every configured logical port to stop.  A port that is already
+ * stopped (SCI_FAILURE_INVALID_STATE) is tolerated; any other failure is
+ * logged and the function returns SCI_FAILURE.
+ */
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+       enum sci_status status = SCI_SUCCESS;
+       u32 i;
+
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               enum sci_status port_status = sci_port_stop(iport);
+
+               if (port_status == SCI_SUCCESS ||
+                   port_status == SCI_FAILURE_INVALID_STATE)
+                       continue;
+
+               status = SCI_FAILURE;
+               dev_warn(&ihost->pdev->dev,
+                        "%s: Controller stop operation failed to "
+                        "stop port %d because of status %d.\n",
+                        __func__,
+                        iport->logical_port_index,
+                        port_status);
+       }
+
+       return status;
+}
+
+/* Ask every mapped remote device to stop.  Mirrors the phy/port helpers:
+ * a device that is already stopped (SCI_FAILURE_INVALID_STATE) is
+ * tolerated, any other failure is logged and reflected in the return
+ * value.  Returns SCI_SUCCESS if every device stopped cleanly.
+ */
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status status;
+       enum sci_status device_status;
+
+       status = SCI_SUCCESS;
+
+       for (index = 0; index < ihost->remote_node_entries; index++) {
+               if (ihost->device_table[index] != NULL) {
+                       /* @todo What timeout value do we want to provide to this request? */
+                       device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+                       if ((device_status != SCI_SUCCESS) &&
+                           (device_status != SCI_FAILURE_INVALID_STATE)) {
+                               /* Propagate the failure so callers can see
+                                * that at least one device did not stop
+                                * cleanly, matching the behavior of
+                                * sci_controller_stop_phys/stop_ports.
+                                */
+                               status = SCI_FAILURE;
+
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: Controller stop operation failed "
+                                        "to stop device 0x%p because of "
+                                        "status %d.\n",
+                                        __func__,
+                                        ihost->device_table[index], device_status);
+                       }
+               }
+       }
+
+       return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* Stop all of the components for this controller: phys, then
+        * ports, then remote devices.  Return values are ignored here; the
+        * helpers log any failures themselves.
+        */
+       sci_controller_stop_phys(ihost);
+       sci_controller_stop_ports(ihost);
+       sci_controller_stop_devices(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, struct isci_host, sm);
+
+       /* Leaving STOPPING (completed or failed) -- the stop watchdog is no
+        * longer needed.
+        */
+       sci_del_timer(&ihost->timer);
+}
+
+/* Hard-reset the SCU and clear the completion / unsolicited-frame queue
+ * pointers.  Invoked from the RESETTING state entry handler.
+ */
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+       /* Disable interrupts so we dont take any spurious interrupts */
+       sci_controller_disable_interrupts(ihost);
+
+       /* Reset the SCU */
+       writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+       /* Delay for 1ms before clearing the CQP and UFQPR.
+        * NOTE(review): udelay(1000) busy-waits a full millisecond;
+        * mdelay(1) (or a sleeping delay, if every caller may sleep) would
+        * be the conventional choice -- confirm calling context first.
+        */
+       udelay(1000);
+
+       /* The write to the CQGR clears the CQP */
+       writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+       /* The write to the UFQGP clears the UFQPR */
+       writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* RESETTING is transient: reset the hardware and drop straight
+        * back to the RESET state.
+        */
+       sci_controller_reset_hardware(ihost);
+       sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+/* Entry/exit handlers for the controller state machine.  States with an
+ * empty initializer need no side effects beyond the transition itself.
+ */
+static const struct sci_base_state sci_controller_state_table[] = {
+       [SCIC_INITIAL] = {
+               .enter_state = sci_controller_initial_state_enter,
+       },
+       [SCIC_RESET] = {},
+       [SCIC_INITIALIZING] = {},
+       [SCIC_INITIALIZED] = {},
+       [SCIC_STARTING] = {
+               .exit_state  = sci_controller_starting_state_exit,
+       },
+       [SCIC_READY] = {
+               .enter_state = sci_controller_ready_state_enter,
+               .exit_state  = sci_controller_ready_state_exit,
+       },
+       [SCIC_RESETTING] = {
+               .enter_state = sci_controller_resetting_state_enter,
+       },
+       [SCIC_STOPPING] = {
+               .enter_state = sci_controller_stopping_state_enter,
+               .exit_state = sci_controller_stopping_state_exit,
+       },
+       [SCIC_STOPPED] = {},
+       [SCIC_FAILED] = {}
+};
+
+/* Install baseline user/OEM parameter values for this controller. */
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+       /* these defaults are overridden by the platform / firmware */
+       u16 index;
+
+       /* Default to APC mode. */
+       ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+       /* Default to one device spinning up at a time.  (The original
+        * comment here repeated "Default to APC mode." -- a copy/paste
+        * error; this field is the spin-up concurrency limit.)
+        */
+       ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+       /* Default to no SSC operation. */
+       ihost->oem_parameters.controller.do_enable_ssc = false;
+
+       /* Initialize all of the port parameter information to narrow ports. */
+       for (index = 0; index < SCI_MAX_PORTS; index++) {
+               ihost->oem_parameters.ports[index].phy_mask = 0;
+       }
+
+       /* Initialize all of the phy parameter information. */
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               /* Default to 6G (i.e. Gen 3) for now. */
+               ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+               /* the frequencies cannot be 0 */
+               ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+               ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+               ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+               /*
+                * Previous Vitesse based expanders had a arbitration issue that
+                * is worked around by having the upper 32-bits of SAS address
+                * with a value greater then the Vitesse company identifier.
+                * Hence, usage of 0x5FCFFFFF. */
+               ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+               ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+       }
+
+       /* Inactivity / occupancy timeout defaults (units defined by
+        * struct sci_user_parameters).
+        */
+       ihost->user_parameters.stp_inactivity_timeout = 5;
+       ihost->user_parameters.ssp_inactivity_timeout = 5;
+       ihost->user_parameters.stp_max_occupancy_timeout = 5;
+       ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+       ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
+/* Watchdog callback for the STARTING/STOPPING phases (ihost->timer):
+ * fails the pending start or stop operation when it did not finish within
+ * the timeout armed by sci_controller_start()/sci_controller_stop().
+ */
+static void controller_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+       struct sci_base_state_machine *sm = &ihost->sm;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* The timer may have been cancelled after it was queued; honor the
+        * cancel flag under the lock.
+        */
+       if (tmr->cancel)
+               goto done;
+
+       if (sm->current_state_id == SCIC_STARTING)
+               sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+       else if (sm->current_state_id == SCIC_STOPPING) {
+               sci_change_state(sm, SCIC_FAILED);
+               isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+       } else  /* / @todo Now what do we want to do in this case? */
+               dev_err(&ihost->pdev->dev,
+                       "%s: Controller timer fired when controller was not "
+                       "in a state being timed.\n",
+                       __func__);
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* One-time construction of the controller object: wire up register bases,
+ * build the ports and phys, arm the watchdog timer, install default
+ * parameters, and finish by resetting the state machine.  Returns the
+ * status of the initial sci_controller_reset().
+ */
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+                                               void __iomem *scu_base,
+                                               void __iomem *smu_base)
+{
+       u8 i;
+
+       sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+       ihost->scu_registers = scu_base;
+       ihost->smu_registers = smu_base;
+
+       sci_port_configuration_agent_construct(&ihost->port_agent);
+
+       /* Construct the ports for this controller */
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               sci_port_construct(&ihost->ports[i], i, ihost);
+       /* i == SCI_MAX_PORTS after the loop: the extra array slot is the
+        * dummy port, which initially owns every phy (see the phy loop
+        * below).
+        */
+       sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+       /* Construct the phys for this controller */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               /* Add all the PHYs to the dummy port */
+               sci_phy_construct(&ihost->phys[i],
+                                 &ihost->ports[SCI_MAX_PORTS], i);
+       }
+
+       ihost->invalid_phy_mask = 0;
+
+       sci_init_timer(&ihost->timer, controller_timeout);
+
+       /* Initialize the User and OEM parameters to default values. */
+       sci_controller_set_default_config_parameters(ihost);
+
+       return sci_controller_reset(ihost);
+}
+
+/* Sanity-check OEM parameters before they are committed to the
+ * controller.  Returns 0 when valid, -EINVAL otherwise.
+ */
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
+{
+       u8 combined_mask = 0;
+       int i;
+
+       /* No port may claim phys outside the supported mask. */
+       for (i = 0; i < SCI_MAX_PORTS; i++) {
+               if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+                       return -EINVAL;
+       }
+
+       /* Every phy needs a non-zero SAS address. */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               if (oem->phys[i].sas_address.high == 0 &&
+                   oem->phys[i].sas_address.low == 0)
+                       return -EINVAL;
+       }
+
+       switch (oem->controller.mode_type) {
+       case SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE:
+               /* APC: port membership is automatic, so no explicit phy
+                * masks may be configured.
+                */
+               for (i = 0; i < SCI_MAX_PHYS; i++) {
+                       if (oem->ports[i].phy_mask != 0)
+                               return -EINVAL;
+               }
+               break;
+       case SCIC_PORT_MANUAL_CONFIGURATION_MODE:
+               /* MPC: at least one phy must be assigned to some port. */
+               for (i = 0; i < SCI_MAX_PHYS; i++)
+                       combined_mask |= oem->ports[i].phy_mask;
+               if (combined_mask == 0)
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Validate (and thereby accept) the OEM parameters; only allowed before
+ * the controller has started.
+ */
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_RESET:
+       case SCIC_INITIALIZING:
+       case SCIC_INITIALIZED:
+               if (sci_oem_parameters_validate(&ihost->oem_parameters))
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+               return SCI_SUCCESS;
+       default:
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/* Power-control interval timer callback: reset the per-interval spin-up
+ * budget and grant power to waiting phys, up to
+ * max_concurrent_dev_spin_up grants per interval.
+ */
+static void power_control_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+       struct isci_phy *iphy;
+       unsigned long flags;
+       u8 i;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* Honor a cancel that raced with the timer firing. */
+       if (tmr->cancel)
+               goto done;
+
+       /* A new interval starts with a fresh grant budget. */
+       ihost->power_control.phys_granted_power = 0;
+
+       if (ihost->power_control.phys_waiting == 0) {
+               /* No phy is waiting; let the timer lapse until the next
+                * queue insertion restarts it.
+                */
+               ihost->power_control.timer_started = false;
+               goto done;
+       }
+
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+               if (ihost->power_control.phys_waiting == 0)
+                       break;
+
+               iphy = ihost->power_control.requesters[i];
+               if (iphy == NULL)
+                       continue;
+
+               /* Stop granting once this interval's budget is used up. */
+               if (ihost->power_control.phys_granted_power >=
+                   ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+                       break;
+
+               ihost->power_control.requesters[i] = NULL;
+               ihost->power_control.phys_waiting--;
+               ihost->power_control.phys_granted_power++;
+               sci_phy_consume_power_handler(iphy);
+       }
+
+       /*
+        * It doesn't matter if the power list is empty, we need to start the
+        * timer in case another phy becomes ready.
+        */
+       sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+       ihost->power_control.timer_started = true;
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* Grant spin-up power to @iphy immediately when the concurrency budget
+ * allows; otherwise queue it (indexed by phy_index, so a phy can only be
+ * queued once) for power_control_timeout() to service.
+ */
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+                                              struct isci_phy *iphy)
+{
+       BUG_ON(iphy == NULL);
+
+       if (ihost->power_control.phys_granted_power <
+           ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+               ihost->power_control.phys_granted_power++;
+               sci_phy_consume_power_handler(iphy);
+
+               /*
+                * stop and start the power_control timer. When the timer fires, the
+                * no_of_phys_granted_power will be set to 0
+                */
+               if (ihost->power_control.timer_started)
+                       sci_del_timer(&ihost->power_control.timer);
+
+               sci_mod_timer(&ihost->power_control.timer,
+                                SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+               ihost->power_control.timer_started = true;
+
+       } else {
+               /* Add the phy in the waiting list */
+               ihost->power_control.requesters[iphy->phy_index] = iphy;
+               ihost->power_control.phys_waiting++;
+       }
+}
+
+/* Withdraw @iphy's pending power request, if it has one queued. */
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+                                              struct isci_phy *iphy)
+{
+       BUG_ON(iphy == NULL);
+
+       /* Only decrement the waiter count if the phy was actually queued. */
+       if (ihost->power_control.requesters[iphy->phy_index] != NULL)
+               ihost->power_control.phys_waiting--;
+
+       ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+/* Delay (in microseconds) required after each AFE register write. */
+#define AFE_REGISTER_WRITE_DELAY 10
+
+/* Initialize the AFE (analog front end) for every phy.  Register values
+ * depend on the silicon revision (A2/B0/C0); the per-phy TX amplitude
+ * settings come from the OEM parameters.  Each write is followed by a
+ * mandatory delay.
+ */
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+       const struct sci_oem_params *oem = &ihost->oem_parameters;
+       struct pci_dev *pdev = ihost->pdev;
+       u32 afe_status;
+       u32 phy_id;
+
+       /* Clear DFX Status registers */
+       writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       if (is_b0(pdev)) {
+               /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+                * Timer, PM Stagger Timer */
+               writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Configure bias currents to normal */
+       if (is_a2(pdev))
+               writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+       else if (is_b0(pdev) || is_c0(pdev))
+               writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       /* Enable PLL */
+       if (is_b0(pdev) || is_c0(pdev))
+               writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
+       else
+               writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       /* Wait for the PLL to lock (bit 12 of the common block status).
+        * NOTE(review): this busy-waits with no timeout; a PLL that never
+        * locks would hang here.
+        */
+       do {
+               afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       } while ((afe_status & 0x00001000) == 0);
+
+       if (is_a2(pdev)) {
+               /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
+               writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Per-phy transceiver setup, then program the OEM TX amplitudes. */
+       for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+               const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+
+               if (is_b0(pdev)) {
+                        /* Configure transmitter SSC parameters */
+                       writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else if (is_c0(pdev)) {
+                        /* Configure transmitter SSC parameters.
+                         * NOTE(review): 0x0003000 has one digit fewer than
+                         * the B0 value 0x00030000 above - confirm this is
+                         * intentional and not a dropped zero. */
+                       writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * All defaults, except the Receive Word Alignment/Comma Detect
+                        * Enable....(0xe800) */
+                       writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else {
+                       /*
+                        * All defaults, except the Receive Word Alignment/Comma Detect
+                        * Enable....(0xe800) */
+                       writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               }
+
+               /*
+                * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                * & increase TX int & ext bias 20%....(0xe85c) */
+               if (is_a2(pdev))
+                       writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               else if (is_b0(pdev)) {
+                        /* Power down TX and RX (PWRDNTX and PWRDNRX) */
+                       writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                        * & increase TX int & ext bias 20%....(0xe85c) */
+                       writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               } else {
+                       writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                        * & increase TX int & ext bias 20%....(0xe85c) */
+                       writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               }
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               if (is_a2(pdev)) {
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               }
+
+               /*
+                * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
+                * RDD=0x0(RX Detect Enabled) ....(0xe800) */
+               writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               /* Leave DFE/FFE on */
+               if (is_a2(pdev))
+                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+               else if (is_b0(pdev)) {
+                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+               } else {
+                       writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+               }
+
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               /* Program OEM-specified TX amplitudes for this phy. */
+               writel(oem_phy->afe_tx_amp_control0,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control1,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control2,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control3,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Transfer control to the PEs */
+       writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+/* Reset the controller's phy power-control bookkeeping to its idle state:
+ * no requesters queued, no phys waiting, no phys granted power, and the
+ * power-control timer armed with its timeout handler.
+ */
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+       memset(ihost->power_control.requesters, 0,
+              sizeof(ihost->power_control.requesters));
+       ihost->power_control.phys_waiting = 0;
+       ihost->power_control.phys_granted_power = 0;
+
+       sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+}
+
+/* Bring the controller from SCIC_RESET to SCIC_INITIALIZED: program the AFE,
+ * release the hardware from soft reset, size the port/task/remote-node tables
+ * from the hardware capacity register, and initialize phys and ports.  On any
+ * failure the state machine is moved to SCIC_FAILED.  Returns SCI_SUCCESS or
+ * an error status.
+ */
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+       struct sci_base_state_machine *sm = &ihost->sm;
+       enum sci_status result = SCI_FAILURE;
+       unsigned long i, state, val;
+
+       if (ihost->sm.current_state_id != SCIC_RESET) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller initialize operation requested "
+                        "in invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(sm, SCIC_INITIALIZING);
+
+       sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+       ihost->next_phy_to_start = 0;
+       ihost->phy_startup_timer_pending = false;
+
+       sci_controller_initialize_power_control(ihost);
+
+       /*
+        * There is nothing to do here for B0 since we do not have to
+        * program the AFE registers.
+        * @todo The AFE settings are supposed to be correct for the B0 but
+        *       presently they seem to be wrong. */
+       sci_controller_afe_initialization(ihost);
+
+
+       /* Take the hardware out of reset */
+       writel(0, &ihost->smu_registers->soft_reset_control);
+
+       /*
+        * @todo Provide meaningful error code for hardware failure
+        * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+       for (i = 100; i >= 1; i--) {
+               u32 status;
+
+               /* Loop until the hardware reports success */
+               udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+               status = readl(&ihost->smu_registers->control_status);
+
+               if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+                       break;
+       }
+       /* i == 0 means all 100 polls elapsed without RAM init completing;
+        * result is still SCI_FAILURE, so we fall through to SCIC_FAILED. */
+       if (i == 0)
+               goto out;
+
+       /*
+        * Determine what are the actual device capacities that the
+        * hardware will support */
+       val = readl(&ihost->smu_registers->device_context_capacity);
+
+       /* Record the smaller of the two capacity values */
+       ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+       ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+       ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+       /*
+        * Make all PEs that are unassigned match up with the
+        * logical ports
+        */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct scu_port_task_scheduler_group_registers __iomem
+                       *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+               writel(i, &ptsg->protocol_engine[i]);
+       }
+
+       /* Initialize hardware PCI Relaxed ordering in DMA engines */
+       val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+       val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+       writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+       val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+       val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+       writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+       /*
+        * Initialize the PHYs before the PORTs because the PHY registers
+        * are accessed during the port initialization.
+        */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               result = sci_phy_initialize(&ihost->phys[i],
+                                           &ihost->scu_registers->peg0.pe[i].tl,
+                                           &ihost->scu_registers->peg0.pe[i].ll);
+               if (result != SCI_SUCCESS)
+                       goto out;
+       }
+
+       /* Hook each logical port up to its register banks. */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+
+               iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+               iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+               iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+       }
+
+       result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+       /* Advance the controller state machine */
+       if (result == SCI_SUCCESS)
+               state = SCIC_INITIALIZED;
+       else
+               state = SCIC_FAILED;
+       sci_change_state(sm, state);
+
+       return result;
+}
+
+/* Validate @sci_parms and, if legal, copy them into the controller.  May only
+ * be called before the controller is started (RESET/INITIALIZING/INITIALIZED);
+ * returns SCI_FAILURE_INVALID_STATE otherwise, or
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE if any per-phy or timeout value is out
+ * of range.
+ */
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+                                              struct sci_user_parameters *sci_parms)
+{
+       u32 state = ihost->sm.current_state_id;
+
+       if (state == SCIC_RESET ||
+           state == SCIC_INITIALIZING ||
+           state == SCIC_INITIALIZED) {
+               u16 index;
+
+               /*
+                * Validate the user parameters.  If they are not legal, then
+                * return a failure.
+                */
+               for (index = 0; index < SCI_MAX_PHYS; index++) {
+                       struct sci_phy_user_params *user_phy;
+
+                       user_phy = &sci_parms->phys[index];
+
+                       /* Speed must be in (NO_SPEED, MAX_SPEED]. */
+                       if (!((user_phy->max_speed_generation <=
+                                               SCIC_SDS_PARM_MAX_SPEED) &&
+                             (user_phy->max_speed_generation >
+                                               SCIC_SDS_PARM_NO_SPEED)))
+                               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+                       /* The in-connection align insertion frequency must be
+                        * at least 3 and the other frequencies non-zero.
+                        * (A duplicate standalone test of the < 3 condition
+                        * was removed; it is covered here.)
+                        */
+                       if ((user_phy->in_connection_align_insertion_frequency <
+                                               3) ||
+                           (user_phy->align_insertion_frequency == 0) ||
+                           (user_phy->
+                               notify_enable_spin_up_insertion_frequency ==
+                                               0))
+                               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+               }
+
+               /* All protocol timeouts must be non-zero. */
+               if ((sci_parms->stp_inactivity_timeout == 0) ||
+                   (sci_parms->ssp_inactivity_timeout == 0) ||
+                   (sci_parms->stp_max_occupancy_timeout == 0) ||
+                   (sci_parms->ssp_max_occupancy_timeout == 0) ||
+                   (sci_parms->no_outbound_task_timeout == 0))
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+               memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+/* Allocate the DMA-coherent controller tables (completion queue, remote node
+ * context table, task context table, unsolicited frame buffers) and program
+ * their bus addresses into the hardware.  All allocations are managed
+ * (dmam_*), so they are released automatically on device teardown.
+ * Returns 0 on success or a negative errno.
+ */
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+       struct device *dev = &ihost->pdev->dev;
+       dma_addr_t dma;
+       size_t size;
+       int err;
+
+       size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+       ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+       if (!ihost->completion_queue)
+               return -ENOMEM;
+
+       writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
+
+       size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+       ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+                                                              GFP_KERNEL);
+       if (!ihost->remote_node_context_table)
+               return -ENOMEM;
+
+       writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
+
+       /* Fixed: this statement previously ended with a stray ',' (comma
+        * operator) instead of ';', silently joining it to the next one. */
+       size = ihost->task_context_entries * sizeof(struct scu_task_context);
+       ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+       if (!ihost->task_context_table)
+               return -ENOMEM;
+
+       ihost->task_context_dma = dma;
+       writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+
+       err = sci_unsolicited_frame_control_construct(ihost);
+       if (err)
+               return err;
+
+       /*
+        * Inform the silicon as to the location of the UF headers and
+        * address table.
+        */
+       writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+               &ihost->scu_registers->sdma.uf_header_base_address_lower);
+       writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+               &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+       writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+               &ihost->scu_registers->sdma.uf_address_table_lower);
+       writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+               &ihost->scu_registers->sdma.uf_address_table_upper);
+
+       return 0;
+}
+
+/* Top-level host initialization: construct the controller, apply user and
+ * OEM parameters, run the hardware initialize sequence, allocate DMA memory,
+ * and set up ports, phys, remote devices, and the per-tag request pool.
+ * Returns 0 on success or a negative errno.
+ */
+int isci_host_init(struct isci_host *ihost)
+{
+       int err = 0, i;
+       enum sci_status status;
+       struct sci_user_parameters sci_user_params;
+       struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+
+       spin_lock_init(&ihost->state_lock);
+       spin_lock_init(&ihost->scic_lock);
+       init_waitqueue_head(&ihost->eventq);
+
+       isci_host_change_state(ihost, isci_starting);
+
+       status = sci_controller_construct(ihost, scu_base(ihost),
+                                         smu_base(ihost));
+
+       if (status != SCI_SUCCESS) {
+               dev_err(&ihost->pdev->dev,
+                       "%s: sci_controller_construct failed - status = %x\n",
+                       __func__,
+                       status);
+               return -ENODEV;
+       }
+
+       ihost->sas_ha.dev = &ihost->pdev->dev;
+       ihost->sas_ha.lldd_ha = ihost;
+
+       /*
+        * grab initial values stored in the controller object for OEM and USER
+        * parameters
+        */
+       isci_user_parameters_get(&sci_user_params);
+       status = sci_user_parameters_set(ihost, &sci_user_params);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_user_parameters_set failed\n",
+                        __func__);
+               return -ENODEV;
+       }
+
+       /* grab any OEM parameters specified in orom */
+       if (pci_info->orom) {
+               status = isci_parse_oem_parameters(&ihost->oem_parameters,
+                                                  pci_info->orom,
+                                                  ihost->id);
+               if (status != SCI_SUCCESS) {
+                       dev_warn(&ihost->pdev->dev,
+                                "parsing firmware oem parameters failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       status = sci_oem_parameters_set(ihost);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                               "%s: sci_oem_parameters_set failed\n",
+                               __func__);
+               return -ENODEV;
+       }
+
+       tasklet_init(&ihost->completion_tasklet,
+                    isci_host_completion_routine, (unsigned long)ihost);
+
+       INIT_LIST_HEAD(&ihost->requests_to_complete);
+       INIT_LIST_HEAD(&ihost->requests_to_errorback);
+
+       /* The hardware initialize sequence runs under the scic lock. */
+       spin_lock_irq(&ihost->scic_lock);
+       status = sci_controller_initialize(ihost);
+       spin_unlock_irq(&ihost->scic_lock);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_controller_initialize failed -"
+                        " status = 0x%x\n",
+                        __func__, status);
+               return -ENODEV;
+       }
+
+       err = sci_controller_mem_init(ihost);
+       if (err)
+               return err;
+
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               isci_port_init(&ihost->ports[i], ihost, i);
+
+       for (i = 0; i < SCI_MAX_PHYS; i++)
+               isci_phy_init(&ihost->phys[i], ihost, i);
+
+       for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+               struct isci_remote_device *idev = &ihost->devices[i];
+
+               INIT_LIST_HEAD(&idev->reqs_in_process);
+               INIT_LIST_HEAD(&idev->node);
+       }
+
+       /* Pre-allocate one DMA-coherent request per possible I/O tag;
+        * allocations are managed, so teardown frees them automatically. */
+       for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+               struct isci_request *ireq;
+               dma_addr_t dma;
+
+               ireq = dmam_alloc_coherent(&ihost->pdev->dev,
+                                          sizeof(struct isci_request), &dma,
+                                          GFP_KERNEL);
+               if (!ireq)
+                       return -ENOMEM;
+
+               ireq->tc = &ihost->task_context_table[i];
+               ireq->owning_controller = ihost;
+               spin_lock_init(&ireq->state_lock);
+               ireq->request_daddr = dma;
+               ireq->isci_host = ihost;
+               ihost->reqs[i] = ireq;
+       }
+
+       return 0;
+}
+
+/* Handle a phy link-up event.  During startup the phy-startup timer is
+ * cancelled and the next phy is kicked off; when ready, the event is simply
+ * forwarded to the port agent.  In any other state the event is ignored
+ * (with a debug message).
+ */
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+                           struct isci_phy *iphy)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STARTING:
+               sci_del_timer(&ihost->phy_timer);
+               ihost->phy_startup_timer_pending = false;
+               ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+                                                 iport, iphy);
+               sci_controller_start_next_phy(ihost);
+               break;
+       case SCIC_READY:
+               ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+                                                 iport, iphy);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: SCIC Controller linkup event from phy %d in "
+                       "unexpected state %d\n", __func__, iphy->phy_index,
+                       ihost->sm.current_state_id);
+       }
+}
+
+/* Handle a phy link-down event: forward it to the port agent while starting
+ * or ready; otherwise log and ignore it.
+ */
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+                             struct isci_phy *iphy)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STARTING:
+       case SCIC_READY:
+               ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+                                                  iport, iphy);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: SCIC Controller linkdown event from phy %d in "
+                       "unexpected state %d\n",
+                       __func__,
+                       iphy->phy_index,
+                       ihost->sm.current_state_id);
+       }
+}
+
+/* Return true if any remote device in the device table is still in the
+ * SCI_DEV_STOPPING state.
+ */
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+       u32 i;
+
+       for (i = 0; i < ihost->remote_node_entries; i++) {
+               struct isci_remote_device *idev = ihost->device_table[i];
+
+               if (idev != NULL &&
+                   idev->sm.current_state_id == SCI_DEV_STOPPING)
+                       return true;
+       }
+
+       return false;
+}
+
+/* Called when a remote device finishes stopping.  Only meaningful while the
+ * controller itself is stopping; once the last device has stopped, advance
+ * the controller state machine to SCIC_STOPPED.
+ */
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+                                         struct isci_remote_device *idev)
+{
+       if (ihost->sm.current_state_id != SCIC_STOPPING) {
+               dev_dbg(&ihost->pdev->dev,
+                       "SCIC Controller 0x%p remote device stopped event "
+                       "from device 0x%p in unexpected state %d\n",
+                       ihost, idev,
+                       ihost->sm.current_state_id);
+               return;
+       }
+
+       if (!sci_controller_has_remote_devices_stopping(ihost))
+               sci_change_state(&ihost->sm, SCIC_STOPPED);
+}
+
+/* Post a task context command to the hardware's post context port. */
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+       dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+               __func__, ihost->id, request);
+
+       writel(request, &ihost->smu_registers->post_context_port);
+}
+
+/* Look up the active request for @io_tag.  Returns NULL if the tag's task
+ * context index is out of range, the request is not active, or the tag's
+ * sequence number is stale (i.e. the tag was recycled).
+ */
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+       u16 task_index;
+       u16 task_sequence;
+
+       task_index = ISCI_TAG_TCI(io_tag);
+
+       if (task_index < ihost->task_context_entries) {
+               struct isci_request *ireq = ihost->reqs[task_index];
+
+               if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+                       task_sequence = ISCI_TAG_SEQ(io_tag);
+
+                       /* The sequence guards against matching a recycled tag. */
+                       if (task_sequence == ihost->io_request_sequence[task_index])
+                               return ireq;
+               }
+       }
+
+       return NULL;
+}
+
+/**
+ * This method allocates a remote node index and reserves the remote node
+ *    context space for use. This method can fail if there are no more remote
+ *    node indexes available.
+ * @ihost: This is the controller object which contains the set of
+ *    free remote node ids
+ * @idev: This is the device object which is requesting the remote node
+ *    id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ *    is available
+ *
+ * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no available
+ * remote node index.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+                                                           struct isci_remote_device *idev,
+                                                           u16 *node_id)
+{
+       u16 node_index;
+       u32 remote_node_count = sci_remote_device_node_count(idev);
+
+       node_index = sci_remote_node_table_allocate_remote_node(
+               &ihost->available_remote_nodes, remote_node_count
+               );
+
+       if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+               ihost->device_table[node_index] = idev;
+
+               *node_id = node_index;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+/* Release the remote node context(s) reserved for @idev at @node_id.  The
+ * device-table check guards against freeing a slot that was reassigned or
+ * never owned by this device.
+ */
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+                                            struct isci_remote_device *idev,
+                                            u16 node_id)
+{
+       u32 remote_node_count = sci_remote_device_node_count(idev);
+
+       if (ihost->device_table[node_id] == idev) {
+               ihost->device_table[node_id] = NULL;
+
+               sci_remote_node_table_release_remote_node_index(
+                       &ihost->available_remote_nodes, remote_node_count, node_id
+                       );
+       }
+}
+
+/* Assemble a SATA D2H FIS into @response_buffer: the first dword comes from
+ * @frame_header, the remainder of the FIS from @frame_buffer.
+ */
+void sci_controller_copy_sata_response(void *response_buffer,
+                                      void *frame_header,
+                                      void *frame_buffer)
+{
+       const size_t hdr_len = sizeof(u32);
+
+       /* XXX type safety? */
+       memcpy(response_buffer, frame_header, hdr_len);
+       memcpy((char *)response_buffer + hdr_len, frame_buffer,
+              sizeof(struct dev_to_host_fis) - hdr_len);
+}
+
+/* Return an unsolicited frame to the hardware.  If releasing this frame
+ * advanced the get pointer, publish the new value to the silicon.
+ */
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+       if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+               writel(ihost->uf_control.get,
+                       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+/* Return a task context index to the circular tci pool (tail side). */
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+       u16 slot = ihost->tci_tail & (SCI_MAX_IO_REQUESTS - 1);
+
+       ihost->tci_pool[slot] = tci;
+       ihost->tci_tail = slot + 1;
+}
+
+/* Take the next task context index from the circular tci pool (head side).
+ * Callers must first check availability via isci_tci_space().
+ */
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+       u16 slot = ihost->tci_head & (SCI_MAX_IO_REQUESTS - 1);
+
+       ihost->tci_head = slot + 1;
+       return ihost->tci_pool[slot];
+}
+
+/* Number of free slots remaining in the circular tci pool. */
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+       return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+/* Allocate an I/O tag: a free task context index combined with its current
+ * sequence number.  Returns SCI_CONTROLLER_INVALID_IO_TAG when the pool is
+ * exhausted.
+ */
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+       if (isci_tci_space(ihost)) {
+               u16 tci = isci_tci_alloc(ihost);
+               u8 seq = ihost->io_request_sequence[tci];
+
+               return ISCI_TAG(seq, tci);
+       }
+
+       return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/* Free an I/O tag.  The sequence comparison rejects stale/duplicate frees
+ * (a recycled tag has a newer sequence); bumping the sequence on free makes
+ * any outstanding copies of this tag invalid.
+ */
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+       u16 tci = ISCI_TAG_TCI(io_tag);
+       u16 seq = ISCI_TAG_SEQ(io_tag);
+
+       /* prevent tail from passing head */
+       if (isci_tci_active(ihost) == 0)
+               return SCI_FAILURE_INVALID_IO_TAG;
+
+       if (seq == ihost->io_request_sequence[tci]) {
+               ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+               isci_tci_free(ihost, tci);
+
+               return SCI_SUCCESS;
+       }
+       return SCI_FAILURE_INVALID_IO_TAG;
+}
+
+/* Start an I/O request on @idev: the device layer validates/starts the I/O,
+ * then the request is marked active and its task context is posted to the
+ * hardware.  Only legal while the controller is SCIC_READY.
+ */
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+                                       struct isci_remote_device *idev,
+                                       struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_device_start_io(ihost, idev, ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       set_bit(IREQ_ACTIVE, &ireq->flags);
+       sci_controller_post_request(ihost, ireq->post_context);
+       return SCI_SUCCESS;
+}
+
+/* Terminate a started core I/O request.  This removes the request from the
+ * host controller (it does not abort the command at the target).  Only legal
+ * while the controller is SCIC_READY.
+ */
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+                                                struct isci_remote_device *idev,
+                                                struct isci_request *ireq)
+{
+       /* terminate an ongoing (i.e. started) core IO request.  This does not
+        * abort the IO request at the target, but rather removes the IO
+        * request from the host controller.
+        */
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "invalid state to terminate request\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_io_request_terminate(ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       /*
+        * Utilize the original post context command and or in the POST_TC_ABORT
+        * request sub-type.
+        */
+       sci_controller_post_request(ihost,
+                                   ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ *    completion operations for an IO request.  After this method is invoked,
+ *    the user should consider the IO request as invalid until it is properly
+ *    reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ *    IO request.
+ * @idev: The handle to the remote device object for which to complete
+ *    the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+                                          struct isci_remote_device *idev,
+                                          struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STOPPING:
+               /* XXX: Implement this function */
+               return SCI_FAILURE;
+       case SCIC_READY:
+               status = sci_remote_device_complete_io(ihost, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               /* The request's tag stays allocated; only the active flag is
+                * cleared here.  (A set-but-unused 'index' local computing
+                * ISCI_TAG_TCI(ireq->io_tag) was removed.) */
+               clear_bit(IREQ_ACTIVE, &ireq->flags);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/* Re-post an existing request to the hardware (e.g. after it was paused).
+ * Marks the request active again.  Only legal while the controller is
+ * SCIC_READY.
+ */
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       set_bit(IREQ_ACTIVE, &ireq->flags);
+       sci_controller_post_request(ihost, ireq->post_context);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ *    send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ *    management request.
+ * @idev: the handle to the remote device object for which to start
+ *    the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+                                              struct isci_remote_device *idev,
+                                              struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC Controller starting task from invalid "
+                        "state\n",
+                        __func__);
+               return SCI_TASK_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_device_start_task(ihost, idev, ireq);
+       switch (status) {
+       case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+               set_bit(IREQ_ACTIVE, &ireq->flags);
+
+               /*
+                * We will let framework know this task request started successfully,
+                * although core is still working on starting the request (to post tc when
+                * RNC is resumed.)
+                */
+               return SCI_SUCCESS;
+       case SCI_SUCCESS:
+               set_bit(IREQ_ACTIVE, &ireq->flags);
+               sci_controller_post_request(ihost, ireq->post_context);
+               break;
+       default:
+               break;
+       }
+
+       return status;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644 (file)
index 0000000..062101a
--- /dev/null
@@ -0,0 +1,542 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+       /**
+        * This field is set when the power control timer is running and cleared when
+        * it is not.
+        */
+       bool timer_started;
+
+       /**
+        * Timer to control when the directed attached disks can consume power.
+        */
+       struct sci_timer timer;
+
+       /**
+        * This field is used to keep track of how many phys are put into the
+        * requesters field.
+        */
+       u8 phys_waiting;
+
+       /**
+        * This field is used to keep track of how many phys have been granted to consume power
+        */
+       u8 phys_granted_power;
+
+       /**
+        * This field is an array of phys that we are waiting on. The phys are direct
+        * mapped into requesters via struct sci_phy.phy_index
+        */
+       struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+                              struct sci_port_configuration_agent *,
+                              struct isci_port *, struct isci_phy *);
+
+/* Tracks per-controller phy-to-port configuration state and dispatches
+ * link-up/link-down events to the active configuration strategy.
+ */
+struct sci_port_configuration_agent {
+       u16 phy_configured_mask;
+       u16 phy_ready_mask;
+       /* per-phy bounds on which port indices the phy may join */
+       struct {
+               u8 min_index;
+               u8 max_index;
+       } phy_valid_port_range[SCI_MAX_PHYS];
+       bool timer_pending;
+       /* strategy callbacks invoked by sci_controller_link_up/link_down */
+       port_config_fn link_up_handler;
+       port_config_fn link_down_handler;
+       struct sci_timer        timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ *                   the phy index is set so further notifications are not
+ *                   made.  Once the phy reports link up and is made part of a
+ *                   port then this bit is cleared.
+ */
+struct isci_host {
+       struct sci_base_state_machine sm;
+       /* XXX can we time this externally */
+       struct sci_timer timer;
+       /* XXX drop reference module params directly */
+       struct sci_user_parameters user_parameters;
+       /* XXX no need to be a union */
+       struct sci_oem_params oem_parameters;
+       struct sci_port_configuration_agent port_agent;
+       struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+       struct sci_remote_node_table available_remote_nodes;
+       struct sci_power_control power_control;
+       u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+       struct scu_task_context *task_context_table;
+       dma_addr_t task_context_dma;
+       union scu_remote_node_context *remote_node_context_table;
+       u32 *completion_queue;
+       u32 completion_queue_get;
+       u32 logical_port_entries;
+       u32 remote_node_entries;
+       u32 task_context_entries;
+       struct sci_unsolicited_frame_control uf_control;
+
+       /* phy startup */
+       struct sci_timer phy_timer;
+       /* XXX kill */
+       bool phy_startup_timer_pending;
+       u32 next_phy_to_start;
+       /* XXX convert to unsigned long and use bitops */
+       u8 invalid_phy_mask;
+
+       /* TODO attempt dynamic interrupt coalescing scheme */
+       u16 interrupt_coalesce_number;
+       u32 interrupt_coalesce_timeout;
+       struct smu_registers __iomem *smu_registers;
+       struct scu_registers __iomem *scu_registers;
+
+       /* ring of free task context indices (see isci_alloc_tag/isci_tci_free) */
+       u16 tci_head;
+       u16 tci_tail;
+       u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+       int id; /* unique within a given pci device */
+       struct isci_phy phys[SCI_MAX_PHYS];
+       struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+       struct sas_ha_struct sas_ha;
+
+       spinlock_t state_lock;
+       struct pci_dev *pdev;
+       enum isci_status status;
+       #define IHOST_START_PENDING 0
+       #define IHOST_STOP_PENDING 1
+       unsigned long flags;
+       wait_queue_head_t eventq;
+       struct Scsi_Host *shost;
+       struct tasklet_struct completion_tasklet;
+       struct list_head requests_to_complete;
+       struct list_head requests_to_errorback;
+       spinlock_t scic_lock;
+       struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+       struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ *    for the common controller state machine.
+ */
+enum sci_controller_states {
+       /**
+        * Simply the initial state for the base controller state machine.
+        */
+       SCIC_INITIAL = 0,
+
+       /**
+        * This state indicates that the controller is reset.  The memory for
+        * the controller is in it's initial state, but the controller requires
+        * initialization.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the RESETTING state.
+        */
+       SCIC_RESET,
+
+       /**
+        * This state is typically an action state that indicates the controller
+        * is in the process of initialization.  In this state no new IO operations
+        * are permitted.
+        * This state is entered from the RESET state.
+        */
+       SCIC_INITIALIZING,
+
+       /**
+        * This state indicates that the controller has been successfully
+        * initialized.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        */
+       SCIC_INITIALIZED,
+
+       /**
+        * This state indicates that the controller is in the process of becoming
+        * ready (i.e. starting).  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZED state.
+        */
+       SCIC_STARTING,
+
+       /**
+        * This state indicates the controller is now ready.  Thus, the user
+        * is able to perform IO operations on the controller.
+        * This state is entered from the STARTING state.
+        */
+       SCIC_READY,
+
+       /**
+        * This state is typically an action state that indicates the controller
+        * is in the process of resetting.  Thus, the user is unable to perform
+        * IO operations on the controller.  A reset is considered destructive in
+        * most cases.
+        * This state is entered from the READY state.
+        * This state is entered from the FAILED state.
+        * This state is entered from the STOPPED state.
+        */
+       SCIC_RESETTING,
+
+       /**
+        * This state indicates that the controller is in the process of stopping.
+        * In this state no new IO operations are permitted, but existing IO
+        * operations are allowed to complete.
+        * This state is entered from the READY state.
+        */
+       SCIC_STOPPING,
+
+       /**
+        * This state indicates that the controller has successfully been stopped.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the STOPPING state.
+        */
+       SCIC_STOPPED,
+
+       /**
+        * This state indicates that the controller could not successfully be
+        * initialized.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        * This state is entered from the STARTING state.
+        * This state is entered from the STOPPING state.
+        * This state is entered from the RESETTING state.
+        */
+       SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This class represents the pci function containing the
+ *    controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ *    the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+       /* SCI_NUM_MSI_X_INT vectors per controller */
+       struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+       struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+       struct isci_orom *orom;
+};
+
+/* Retrieve the per-PCI-function driver data stashed via pci_set_drvdata(). */
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+       struct isci_pci_info *pci_info = pci_get_drvdata(pdev);
+
+       return pci_info;
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+       for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+            id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+            ihost = to_pci_info(pdev)->hosts[++id])
+
+/* Read the externally visible host status (unlocked read; writers use
+ * state_lock in isci_host_change_state()).
+ */
+static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+{
+       return isci_host->status;
+}
+
+/* Update the externally visible host status under state_lock.  The debug
+ * print happens outside the lock; it only formats the arguments.
+ */
+static inline void isci_host_change_state(struct isci_host *isci_host,
+                                         enum isci_status status)
+{
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_host = %p, state = 0x%x",
+               __func__,
+               isci_host,
+               status);
+       spin_lock_irqsave(&isci_host->state_lock, flags);
+       isci_host->status = status;
+       spin_unlock_irqrestore(&isci_host->state_lock, flags);
+
+}
+
+/* Sleep until controller start completes (IHOST_START_PENDING cleared). */
+static inline void wait_for_start(struct isci_host *ihost)
+{
+       wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+/* Sleep until controller stop completes (IHOST_STOP_PENDING cleared). */
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+       wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+/* Sleep until remote device start completes (IDEV_START_PENDING cleared). */
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+/* Sleep until remote device stop completes (IDEV_STOP_PENDING cleared). */
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+/* Map a libsas domain_device back to its owning isci_host (stored in lldd_ha). */
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+       return dev->port->ha->lldd_ha;
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+       struct domain_device *dev = idev->domain_dev;
+
+       /* STP/SATA behind an expander gets the larger STP allocation;
+        * SSP and direct-attached devices get the SSP allocation.
+        */
+       if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+           !idev->is_direct_attached)
+               return SCU_STP_REMOTE_NODE_COUNT;
+       return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object.  This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+       ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+/* Resolve the struct device for a phy, or NULL if any link in the
+ * phy -> port -> host chain is missing.
+ */
+static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+       struct isci_host *ihost;
+
+       if (!iphy || !iphy->isci_port)
+               return NULL;
+
+       ihost = iphy->isci_port->isci_host;
+       if (!ihost)
+               return NULL;
+
+       return &ihost->pdev->dev;
+}
+
+/* Resolve the struct device for a port, or NULL if the port or its
+ * host back-pointer is missing.
+ */
+static inline struct device *sciport_to_dev(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport ? iport->isci_host : NULL;
+
+       if (!ihost)
+               return NULL;
+
+       return &ihost->pdev->dev;
+}
+
+/* Resolve the struct device for a remote device, or NULL if any link in
+ * the device -> port -> host chain is missing.
+ */
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+       struct isci_port *iport;
+
+       if (!idev)
+               return NULL;
+
+       iport = idev->isci_port;
+       if (!iport || !iport->isci_host)
+               return NULL;
+
+       return &iport->isci_host->pdev->dev;
+}
+
+/* A2 silicon stepping: PCI revision ids below 4. */
+static inline bool is_a2(struct pci_dev *pdev)
+{
+       return pdev->revision < 4;
+}
+
+/* B0 silicon stepping: PCI revision id exactly 4. */
+static inline bool is_b0(struct pci_dev *pdev)
+{
+       return pdev->revision == 4;
+}
+
+/* C0 silicon stepping: PCI revision ids of 5 and above. */
+static inline bool is_c0(struct pci_dev *pdev)
+{
+       return pdev->revision >= 5;
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+                                     u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+                                      u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+                                           void *frame_header,
+                                           void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+                                                                struct isci_remote_device *idev,
+                                                                u16 *node_id);
+void sci_controller_free_remote_node_context(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
+                                            u16 io_tag);
+
+void sci_controller_power_control_queue_insert(
+       struct isci_host *ihost,
+       struct isci_phy *iphy);
+
+void sci_controller_power_control_queue_remove(
+       struct isci_host *ihost,
+       struct isci_phy *iphy);
+
+void sci_controller_link_up(
+       struct isci_host *ihost,
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_controller_link_down(
+       struct isci_host *ihost,
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_controller_remote_device_stopped(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev);
+
+void sci_controller_copy_task_context(
+       struct isci_host *ihost,
+       struct isci_request *ireq);
+
+void sci_controller_register_setup(struct isci_host *ihost);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+
+int isci_host_init(struct isci_host *);
+
+void isci_host_init_controller_names(
+       struct isci_host *isci_host,
+       unsigned int controller_idx);
+
+void isci_host_deinit(
+       struct isci_host *);
+
+void isci_host_port_link_up(
+       struct isci_host *,
+       struct isci_port *,
+       struct isci_phy *);
+int isci_host_dev_found(struct domain_device *);
+
+void isci_host_remote_device_start_complete(
+       struct isci_host *,
+       struct isci_remote_device *,
+       enum sci_status);
+
+void sci_controller_disable_interrupts(
+       struct isci_host *ihost);
+
+enum sci_status sci_controller_start_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+       struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644 (file)
index 0000000..61e0d09
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static struct scsi_transport_template *isci_transport_template;
+
+static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
+       { PCI_VDEVICE(INTEL, 0x1D61),},
+       { PCI_VDEVICE(INTEL, 0x1D63),},
+       { PCI_VDEVICE(INTEL, 0x1D65),},
+       { PCI_VDEVICE(INTEL, 0x1D67),},
+       { PCI_VDEVICE(INTEL, 0x1D69),},
+       { PCI_VDEVICE(INTEL, 0x1D6B),},
+       { PCI_VDEVICE(INTEL, 0x1D60),},
+       { PCI_VDEVICE(INTEL, 0x1D62),},
+       { PCI_VDEVICE(INTEL, 0x1D64),},
+       { PCI_VDEVICE(INTEL, 0x1D66),},
+       { PCI_VDEVICE(INTEL, 0x1D68),},
+       { PCI_VDEVICE(INTEL, 0x1D6A),},
+       {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 20;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = 3;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup = 1;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+
+static struct scsi_host_template isci_sht = {
+
+       .module                         = THIS_MODULE,
+       .name                           = DRV_NAME,
+       .proc_name                      = DRV_NAME,
+       .queuecommand                   = sas_queuecommand,
+       .target_alloc                   = sas_target_alloc,
+       .slave_configure                = sas_slave_configure,
+       .slave_destroy                  = sas_slave_destroy,
+       .scan_finished                  = isci_host_scan_finished,
+       .scan_start                     = isci_host_scan_start,
+       .change_queue_depth             = sas_change_queue_depth,
+       .change_queue_type              = sas_change_queue_type,
+       .bios_param                     = sas_bios_param,
+       .can_queue                      = ISCI_CAN_QUEUE_VAL,
+       .cmd_per_lun                    = 1,
+       .this_id                        = -1,
+       .sg_tablesize                   = SG_ALL,
+       .max_sectors                    = SCSI_DEFAULT_MAX_SECTORS,
+       .use_clustering                 = ENABLE_CLUSTERING,
+       .eh_device_reset_handler        = sas_eh_device_reset_handler,
+       .eh_bus_reset_handler           = isci_bus_reset_handler,
+       .slave_alloc                    = sas_slave_alloc,
+       .target_destroy                 = sas_target_destroy,
+       .ioctl                          = sas_ioctl,
+};
+
+static struct sas_domain_function_template isci_transport_ops  = {
+
+       /* The class calls these to notify the LLDD of an event. */
+       .lldd_port_formed       = isci_port_formed,
+       .lldd_port_deformed     = isci_port_deformed,
+
+       /* The class calls these when a device is found or gone. */
+       .lldd_dev_found         = isci_remote_device_found,
+       .lldd_dev_gone          = isci_remote_device_gone,
+
+       .lldd_execute_task      = isci_task_execute_task,
+       /* Task Management Functions. Must be called from process context. */
+       .lldd_abort_task        = isci_task_abort_task,
+       .lldd_abort_task_set    = isci_task_abort_task_set,
+       .lldd_clear_aca         = isci_task_clear_aca,
+       .lldd_clear_task_set    = isci_task_clear_task_set,
+       .lldd_I_T_nexus_reset   = isci_task_I_T_nexus_reset,
+       .lldd_lu_reset          = isci_task_lu_reset,
+       .lldd_query_task        = isci_task_query_task,
+
+       /* Port and Adapter management */
+       .lldd_clear_nexus_port  = isci_task_clear_nexus_port,
+       .lldd_clear_nexus_ha    = isci_task_clear_nexus_ha,
+
+       /* Phy management */
+       .lldd_control_phy       = isci_phy_control,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D  M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ *    specific members of the sas_ha struct and calls the libsas
+ *    sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ *    libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The user
+ * should check for possible memory allocation error return otherwise, a zero
+ * indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+       int i;
+       struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+       struct asd_sas_phy **sas_phys;
+       struct asd_sas_port **sas_ports;
+
+       sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+                               SCI_MAX_PHYS * sizeof(void *),
+                               GFP_KERNEL);
+       if (!sas_phys)
+               return -ENOMEM;
+
+       sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+                                SCI_MAX_PORTS * sizeof(void *),
+                                GFP_KERNEL);
+       if (!sas_ports)
+               return -ENOMEM;
+
+       /*----------------- Libsas Initialization Stuff----------------------
+        * Set various fields in the sas_ha struct:
+        */
+
+       sas_ha->sas_ha_name = DRV_NAME;
+       sas_ha->lldd_module = THIS_MODULE;
+       sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];
+
+       /* set the array of phy and port structs.  */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               sas_phys[i] = &isci_host->phys[i].sas_phy;
+               sas_ports[i] = &isci_host->ports[i].sas_port;
+       }
+
+       sas_ha->sas_phy  = sas_phys;
+       sas_ha->sas_port = sas_ports;
+       sas_ha->num_phys = SCI_MAX_PHYS;
+
+       sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
+       sas_ha->lldd_max_execute_num = 1;
+       sas_ha->strict_wide_ports = 1;
+
+       /* sas_register_ha() can fail; propagate its result instead of
+        * silently discarding it (the documented contract is that a
+        * non-zero return indicates failure).
+        */
+       return sas_register_ha(sas_ha);
+}
+
+/* sysfs 'isci_id' attribute: report which controller within this PCI
+ * function (up to 2 per SKU) the scsi host represents.
+ */
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+/* Tear down the sysfs attribute and the libsas/scsi-midlayer registration
+ * for one controller; a NULL host is a no-op.
+ */
+static void isci_unregister(struct isci_host *isci_host)
+{
+       struct Scsi_Host *shost;
+
+       if (!isci_host)
+               return;
+
+       shost = isci_host->shost;
+       device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
+
+       sas_unregister_ha(&isci_host->sas_ha);
+
+       sas_remove_host(isci_host->shost);
+       scsi_remove_host(isci_host->shost);
+       scsi_host_put(isci_host->shost);
+}
+
+/* Enable the PCI device, map its BARs (managed, so teardown is automatic),
+ * and configure DMA masks, preferring 64-bit with a 32-bit fallback.
+ * Returns 0 on success or a negative errno.
+ */
+static int __devinit isci_pci_init(struct pci_dev *pdev)
+{
+       int err, bar_num, bar_mask = 0;
+       void __iomem * const *iomap;
+
+       err = pcim_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "failed enable PCI device %s!\n",
+                       pci_name(pdev));
+               return err;
+       }
+
+       /* bar_num * 2: matches pci_resource_len(pdev, SCI_SCU_BAR*2) in
+        * num_controllers() -- each logical BAR appears at an even register
+        * index, presumably because the BARs are 64-bit (TODO confirm).
+        */
+       for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+               bar_mask |= 1 << (bar_num * 2);
+
+       err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+       if (err)
+               return err;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return -ENOMEM;
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err)
+                       return err;
+       }
+
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Detect whether this PCI function hosts one or two controllers.
+ *
+ * bar size alone can tell us if we are running with a dual controller
+ * part, no need to trust revision ids that might be under broken firmware
+ * control.
+ */
+static int num_controllers(struct pci_dev *pdev)
+{
+       resource_size_t scu_len = pci_resource_len(pdev, SCI_SCU_BAR * 2);
+       resource_size_t smu_len = pci_resource_len(pdev, SCI_SMU_BAR * 2);
+       bool dual = scu_len >= SCI_SCU_BAR_SIZE * SCI_MAX_CONTROLLERS &&
+                   smu_len >= SCI_SMU_BAR_SIZE * SCI_MAX_CONTROLLERS;
+
+       return dual ? SCI_MAX_CONTROLLERS : 1;
+}
+
+/* Attach interrupt handlers for every controller on this PCI function.
+ * Tries MSI-X first (SCI_NUM_MSI_X_INT vectors per controller); on any
+ * failure the already-requested vectors are released and the function
+ * falls back to a shared legacy INTx handler per controller.
+ * Returns 0 on success or a negative errno.
+ */
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+       int err, i, num_msix;
+       struct isci_host *ihost;
+       struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+       /*
+        *  Determine the number of vectors associated with this
+        *  PCI function.
+        */
+       num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+       for (i = 0; i < num_msix; i++)
+               pci_info->msix_entries[i].entry = i;
+
+       err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+       if (err)
+               goto intx;
+
+       for (i = 0; i < num_msix; i++) {
+               int id = i / SCI_NUM_MSI_X_INT;
+               struct msix_entry *msix = &pci_info->msix_entries[i];
+               irq_handler_t isr;
+
+               ihost = pci_info->hosts[id];
+               /* odd numbered vectors are error interrupts */
+               if (i & 1)
+                       isr = isci_error_isr;
+               else
+                       isr = isci_msix_isr;
+
+               err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+                                      DRV_NAME"-msix", ihost);
+               if (!err)
+                       continue;
+
+               /* unwind the vectors requested so far, then fall back */
+               dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+               while (i--) {
+                       id = i / SCI_NUM_MSI_X_INT;
+                       ihost = pci_info->hosts[id];
+                       msix = &pci_info->msix_entries[i];
+                       devm_free_irq(&pdev->dev, msix->vector, ihost);
+               }
+               pci_disable_msix(pdev);
+               goto intx;
+       }
+       return 0;
+
+ intx:
+       for_each_isci_host(i, ihost, pdev) {
+               err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+                                      IRQF_SHARED, DRV_NAME"-intx", ihost);
+               if (err)
+                       break;
+       }
+       return err;
+}
+
+/* isci_host_alloc - allocate and bring up one host instance
+ * @pdev: owning PCI device
+ * @id: controller index on this PCI function (0 or 1)
+ *
+ * Allocates the devm-managed isci_host and a Scsi_Host, initializes the
+ * controller, registers with the SCSI midlayer and libsas, and creates
+ * the isci_id sysfs attribute.  Returns the new host, or NULL on any
+ * failure; the error labels unwind in strict reverse order of setup.
+ */
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+       struct isci_host *isci_host;
+       struct Scsi_Host *shost;
+       int err;
+
+       isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
+       if (!isci_host)
+               return NULL;
+
+       isci_host->pdev = pdev;
+       isci_host->id = id;
+
+       shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+       if (!shost)
+               return NULL;
+       isci_host->shost = shost;
+
+       err = isci_host_init(isci_host);
+       if (err)
+               goto err_shost;
+
+       SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
+       isci_host->sas_ha.core.shost = shost;
+       shost->transportt = isci_transport_template;
+
+       /* no artificial target/LUN limits; libsas discovery decides */
+       shost->max_id = ~0;
+       shost->max_lun = ~0;
+       shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+       err = scsi_add_host(shost, &pdev->dev);
+       if (err)
+               goto err_shost;
+
+       err = isci_register_sas_ha(isci_host);
+       if (err)
+               goto err_shost_remove;
+
+       err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
+       if (err)
+               goto err_unregister_ha;
+
+       return isci_host;
+
+ err_unregister_ha:
+       sas_unregister_ha(&(isci_host->sas_ha));
+ err_shost_remove:
+       scsi_remove_host(shost);
+ err_shost:
+       /* drops the last reference; devm frees isci_host itself later.
+        * NOTE(review): on the first err_shost path (isci_host_init
+        * failure) the host was never added, so scsi_remove_host is
+        * correctly skipped — confirm isci_host_init undoes its own work.
+        */
+       scsi_host_put(shost);
+
+       return NULL;
+}
+
+/* isci_pci_probe - PCI probe entry point
+ *
+ * Locates OEM SAS parameters by priority (EFI variable, then option
+ * ROM, then a request_firmware() blob, then built-in defaults), maps
+ * the device, allocates one host per discovered controller, sets up
+ * interrupts, and finally kicks off SCSI scanning.
+ */
+static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct isci_pci_info *pci_info;
+       int err, i;
+       struct isci_host *isci_host;
+       const struct firmware *fw = NULL;
+       struct isci_orom *orom = NULL;
+       char *source = "(platform)";
+
+       dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+                pdev->revision);
+
+       pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+       pci_set_drvdata(pdev, pci_info);
+
+       /* preferred source: OEM parameters stored in an EFI variable */
+       if (efi_enabled)
+               orom = isci_get_efi_var(pdev);
+
+       if (!orom)
+               orom = isci_request_oprom(pdev);
+
+       /* one invalid controller entry discredits the whole OEM table */
+       for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+               if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+                       dev_warn(&pdev->dev,
+                                "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+                       devm_kfree(&pdev->dev, orom);
+                       orom = NULL;
+                       break;
+               }
+       }
+
+       if (!orom) {
+               source = "(firmware)";
+               orom = isci_request_firmware(pdev, fw);
+               if (!orom) {
+                       /* TODO convert this to WARN_TAINT_ONCE once the
+                        * orom/efi parameter support is widely available
+                        */
+                       dev_warn(&pdev->dev,
+                                "Loading user firmware failed, using default "
+                                "values\n");
+                       dev_warn(&pdev->dev,
+                                "Default OEM configuration being used: 4 "
+                                "narrow ports, and default SAS Addresses\n");
+               }
+       }
+
+       if (orom)
+               dev_info(&pdev->dev,
+                        "OEM SAS parameters (version: %u.%u) loaded %s\n",
+                        (orom->hdr.version & 0xf0) >> 4,
+                        (orom->hdr.version & 0xf), source);
+
+       pci_info->orom = orom;
+
+       err = isci_pci_init(pdev);
+       if (err)
+               return err;
+
+       for (i = 0; i < num_controllers(pdev); i++) {
+               struct isci_host *h = isci_host_alloc(pdev, i);
+
+               if (!h) {
+                       err = -ENOMEM;
+                       goto err_host_alloc;
+               }
+               pci_info->hosts[i] = h;
+       }
+
+       err = isci_setup_interrupts(pdev);
+       if (err)
+               goto err_host_alloc;
+
+       for_each_isci_host(i, isci_host, pdev)
+               scsi_scan_host(isci_host->shost);
+
+       return 0;
+
+ err_host_alloc:
+       /* unwind any hosts already allocated; presumably
+        * for_each_isci_host stops at the first NULL slot so a
+        * partially-filled table is safe — confirm the macro definition
+        */
+       for_each_isci_host(i, isci_host, pdev)
+               isci_unregister(isci_host);
+       return err;
+}
+
+/* isci_pci_remove - PCI remove entry point
+ *
+ * Unregisters each host from the SCSI/SAS layers, then quiesces the
+ * controller and masks its interrupts; devm releases memory, irqs and
+ * BAR mappings after this returns.
+ */
+static void __devexit isci_pci_remove(struct pci_dev *pdev)
+{
+       struct isci_host *ihost;
+       int i;
+
+       for_each_isci_host(i, ihost, pdev) {
+               isci_unregister(ihost);
+               isci_host_deinit(ihost);
+               sci_controller_disable_interrupts(ihost);
+       }
+}
+
+/* PCI driver glue; matched against isci_id_table at bus enumeration */
+static struct pci_driver isci_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = isci_id_table,
+       .probe          = isci_pci_probe,
+       .remove         = __devexit_p(isci_pci_remove),
+};
+
+/* isci_init - module entry: attach the libsas transport, then register
+ * the PCI driver; the transport is released again if registration fails
+ */
+static __init int isci_init(void)
+{
+       int err;
+
+       pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+
+       isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+       if (!isci_transport_template)
+               return -ENOMEM;
+
+       err = pci_register_driver(&isci_pci_driver);
+       if (err)
+               sas_release_transport(isci_transport_template);
+
+       return err;
+}
+
+/* isci_exit - module exit: tear down in reverse order of isci_init() */
+static __exit void isci_exit(void)
+{
+       pci_unregister_driver(&isci_pci_driver);
+       sas_release_transport(isci_transport_template);
+}
+
+/* module boilerplate: dual license, the firmware blob name advertised
+ * for userspace firmware loading, and the init/exit entry points
+ */
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644 (file)
index 0000000..d1de633
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+/* driver identity and PCI resource layout */
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR       0
+#define SCI_SMU_BAR_SIZE  (16*1024)
+#define SCI_SCU_BAR       1
+#define SCI_SCU_BAR_SIZE  (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+/* per-controller silicon/topology limits */
+#define SCI_MAX_PHYS  (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS  (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES  (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS  SCI_MAX_PORTS
+
+/* completion queue sizing: the sum of the maxima of every completion
+ * source, plus scratch headroom, rounded up to a power of two via the
+ * ilog2-derived shift (enforced by check_sizes() below)
+ */
+#define SCU_MAX_CRITICAL_NOTIFICATIONS    (384)
+#define SCU_MAX_EVENTS_SHIFT             (7)
+#define SCU_MAX_EVENTS                    (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES        (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH  (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES  (SCU_MAX_CRITICAL_NOTIFICATIONS \
+                                          + SCU_MAX_EVENTS \
+                                          + SCU_MAX_UNSOLICITED_FRAMES \
+                                          + SCI_MAX_IO_REQUESTS \
+                                          + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT   (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
+
+/* unsolicited frame pool limits and the in-band "no frame" sentinel */
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024)
+#define SCU_INVALID_FRAME_INDEX             (0xFFFF)
+
+/* 24-bit hardware limits on SGE size and total transfer length */
+#define SCU_IO_REQUEST_MAX_SGE_SIZE         (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH  (0x00FFFFFF)
+
+/* check_sizes - compile-time validation of the sizing constants above;
+ * never called for effect, the BUILD_BUG_ON()s fire at build time
+ */
+static inline void check_sizes(void)
+{
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+       BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+       BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - general return status for non-IO, non-task-management
+ *    SCI interface methods.
+ *
+ * SCI_SUCCESS is defined as 0 so callers may treat any non-zero value
+ * as a warning or failure.
+ */
+enum sci_status {
+       /**
+        * This member indicates successful completion.
+        */
+       SCI_SUCCESS = 0,
+
+       /**
+        * This value indicates that the calling method completed successfully,
+        * but that the IO may have completed before having its start method
+        * invoked.  This occurs during SAT translation for requests that do
+        * not require an IO to the target or for any other requests that may
+        * be completed without having to submit IO.
+        */
+       SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+       /**
+        *  This value indicates that the SCU hardware returned an early response
+        *  because the io request specified more data than is returned by the
+        *  target device (mode pages, inquiry data, etc.). The completion routine
+        *  will handle this case to get the actual number of bytes transferred.
+        */
+       SCI_SUCCESS_IO_DONE_EARLY,
+
+       /**
+        * This member indicates that the object for which a state change is
+        * being requested is already in said state.
+        */
+       SCI_WARNING_ALREADY_IN_STATE,
+
+       /**
+        * This member indicates interrupt coalescence timer may cause SAS
+        * specification compliance issues (i.e. SMP target mode response
+        * frames must be returned within 1.9 milliseconds).
+        */
+       SCI_WARNING_TIMER_CONFLICT,
+
+       /**
+        * This field indicates a sequence of action is not completed yet. Mostly,
+        * this status is used when multiple ATA commands are needed in a SATI translation.
+        */
+       SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+       /**
+        * This member indicates that there was a general failure.
+        */
+       SCI_FAILURE,
+
+       /**
+        * This member indicates that the SCI implementation is unable to complete
+        * an operation due to a critical flaw that prevents any further operation
+        * (i.e. an invalid pointer).
+        */
+       SCI_FATAL_ERROR,
+
+       /**
+        * This member indicates the calling function failed, because the state
+        * of the controller is in a state that prevents successful completion.
+        */
+       SCI_FAILURE_INVALID_STATE,
+
+       /**
+        * This member indicates the calling function failed, because there is
+        * insufficient resources/memory to complete the request.
+        */
+       SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * controller object required for the operation can't be located.
+        */
+       SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * discovered controller type is not supported by the library.
+        */
+       SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested initialization data version isn't supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested configuration of SAS Phys into SAS Ports is not supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested protocol is not supported by the remote device, port,
+        * or controller.
+        */
+       SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested information type is not supported by the SCI implementation.
+        */
+       SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * device already exists.
+        */
+       SCI_FAILURE_DEVICE_EXISTS,
+
+       /**
+        * This member indicates the calling function failed, because adding
+        * a phy to the object is not possible.
+        */
+       SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested information type is not supported by the SCI implementation.
+        */
+       SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+       /**
+        * This member indicates the calling function failed, because the SCI
+        * implementation does not support the supplied time limit.
+        */
+       SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified Phy.
+        */
+       SCI_FAILURE_INVALID_PHY,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified Port.
+        */
+       SCI_FAILURE_INVALID_PORT,
+
+       /**
+        * This member indicates the calling method was partly successful
+        * The port was reset but not all phys in port are operational
+        */
+       SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+       /**
+        * This member indicates that calling method failed
+        * The port reset did not complete because none of the phys are operational
+        */
+       SCI_FAILURE_RESET_PORT_FAILURE,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified remote device.
+        */
+       SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+       /**
+        * This member indicates the calling method failed, because the remote
+        * device is in a bad state and requires a reset.
+        */
+       SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain or support the specified IO tag.
+        */
+       SCI_FAILURE_INVALID_IO_TAG,
+
+       /**
+        * This member indicates that the operation failed and the user should
+        * check the response data associated with the IO.
+        */
+       SCI_FAILURE_IO_RESPONSE_VALID,
+
+       /**
+        * This member indicates that the operation failed, the failure is
+        * controller implementation specific, and the response data associated
+        * with the request is not valid.  You can query for the controller
+        * specific error information via sci_controller_get_request_status()
+        */
+       SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+       /**
+        * This member indicates that the operation failed because the
+        * user requested this IO to be terminated.
+        */
+       SCI_FAILURE_IO_TERMINATED,
+
+       /**
+        * This member indicates that the operation failed and the associated
+        * request requires a SCSI abort task to be sent to the target.
+        */
+       SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+       /**
+        * This member indicates that the operation failed because the supplied
+        * device could not be located.
+        */
+       SCI_FAILURE_DEVICE_NOT_FOUND,
+
+       /**
+        * This member indicates that the operation failed because the
+        * objects association is required and is not correctly set.
+        */
+       SCI_FAILURE_INVALID_ASSOCIATION,
+
+       /**
+        * This member indicates that the operation failed, because a timeout
+        * occurred.
+        */
+       SCI_FAILURE_TIMEOUT,
+
+       /**
+        * This member indicates that the operation failed, because the user
+        * specified a value that is either invalid or not supported.
+        */
+       SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+       /**
+        * This value indicates that the operation failed, because the number
+        * of messages (MSI-X) is not supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+       /**
+        * This value indicates that the method failed due to a lack of
+        * available NCQ tags.
+        */
+       SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+       /**
+        * This value indicates that a protocol violation has occurred on the
+        * link.
+        */
+       SCI_FAILURE_PROTOCOL_VIOLATION,
+
+       /**
+        * This value indicates a failure condition that retry may help to clear.
+        */
+       SCI_FAILURE_RETRY_REQUIRED,
+
+       /**
+        * This field indicates the retry limit was reached when a retry is attempted
+        */
+       SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+       /**
+        * This member indicates the calling method was partly successful.
+        * Mostly, this status is used when a LUN_RESET issued to an expander attached
+        * STP device in READY NCQ substate needs to have RNC suspended/resumed
+        * before posting TC.
+        */
+       SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+       /**
+        * This field indicates an illegal phy connection based on the routing attribute
+        * of both expander phy attached to each other.
+        */
+       SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+       /**
+        * This field indicates a CONFIG ROUTE INFO command has a response with function result
+        * INDEX DOES NOT EXIST, usually means exceeding max route index.
+        */
+       SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+       /**
+        * This value indicates that an unsupported PCI device ID has been
+        * specified.  This indicates that attempts to invoke
+        * sci_library_allocate_controller() will fail.
+        */
+       SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * TODO: add the API to retrieve the SCU status from the core, and check
+ * that the following status are properly handled:
+ * SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL and SCI_IO_FAILURE_INVALID_IO_TAG.
+ */
+enum sci_io_status {
+       SCI_IO_SUCCESS                         = SCI_SUCCESS,
+       SCI_IO_FAILURE                         = SCI_FAILURE,
+       SCI_IO_SUCCESS_COMPLETE_BEFORE_START   = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+       SCI_IO_SUCCESS_IO_DONE_EARLY           = SCI_SUCCESS_IO_DONE_EARLY,
+       SCI_IO_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+       SCI_IO_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+       SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+       SCI_IO_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+       SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+       SCI_IO_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+       SCI_IO_FAILURE_REQUIRES_SCSI_ABORT     = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+       SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+       SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE    = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+       SCI_IO_FAILURE_PROTOCOL_VIOLATION      = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+       SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+       SCI_IO_FAILURE_RETRY_REQUIRED      = SCI_FAILURE_RETRY_REQUIRED,
+       SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+       SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * TODO: check to see that the following status are properly handled.
+ */
+enum sci_task_status {
+       SCI_TASK_SUCCESS                         = SCI_SUCCESS,
+       SCI_TASK_FAILURE                         = SCI_FAILURE,
+       SCI_TASK_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+       SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+       SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+       SCI_TASK_FAILURE_INVALID_TAG             = SCI_FAILURE_INVALID_IO_TAG,
+       SCI_TASK_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+       SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+       SCI_TASK_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+       SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+       SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+       SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receive the 4-byte endian swapped version of src
+ * @src: word aligned source buffer
+ * @word_cnt: number of 32-bit words to convert
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order.  Regardless of host endian this
+ * is always a swab32()-per-dword conversion of the standard definition,
+ * i.e. single byte fields swapped and multi-byte fields in little-
+ * endian
+ *
+ * NOTE(review): @src is never written through and could be
+ * const-qualified — confirm no caller depends on the current prototype.
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+       u32 *dest = _dest, *src = _src;
+
+       /* iterate backwards so the remaining count doubles as the index */
+       while (--word_cnt >= 0)
+               dest[word_cnt] = swab32(src[word_cnt]);
+}
+
+/* module parameters (defined elsewhere in the driver) tuning timeouts,
+ * PHY generation and spin-up concurrency
+ */
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+
+/* interrupt handlers shared between the MSI-X and INTx setup paths */
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+       struct timer_list       timer;
+       bool                    cancel;         /* callback must check this and bail if set */
+};
+
+/* sci_init_timer - one-time setup of a sci_timer; the timer's data is
+ * the sci_timer itself so callbacks can reach the cancel flag.
+ * NOTE(review): the function/data fields are assigned before
+ * init_timer(); this appears to rely on init_timer() not touching
+ * them — confirm, or consider setup_timer() ordering.
+ */
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+       tmr->timer.function = fn;
+       tmr->timer.data = (unsigned long) tmr;
+       tmr->cancel = 0;
+       init_timer(&tmr->timer);
+}
+
+/* sci_mod_timer - (re)arm the timer @msec milliseconds from now,
+ * clearing any pending cancellation
+ */
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+       tmr->cancel = 0;
+       mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+/* sci_del_timer - cancel the timer; sets the flag first so a callback
+ * that already fired (del_timer is not synchronous) exits early
+ */
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+       tmr->cancel = 1;
+       del_timer(&tmr->timer);
+}
+
+/* minimal table-driven state machine used throughout the driver */
+struct sci_base_state_machine {
+       const struct sci_base_state *state_table;       /* indexed by state id */
+       u32 initial_state_id;
+       u32 current_state_id;
+       u32 previous_state_id;
+};
+
+/* per-state entry/exit hooks; either may be NULL presumably — confirm
+ * against sci_change_state()'s implementation
+ */
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+       sci_state_transition_t enter_state;     /* Called on state entry */
+       sci_state_transition_t exit_state;      /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+                       const struct sci_base_state *state_table,
+                       u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
+#endif  /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644 (file)
index 0000000..79313a7
--- /dev/null
@@ -0,0 +1,1312 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+/* Maximum arbitration wait time in micro-seconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME  (700)
+
+/* Report the highest link rate negotiated for @iphy. */
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+       enum sas_linkrate rate = iphy->max_negotiated_speed;
+
+       return rate;
+}
+
+/*
+ * Initialize the phy's transport layer registers: invalidate the STP
+ * remote node index and enable STP write-data prefetch.  Always returns
+ * SCI_SUCCESS.
+ */
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+                                      struct scu_transport_layer_registers __iomem *reg)
+{
+       u32 tl_control;
+
+       iphy->transport_layer_registers = reg;
+
+       /* No remote node is attached yet */
+       writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+               &iphy->transport_layer_registers->stp_rni);
+
+       /*
+        * Hardware team recommends that we enable the STP prefetch for all
+        * transports
+        */
+       tl_control = readl(&iphy->transport_layer_registers->control);
+       tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+       writel(tl_control, &iphy->transport_layer_registers->control);
+
+       return SCI_SUCCESS;
+}
+
+/*
+ * Program the phy's link layer registers from the user/OEM parameters:
+ * IDENTIFY frame contents, SAS addresses, SNW capabilities (with even
+ * parity as required by the SAS spec), spin-up and ALIGN insertion
+ * timing, and the maximum link rate.  Leaves the OOB state machine held
+ * in reset and moves the phy state machine to SCI_PHY_STOPPED.
+ */
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+                                 struct scu_link_layer_registers __iomem *reg)
+{
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       int phy_idx = iphy->phy_index;
+       struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
+       struct sci_phy_oem_params *phy_oem =
+               &ihost->oem_parameters.phys[phy_idx];
+       u32 phy_configuration;
+       struct sci_phy_cap phy_cap;
+       u32 parity_check = 0;
+       u32 parity_count = 0;
+       u32 llctl, link_rate;
+       u32 clksm_value = 0;
+
+       iphy->link_layer_registers = reg;
+
+       /* Set our IDENTIFY frame data */
+       #define SCI_END_DEVICE 0x01
+
+       writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+              SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+              &iphy->link_layer_registers->transmit_identification);
+
+       /* Write the device SAS Address */
+       writel(0xFEDCBA98,
+              &iphy->link_layer_registers->sas_device_name_high);
+       writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+
+       /* Write the source SAS Address */
+       writel(phy_oem->sas_address.high,
+               &iphy->link_layer_registers->source_sas_address_high);
+       writel(phy_oem->sas_address.low,
+               &iphy->link_layer_registers->source_sas_address_low);
+
+       /* Clear and Set the PHY Identifier */
+       writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
+       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
+               &iphy->link_layer_registers->identify_frame_phy_id);
+
+       /* Change the initial state of the phy configuration register */
+       phy_configuration =
+               readl(&iphy->link_layer_registers->phy_configuration);
+
+       /* Hold OOB state machine in reset */
+       phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+       writel(phy_configuration,
+               &iphy->link_layer_registers->phy_configuration);
+
+       /* Configure the SNW capabilities */
+       phy_cap.all = 0;
+       phy_cap.start = 1;
+       phy_cap.gen3_no_ssc = 1;
+       phy_cap.gen2_no_ssc = 1;
+       phy_cap.gen1_no_ssc = 1;
+       if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+               phy_cap.gen3_ssc = 1;
+               phy_cap.gen2_ssc = 1;
+               phy_cap.gen1_ssc = 1;
+       }
+
+       /*
+        * The SAS specification indicates that the phy_capabilities that
+        * are transmitted shall have an even parity.  Calculate the parity. */
+       parity_check = phy_cap.all;
+       while (parity_check != 0) {
+               if (parity_check & 0x1)
+                       parity_count++;
+               parity_check >>= 1;
+       }
+
+       /*
+        * If parity indicates there are an odd number of bits set, then
+        * set the parity bit to 1 in the phy capabilities. */
+       if ((parity_count % 2) != 0)
+               phy_cap.parity = 1;
+
+       writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+
+       /* Set the enable spinup period but disable the ability to send
+        * notify enable spinup
+        */
+       writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+                       phy_user->notify_enable_spin_up_insertion_frequency),
+               &iphy->link_layer_registers->notify_enable_spinup_control);
+
+       /* Write the ALIGN Insertion Frequency for connected phy and
+        * independent of connected state
+        */
+       clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+                       phy_user->in_connection_align_insertion_frequency);
+
+       clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+                       phy_user->align_insertion_frequency);
+
+       writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+
+       /* @todo Provide a way to write this register correctly */
+       writel(0x02108421,
+               &iphy->link_layer_registers->afe_lookup_table_control);
+
+       llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+               (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+       /* Cap the advertised rate at the user-configured maximum generation */
+       switch (phy_user->max_speed_generation) {
+       case SCIC_SDS_PARM_GEN3_SPEED:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+               break;
+       case SCIC_SDS_PARM_GEN2_SPEED:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+               break;
+       default:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+               break;
+       }
+       llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+       writel(llctl, &iphy->link_layer_registers->link_layer_control);
+
+       if (is_a2(ihost->pdev)) {
+               /* Program the max ARB time for the PHY to 700us so we inter-operate with
+                * the PMC expander which shuts down PHYs if the expander PHY generates too
+                * many breaks.  This time value will guarantee that the initiator PHY will
+                * generate the break.
+                */
+               writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+                       &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+       }
+
+       /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
+       writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+
+       /* We can exit the initial state to the stopped state */
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+       return SCI_SUCCESS;
+}
+
+/*
+ * Timer callback: the signature FIS did not arrive in time, so restart
+ * link training by returning the phy to SCI_PHY_STARTING.  Honors the
+ * sci_timer cancel flag under scic_lock (see struct sci_timer).
+ */
+static void phy_sata_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* Raced with sci_del_timer(); do nothing */
+       if (tmr->cancel)
+               goto done;
+
+       dev_dbg(sciphy_to_dev(iphy),
+                "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+                "timeout.\n",
+                __func__,
+                iphy);
+
+       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * phy_get_non_dummy_port() - return the real port containing a phy.
+ * @iphy: the phy whose containing port is requested.
+ *
+ * A phy held by the dummy port is considered not to belong to any port.
+ *
+ * Return: the port containing @iphy, or NULL when the phy is owned by
+ * the dummy port (SCIC_SDS_DUMMY_PORT).
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+       struct isci_port *iport = iphy->owning_port;
+
+       if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+               return NULL;
+
+       return iport;
+}
+
+/**
+ * sci_phy_set_port() - assign a port to the phy object.
+ * @iphy: the phy for which to assign a port object.
+ * @iport: the port that now owns @iphy.
+ *
+ * If a broadcast change notification arrived while the phy had no port,
+ * replay it to the newly assigned port.
+ */
+void sci_phy_set_port(
+       struct isci_phy *iphy,
+       struct isci_port *iport)
+{
+       iphy->owning_port = iport;
+
+       if (iphy->bcn_received_while_port_unassigned) {
+               iphy->bcn_received_while_port_unassigned = false;
+               sci_port_broadcast_change_received(iphy->owning_port, iphy);
+       }
+}
+
+/*
+ * Initialize the phy's transport-layer and link-layer hardware and move
+ * the phy state machine to SCI_PHY_STOPPED.  Always returns SCI_SUCCESS.
+ */
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+                                  struct scu_transport_layer_registers __iomem *tl,
+                                  struct scu_link_layer_registers __iomem *ll)
+{
+       /* Perform the initialization of the TL hardware */
+       sci_phy_transport_layer_initialization(iphy, tl);
+
+       /* Perform the initialization of the PE hardware */
+       sci_phy_link_layer_initialization(iphy, ll);
+
+       /* There is nothing that needs to be done in this state just
+        * transition to the stopped state
+        *
+        * NOTE(review): sci_phy_link_layer_initialization() already
+        * transitions to SCI_PHY_STOPPED; this second transition looks
+        * redundant — confirm sci_change_state() is idempotent here.
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_phy_setup_transport() - assign the direct attached device ID for this phy.
+ *
+ * @iphy: The phy for which the direct attached device id is to
+ *       be assigned.
+ * @device_id: The direct attached device ID to assign to the phy.
+ *       This will either be the RNi for the device or an invalid RNi if there
+ *       is no current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+       u32 tl_control;
+
+       writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+       /*
+        * The read should guarantee that the first write gets posted
+        * before the next write
+        */
+       tl_control = readl(&iphy->transport_layer_registers->control);
+       tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+       writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+/*
+ * Suspend the phy's protocol engine and invalidate its STP remote node
+ * index (paired with sci_phy_resume()).
+ */
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+       writel(scu_sas_pcfg_value,
+               &iphy->link_layer_registers->phy_configuration);
+
+       sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/* Clear the protocol-engine suspend bit so the phy resumes operation. */
+void sci_phy_resume(struct isci_phy *iphy)
+{
+       u32 pcfg;
+
+       pcfg = readl(&iphy->link_layer_registers->phy_configuration);
+       pcfg &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+       writel(pcfg, &iphy->link_layer_registers->phy_configuration);
+}
+
+/* Read back the phy's source SAS address from the link layer registers. */
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+       struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+
+       sas->high = readl(&ll->source_sas_address_high);
+       sas->low = readl(&ll->source_sas_address_low);
+}
+
+/* Copy the SAS address reported in the received IDENTIFY address frame. */
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+       struct sas_identify_frame *iaf = &iphy->frame_rcvd.iaf;
+
+       memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+/* Snapshot the protocols advertised in the transmit IDENTIFY register. */
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+       u32 tiid = readl(&iphy->link_layer_registers->transmit_identification);
+
+       proto->all = tiid;
+}
+
+/* Start a stopped phy; only valid from SCI_PHY_STOPPED. */
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       if (state == SCI_PHY_STOPPED) {
+               sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+               return SCI_SUCCESS;
+       }
+
+       dev_dbg(sciphy_to_dev(iphy),
+                "%s: in wrong state: %d\n", __func__, state);
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+/*
+ * Stop the phy.  Allowed from any starting substate or from READY; all
+ * other states reject the request with SCI_FAILURE_INVALID_STATE.
+ */
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_INITIAL:
+       case SCI_PHY_SUB_AWAIT_OSSP_EN:
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+       case SCI_PHY_SUB_FINAL:
+       case SCI_PHY_READY:
+               break;
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+       return SCI_SUCCESS;
+}
+
+/* Reset the phy; only valid from SCI_PHY_READY. */
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       if (state == SCI_PHY_READY) {
+               sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+               return SCI_SUCCESS;
+       }
+
+       dev_dbg(sciphy_to_dev(iphy),
+               "%s: in wrong state: %d\n", __func__, state);
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+/*
+ * Grant the phy permission to consume power during link bring-up.
+ * For SAS: enable notify-spinup and finish the starting substate machine.
+ * For SATA: release the spinup hold and restart OOB, then wait for the
+ * SATA phy-enable event.  Any other state is invalid.
+ */
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+               u32 enable_spinup;
+
+               enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+               enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+               writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+               /* Change state to the final state this substate machine has run to completion */
+               sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+               return SCI_SUCCESS;
+       }
+       case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+               u32 scu_sas_pcfg_value;
+
+               /* Release the spinup hold state and reset the OOB state machine */
+               scu_sas_pcfg_value =
+                       readl(&iphy->link_layer_registers->phy_configuration);
+               scu_sas_pcfg_value &=
+                       ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+               scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+               writel(scu_sas_pcfg_value,
+                       &iphy->link_layer_registers->phy_configuration);
+
+               /* Now restart the OOB operation */
+               scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+               scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+               writel(scu_sas_pcfg_value,
+                       &iphy->link_layer_registers->phy_configuration);
+
+               /* Change state to the final state this substate machine has run to completion */
+               sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+               return SCI_SUCCESS;
+       }
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/*
+ * Redirect link training down the SAS path.  Used when the completion
+ * queue reported a SAS PHY DETECTED event while the state machine was
+ * expecting a SATA PHY event.
+ */
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+       /* continue the link training for the phy as if it were a SAS PHY
+        * instead of a SATA PHY. This is done because the completion queue had a SAS
+        * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+        */
+       u32 phy_control;
+
+       /* Keep SATA spinup held off while we train as SAS */
+       phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+       phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+       writel(phy_control,
+              &iphy->link_layer_registers->phy_configuration);
+
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+}
+
+/*
+ * Redirect link training down the SATA path.  Used when the completion
+ * queue reported a SATA SPINUP HOLD event while the state machine was
+ * expecting a SAS PHY event.
+ */
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+       /* This method continues the link training for the phy as if it were a SATA PHY
+        * instead of a SAS PHY.  This is done because the completion queue had a SATA
+        * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ *    all protocols upon completion of link training.
+ * @iphy: This parameter specifies the phy object for which link training
+ *    has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ *    associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ *    sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+                                          enum sas_linkrate max_link_rate,
+                                          u32 next_state)
+{
+       iphy->max_negotiated_speed = max_link_rate;
+
+       sci_change_state(&iphy->sm, next_state);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_OSSP_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       sci_phy_start_sas_link_training(iphy);
+                       iphy->is_in_link_training = true;
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       sci_phy_start_sata_link_training(iphy);
+                       iphy->is_in_link_training = true;
+                       break;
+               default:
+                       dev_dbg(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received "
+                               "unexpected event_code %x\n",
+                               __func__,
+                               event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /*
+                        * Why is this being reported again by the controller?
+                        * We would re-enter this state so just stay here */
+                       break;
+               case SCU_EVENT_SAS_15:
+               case SCU_EVENT_SAS_15_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SAS_30:
+               case SCU_