#include "libata.h"
-static unsigned int ata_busy_sleep (struct ata_port *ap,
- unsigned long tmout_pat,
- unsigned long tmout);
static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static int ata_choose_xfer_mode(const struct ata_port *ap,
u8 *xfer_mode_out,
unsigned int *xfer_shift_out);
-static void __ata_qc_complete(struct ata_queued_cmd *qc);
+static void ata_pio_error(struct ata_port *ap);
static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;
} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
/* Unable to use DMA due to host limitation */
tf->protocol = ATA_PROT_PIO;
- index = dev->multi_count ? 0 : 4;
+ index = dev->multi_count ? 0 : 8;
} else {
tf->protocol = ATA_PROT_DMA;
index = 16;
* ata_dev_try_classify - Parse returned ATA device signature
* @ap: ATA channel to examine
* @device: Device to examine (starting at zero)
+ * @r_err: Value of error register on completion
*
* After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
* an ATA/ATAPI-defined set of values is placed in the ATA
*
* LOCKING:
* caller.
+ *
+ * RETURNS:
+ * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
*/
-static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
+static unsigned int
+ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
- struct ata_device *dev = &ap->device[device];
struct ata_taskfile tf;
unsigned int class;
u8 err;
ap->ops->tf_read(ap, &tf);
err = tf.feature;
-
- dev->class = ATA_DEV_NONE;
+ if (r_err)
+ *r_err = err;
/* see if device passed diags */
if (err == 1)
else if ((device == 0) && (err == 0x81))
/* do nothing */ ;
else
- return err;
+ return ATA_DEV_NONE;
- /* determine if device if ATA or ATAPI */
+ /* determine if device is ATA or ATAPI */
class = ata_dev_classify(&tf);
+
if (class == ATA_DEV_UNKNOWN)
- return err;
+ return ATA_DEV_NONE;
if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
- return err;
-
- dev->class = class;
-
- return err;
+ return ATA_DEV_NONE;
+ return class;
}
/**
timing API will get this right anyway */
}
-struct ata_exec_internal_arg {
- unsigned int err_mask;
- struct ata_taskfile *tf;
- struct completion *waiting;
-};
+static inline void
+ata_queue_pio_task(struct ata_port *ap)
+{
+ if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
+ queue_work(ata_wq, &ap->pio_task);
+}
-int ata_qc_complete_internal(struct ata_queued_cmd *qc)
+static inline void
+ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
{
- struct ata_exec_internal_arg *arg = qc->private_data;
- struct completion *waiting = arg->waiting;
+ if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
+ queue_delayed_work(ata_wq, &ap->pio_task, delay);
+}
- if (!(qc->err_mask & ~AC_ERR_DEV))
- qc->ap->ops->tf_read(qc->ap, arg->tf);
- arg->err_mask = qc->err_mask;
- arg->waiting = NULL;
- complete(waiting);
+/**
+ * ata_flush_pio_tasks - Flush pio_task and packet_task
+ * @ap: the target ata_port
+ *
+ * After this function completes, pio_task and packet_task are
+ * guaranteed not to be running or scheduled.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
- return 0;
+static void ata_flush_pio_tasks(struct ata_port *ap)
+{
+ int tmp = 0;
+ unsigned long flags;
+
+ DPRINTK("ENTER\n");
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ DPRINTK("flush #1\n");
+ flush_workqueue(ata_wq);
+
+ /*
+ * At this point, if a task is running, it's guaranteed to see
+ * the FLUSH flag; thus, it will never queue pio tasks again.
+ * Cancel and flush.
+ */
+ tmp |= cancel_delayed_work(&ap->pio_task);
+ tmp |= cancel_delayed_work(&ap->packet_task);
+ if (!tmp) {
+ DPRINTK("flush #2\n");
+ flush_workqueue(ata_wq);
+ }
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ DPRINTK("EXIT\n");
+}
+
+void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+{
+ struct completion *waiting = qc->private_data;
+
+ qc->ap->ops->tf_read(qc->ap, &qc->tf);
+ complete(waiting);
}
/**
struct ata_queued_cmd *qc;
DECLARE_COMPLETION(wait);
unsigned long flags;
- struct ata_exec_internal_arg arg;
+ unsigned int err_mask;
spin_lock_irqsave(&ap->host_set->lock, flags);
qc->nsect = buflen / ATA_SECT_SIZE;
}
- arg.waiting = &wait;
- arg.tf = tf;
- qc->private_data = &arg;
+ qc->private_data = &wait;
qc->complete_fn = ata_qc_complete_internal;
- if (ata_qc_issue(qc))
- goto issue_fail;
+ qc->err_mask = ata_qc_issue(qc);
+ if (qc->err_mask)
+ ata_qc_complete(qc);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
* before the caller cleans up, it will result in a
* spurious interrupt. We can live with that.
*/
- if (arg.waiting) {
- qc->err_mask = AC_ERR_OTHER;
+ if (qc->flags & ATA_QCFLAG_ACTIVE) {
+ qc->err_mask = AC_ERR_TIMEOUT;
ata_qc_complete(qc);
printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
ap->id, command);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
- return arg.err_mask;
+ *tf = qc->tf;
+ err_mask = qc->err_mask;
- issue_fail:
ata_qc_free(qc);
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
- return AC_ERR_OTHER;
+
+ return err_mask;
}
/**
}
+ if (dev->id[59] & 0x100) {
+ dev->multi_count = dev->id[59] & 0xff;
+ DPRINTK("ata%u: dev %u multi count %u\n",
+ ap->id, device, dev->multi_count);
+ }
+
ap->host->max_cmd_len = 16;
}
ap->cdb_len = (unsigned int) rc;
ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
+ if (ata_id_cdb_intr(dev->id))
+ dev->flags |= ATA_DFLAG_CDB_INTR;
+
/* print device info to dmesg */
printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
ap->id, device,
}
/**
- * ata_dev_config - Run device specific handlers and check for
- * SATA->PATA bridges
- * @ap: Bus
- * @i: Device
+ * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges
+ * @ap: Bus
+ * @i: Device
*
- * LOCKING:
+ * LOCKING:
*/
void ata_dev_config(struct ata_port *ap, unsigned int i)
{
unsigned int i, found = 0;
- ap->ops->phy_reset(ap);
+ if (ap->ops->probe_reset) {
+ unsigned int classes[ATA_MAX_DEVICES];
+ int rc;
+
+ ata_port_probe(ap);
+
+ rc = ap->ops->probe_reset(ap, classes);
+ if (rc == 0) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ap->device[i].class = classes[i];
+ } else {
+ printk(KERN_ERR "ata%u: probe reset failed, "
+ "disabling port\n", ap->id);
+ ata_port_disable(ap);
+ }
+ } else
+ ap->ops->phy_reset(ap);
+
if (ap->flags & ATA_FLAG_PORT_DISABLED)
goto err_out;
ata_timing_quantize(t, t, T, UT);
/*
- * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
- * and some other commands. We have to ensure that the DMA cycle timing is
- * slower/equal than the fastest PIO timing.
+ * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+ * S.M.A.R.T and some other commands. We have to ensure that the
+ * DMA cycle timing is slower than or equal to the fastest PIO timing.
*/
if (speed > XFER_PIO_4) {
}
/*
- * Lenghten active & recovery time so that cycle time is correct.
+ * Lengthen active & recovery time so that cycle time is correct.
*/
if (t->act8b + t->rec8b < t->cyc8b) {
*
* LOCKING:
* PCI/etc. bus probe sem.
- *
*/
static void ata_set_mode(struct ata_port *ap)
{
* or a timeout occurs.
*
* LOCKING: None.
- *
*/
-static unsigned int ata_busy_sleep (struct ata_port *ap,
- unsigned long tmout_pat,
- unsigned long tmout)
+unsigned int ata_busy_sleep (struct ata_port *ap,
+ unsigned long tmout_pat, unsigned long tmout)
{
unsigned long timer_start, timeout;
u8 status;
/*
* determine by signature whether we have ATA or ATAPI devices
*/
- err = ata_dev_try_classify(ap, 0);
+ ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
if ((slave_possible) && (err != 0x81))
- ata_dev_try_classify(ap, 1);
+ ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
/* re-enable interrupts */
if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
DPRINTK("EXIT\n");
}
+static int sata_phy_resume(struct ata_port *ap)
+{
+ unsigned long timeout = jiffies + (HZ * 5);
+ u32 sstatus;
+
+ scr_write_flush(ap, SCR_CONTROL, 0x300);
+
+ /* Wait for phy to become ready, if necessary. */
+ do {
+ msleep(200);
+ sstatus = scr_read(ap, SCR_STATUS);
+ if ((sstatus & 0xf) != 1)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -1;
+}
+
+/**
+ * ata_std_probeinit - initialize probing
+ * @ap: port to be probed
+ *
+ * @ap is about to be probed. Initialize it. This function is
+ * to be used as standard callback for ata_drive_probe_reset().
+ */
+void ata_std_probeinit(struct ata_port *ap)
+{
+ if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
+ sata_phy_resume(ap);
+}
+
+/**
+ * ata_std_softreset - reset host port via ATA SRST
+ * @ap: port to reset
+ * @verbose: fail verbosely
+ * @classes: resulting classes of attached devices
+ *
+ * Reset host port using ATA SRST. This function is to be used
+ * as standard callback for ata_drive_*_reset() functions.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
+{
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ unsigned int devmask = 0, err_mask;
+ u8 err;
+
+ DPRINTK("ENTER\n");
+
+ /* determine if device 0/1 are present */
+ if (ata_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (slave_possible && ata_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* devchk reports device presence without actual device on
+ * most SATA controllers. Check SStatus and turn devmask off
+ * if link is offline. Note that we should continue resetting
+ * even when it seems like there's no device.
+ */
+ if (ap->ops->scr_read && !sata_dev_present(ap))
+ devmask = 0;
+
+ /* select device 0 again */
+ ap->ops->dev_select(ap, 0);
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ err_mask = ata_bus_softreset(ap, devmask);
+ if (err_mask) {
+ if (verbose)
+ printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
+ ap->id, err_mask);
+ else
+ DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
+ err_mask);
+ return -EIO;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_dev_try_classify(ap, 0, &err);
+ if (slave_possible && err != 0x81)
+ classes[1] = ata_dev_try_classify(ap, 1, &err);
+
+ DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+ return 0;
+}
+
+/**
+ * sata_std_hardreset - reset host port via SATA phy reset
+ * @ap: port to reset
+ * @verbose: fail verbosely
+ * @class: resulting class of attached device
+ *
+ * SATA phy-reset host port using DET bits of SControl register.
+ * This function is to be used as standard callback for
+ * ata_drive_*_reset().
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+{
+ u32 serror;
+
+ DPRINTK("ENTER\n");
+
+ /* Issue phy wake/reset */
+ scr_write_flush(ap, SCR_CONTROL, 0x301);
+
+ /*
+ * Couldn't find anything in SATA I/II specs, but AHCI-1.1
+ * 10.4.2 says at least 1 ms.
+ */
+ msleep(1);
+
+ /* Bring phy back */
+ sata_phy_resume(ap);
+
+ /* Clear SError */
+ serror = scr_read(ap, SCR_ERROR);
+ scr_write(ap, SCR_ERROR, serror);
+
+ /* TODO: phy layer with polling, timeouts, etc. */
+ if (!sata_dev_present(ap)) {
+ *class = ATA_DEV_NONE;
+ DPRINTK("EXIT, link offline\n");
+ return 0;
+ }
+
+ if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
+ if (verbose)
+ printk(KERN_ERR "ata%u: COMRESET failed "
+ "(device not ready)\n", ap->id);
+ else
+ DPRINTK("EXIT, device not ready\n");
+ return -EIO;
+ }
+
+ *class = ata_dev_try_classify(ap, 0, NULL);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+}
+
+/**
+ * ata_std_postreset - standard postreset callback
+ * @ap: the target ata_port
+ * @classes: classes of attached devices
+ *
+ * This function is invoked after a successful reset. Note that
+ * the device might have been reset more than once using
+ * different reset methods before postreset is invoked.
+ * postreset is also responsible for setting cable type.
+ *
+ * This function is to be used as standard callback for
+ * ata_drive_*_reset().
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
+{
+ DPRINTK("ENTER\n");
+
+ /* set cable type */
+ if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
+ ap->cbl = ATA_CBL_SATA;
+
+ /* print link status */
+ if (ap->cbl == ATA_CBL_SATA)
+ sata_print_link_status(ap);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ DPRINTK("EXIT, no device\n");
+ return;
+ }
+
+ /* is double-select really necessary? */
+ if (classes[0] != ATA_DEV_NONE)
+ ap->ops->dev_select(ap, 1);
+ if (classes[1] != ATA_DEV_NONE)
+ ap->ops->dev_select(ap, 0);
+
+ /* re-enable interrupts & set up device control */
+ if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
+ ata_irq_on(ap);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_std_probe_reset - standard probe reset method
+ * @ap: port to perform probe-reset
+ * @classes: resulting classes of attached devices
+ *
+ * The stock off-the-shelf ->probe_reset method.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
+{
+ ata_reset_fn_t hardreset;
+
+ hardreset = NULL;
+ if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
+ hardreset = sata_std_hardreset;
+
+ return ata_drive_probe_reset(ap, ata_std_probeinit,
+ ata_std_softreset, hardreset,
+ ata_std_postreset, classes);
+}
+
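+/*
+ * Illustrative usage (not part of the patched code): a low-level driver
+ * that needs no controller-specific reset handling could simply plug the
+ * stock method into its ata_port_operations.  The operations struct and
+ * its other fields below are hypothetical placeholders.
+ *
+ *	static const struct ata_port_operations foo_ops = {
+ *		...
+ *		.probe_reset	= ata_std_probe_reset,
+ *		...
+ *	};
+ */
+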
+static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
+ ata_postreset_fn_t postreset,
+ unsigned int *classes)
+{
+ int i, rc;
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ classes[i] = ATA_DEV_UNKNOWN;
+
+ rc = reset(ap, 0, classes);
+ if (rc)
+ return rc;
+
+ /* If any class isn't ATA_DEV_UNKNOWN, consider classification
+ * complete and convert all remaining ATA_DEV_UNKNOWN entries to
+ * ATA_DEV_NONE.
+ */
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ if (classes[i] != ATA_DEV_UNKNOWN)
+ break;
+
+ if (i < ATA_MAX_DEVICES)
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ if (classes[i] == ATA_DEV_UNKNOWN)
+ classes[i] = ATA_DEV_NONE;
+
+ if (postreset)
+ postreset(ap, classes);
+
+ return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
+}
+
+/**
+ * ata_drive_probe_reset - Perform probe reset with given methods
+ * @ap: port to reset
+ * @probeinit: probeinit method (can be NULL)
+ * @softreset: softreset method (can be NULL)
+ * @hardreset: hardreset method (can be NULL)
+ * @postreset: postreset method (can be NULL)
+ * @classes: resulting classes of attached devices
+ *
+ * Reset the specified port and classify attached devices using
+ * given methods. This function prefers softreset but tries all
+ * possible reset sequences to reset and classify devices. This
+ * function is intended to be used for constructing ->probe_reset
+ * callback by low level drivers.
+ *
+ * Reset methods should follow the following rules.
+ *
+ * - Return 0 on success, -errno on failure.
+ * - If classification is supported, fill classes[] with
+ * recognized class codes.
+ * - If classification is not supported, leave classes[] alone.
+ * - If verbose is non-zero, print error message on failure;
+ * otherwise, shut up.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -EINVAL if no reset method is available, -ENODEV
+ * if classification fails, and any error code from reset
+ * methods.
+ */
+int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
+ ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+ ata_postreset_fn_t postreset, unsigned int *classes)
+{
+ int rc = -EINVAL;
+
+ if (probeinit)
+ probeinit(ap);
+
+ if (softreset) {
+ rc = do_probe_reset(ap, softreset, postreset, classes);
+ if (rc == 0)
+ return 0;
+ }
+
+ if (!hardreset)
+ return rc;
+
+ rc = do_probe_reset(ap, hardreset, postreset, classes);
+ if (rc == 0 || rc != -ENODEV)
+ return rc;
+
+ if (softreset)
+ rc = do_probe_reset(ap, softreset, postreset, classes);
+
+ return rc;
+}
+
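+/*
+ * Illustrative usage (not part of the patched code): a hypothetical
+ * driver whose controller needs a private hardreset can still reuse the
+ * standard probeinit/softreset/postreset methods by building its
+ * ->probe_reset on top of ata_drive_probe_reset().  foo_hardreset() and
+ * foo_probe_reset() are placeholders.
+ *
+ *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
+ *	{
+ *		return ata_drive_probe_reset(ap, ata_std_probeinit,
+ *					     ata_std_softreset, foo_hardreset,
+ *					     ata_std_postreset, classes);
+ *	}
+ */
+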
static void ata_pr_blacklisted(const struct ata_port *ap,
const struct ata_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&ap->host_set->lock, flags);
- ap->flags &= ~ATA_FLAG_NOINTR;
ata_irq_on(ap);
ata_qc_complete(qc);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
/**
- * ata_pio_poll -
+ * ata_pio_poll - poll using PIO, depending on current state
* @ap: the target ata_port
*
* LOCKING:
status = ata_chk_status(ap);
if (status & ATA_BUSY) {
if (time_after(jiffies, ap->pio_task_timeout)) {
- qc->err_mask |= AC_ERR_ATA_BUS;
+ qc->err_mask |= AC_ERR_TIMEOUT;
ap->hsm_task_state = HSM_ST_TMOUT;
return 0;
}
* None. (executing in kernel thread context)
*
* RETURNS:
- * Non-zero if qc completed, zero otherwise.
+ * Zero if qc completed.
+ * Non-zero if has next.
*/
static int ata_pio_complete (struct ata_port *ap)
* we enter, BSY will be cleared in a chk-status or two. If not,
* the drive is probably seeking or something. Snooze for a couple
* msecs, then chk-status again. If still busy, fall back to
- * HSM_ST_POLL state.
+ * HSM_ST_LAST_POLL state.
*/
drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
if (drv_stat & ATA_BUSY) {
if (drv_stat & ATA_BUSY) {
ap->hsm_task_state = HSM_ST_LAST_POLL;
ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
- return 0;
+ return 1;
}
}
if (!ata_ok(drv_stat)) {
qc->err_mask |= __ac_err_mask(drv_stat);
ap->hsm_task_state = HSM_ST_ERR;
- return 0;
+ return 1;
}
ap->hsm_task_state = HSM_ST_IDLE;
/* another command may start at this point */
- return 1;
+ return 0;
}
/**
- * swap_buf_le16 - swap halves of 16-words in place
+ * swap_buf_le16 - swap halves of 16-bit words in place
* @buf: Buffer to swap
* @buf_words: Number of 16-bit words in buffer.
*
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
- buf = kmap(page) + offset;
+ DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+ if (PageHighMem(page)) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ buf = kmap_atomic(page, KM_IRQ0);
+
+ /* do the actual data transfer */
+ ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+ kunmap_atomic(buf, KM_IRQ0);
+ local_irq_restore(flags);
+ } else {
+ buf = page_address(page);
+ ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+ }
qc->cursect++;
qc->cursg_ofs++;
qc->cursg++;
qc->cursg_ofs = 0;
}
-
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
- /* do the actual data transfer */
- do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
- ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
-
- kunmap(page);
}
/**
- * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * ata_pio_sectors - Transfer one or many 512-byte sectors.
* @qc: Command on going
- * @bytes: number of bytes
*
- * Transfer Transfer data from/to the ATAPI device.
+ * Transfer one or many sectors of ATA_SECT_SIZE bytes of data
+ * from/to the ATA device for the DRQ request.
*
* LOCKING:
* Inherited from caller.
- *
*/
-static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
- int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
- struct scatterlist *sg = qc->__sg;
- struct ata_port *ap = qc->ap;
- struct page *page;
- unsigned char *buf;
+ if (is_multi_taskfile(&qc->tf)) {
+ /* READ/WRITE MULTIPLE */
+ unsigned int nsect;
+
+ assert(qc->dev->multi_count);
+
+ nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+ while (nsect--)
+ ata_pio_sector(qc);
+ } else
+ ata_pio_sector(qc);
+}
+
+/**
+ * atapi_send_cdb - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ * @qc: Taskfile currently active
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called. Send the CDB.
+ *
+ * LOCKING:
+ * caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ /* send SCSI cdb */
+ DPRINTK("send cdb\n");
+ assert(ap->cdb_len >= 12);
+
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+ ata_altstatus(ap); /* flush */
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_ATAPI:
+ ap->hsm_task_state = HSM_ST;
+ break;
+ case ATA_PROT_ATAPI_NODATA:
+ ap->hsm_task_state = HSM_ST_LAST;
+ break;
+ case ATA_PROT_ATAPI_DMA:
+ ap->hsm_task_state = HSM_ST_LAST;
+ /* initiate bmdma */
+ ap->ops->bmdma_start(qc);
+ break;
+ }
+}
+
+/**
+ * ata_pio_first_block - Write first data block to hardware
+ * @ap: Port to which ATA/ATAPI device is attached.
+ *
+ * When device has indicated its readiness to accept
+ * the data, this function sends out the CDB or
+ * the first data block by PIO.
+ * After this,
+ * - If polling, ata_pio_task() handles the rest.
+ * - Otherwise, interrupt handler takes over.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * Zero if irq handler takes over
+ * Non-zero if has next (polling).
+ */
+
+static int ata_pio_first_block(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ u8 status;
+ unsigned long flags;
+ int has_next;
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ assert(qc != NULL);
+ assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+ /* if polling, we will stay in the work queue after sending the data.
+ * otherwise, interrupt handler takes over after sending the data.
+ */
+ has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+ /* sleep-wait for BSY to clear */
+ DPRINTK("busy wait\n");
+ if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ ap->hsm_task_state = HSM_ST_TMOUT;
+ goto err_out;
+ }
+
+ /* make sure DRQ is set */
+ status = ata_chk_status(ap);
+ if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
+ /* device status error */
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto err_out;
+ }
+
+ /* Send the CDB (atapi) or the first data block (ata pio out).
+ * During the state transition, interrupt handler shouldn't
+ * be invoked before the data transfer is complete and
+ * hsm_task_state is changed. Hence, the following locking.
+ */
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+
+ if (qc->tf.protocol == ATA_PROT_PIO) {
+ /* PIO data out protocol.
+ * send first data block.
+ */
+
+ /* ata_pio_sectors() might change the state to HSM_ST_LAST.
+ * so, the state is changed here before ata_pio_sectors().
+ */
+ ap->hsm_task_state = HSM_ST;
+ ata_pio_sectors(qc);
+ ata_altstatus(ap); /* flush */
+ } else
+ /* send CDB */
+ atapi_send_cdb(ap, qc);
+
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ /* if polling, ata_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+ return has_next;
+
+err_out:
+ return 1; /* has next */
+}
+
+/**
+ * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command on going
+ * @bytes: number of bytes
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
+static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+ int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ struct scatterlist *sg = qc->__sg;
+ struct ata_port *ap = qc->ap;
+ struct page *page;
+ unsigned char *buf;
unsigned int offset, count;
if (qc->curbytes + bytes >= qc->nbytes)
/* don't cross page boundaries */
count = min(count, (unsigned int)PAGE_SIZE - offset);
- buf = kmap(page) + offset;
+ DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+ if (PageHighMem(page)) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ buf = kmap_atomic(page, KM_IRQ0);
+
+ /* do the actual data transfer */
+ ata_data_xfer(ap, buf + offset, count, do_write);
+
+ kunmap_atomic(buf, KM_IRQ0);
+ local_irq_restore(flags);
+ } else {
+ buf = page_address(page);
+ ata_data_xfer(ap, buf + offset, count, do_write);
+ }
bytes -= count;
qc->curbytes += count;
qc->cursg_ofs = 0;
}
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
- /* do the actual data transfer */
- ata_data_xfer(ap, buf, count, do_write);
-
- kunmap(page);
-
if (bytes)
goto next_sg;
}
if (do_write != i_write)
goto err_out;
+ VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
__atapi_pio_bytes(qc, bytes);
return;
err_out:
printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
ap->id, dev->devno);
- qc->err_mask |= AC_ERR_ATA_BUS;
+ qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
}
} else {
/* handle BSY=0, DRQ=0 as error */
if ((status & ATA_DRQ) == 0) {
- qc->err_mask |= AC_ERR_ATA_BUS;
+ qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
return;
}
- ata_pio_sector(qc);
+ ata_pio_sectors(qc);
}
+
+ ata_altstatus(ap); /* flush */
}
static void ata_pio_error(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
- printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
-
qc = ata_qc_from_tag(ap, ap->active_tag);
assert(qc != NULL);
+ if (qc->tf.command != ATA_CMD_PACKET)
+ printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
+
/* make sure qc->err_mask is available to
* know what's wrong and recover
*/
{
struct ata_port *ap = _data;
unsigned long timeout;
- int qc_completed;
+ int has_next;
fsm_start:
timeout = 0;
- qc_completed = 0;
+ has_next = 1;
switch (ap->hsm_task_state) {
- case HSM_ST_IDLE:
- return;
+ case HSM_ST_FIRST:
+ has_next = ata_pio_first_block(ap);
+ break;
case HSM_ST:
ata_pio_block(ap);
break;
case HSM_ST_LAST:
- qc_completed = ata_pio_complete(ap);
+ has_next = ata_pio_complete(ap);
break;
case HSM_ST_POLL:
case HSM_ST_ERR:
ata_pio_error(ap);
return;
+
+ default:
+ BUG();
+ return;
}
if (timeout)
- queue_delayed_work(ata_wq, &ap->pio_task, timeout);
- else if (!qc_completed)
+ ata_queue_delayed_pio_task(ap, timeout);
+ else if (has_next)
goto fsm_start;
}
DPRINTK("ENTER\n");
- spin_lock_irqsave(&host_set->lock, flags);
+ ata_flush_pio_tasks(ap);
+ ap->hsm_task_state = HSM_ST_IDLE;
- /* hack alert! We cannot use the supplied completion
- * function from inside the ->eh_strategy_handler() thread.
- * libata is the only user of ->eh_strategy_handler() in
- * any kernel, so the default scsi_done() assumes it is
- * not being called from the SCSI EH.
- */
- qc->scsidone = scsi_finish_command;
+ spin_lock_irqsave(&host_set->lock, flags);
switch (qc->tf.protocol) {
printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
ap->id, qc->tf.command, drv_stat, host_stat);
+ ap->hsm_task_state = HSM_ST_IDLE;
+
/* complete taskfile transaction */
- qc->err_mask |= ac_err_mask(drv_stat);
- ata_qc_complete(qc);
+ qc->err_mask |= AC_ERR_TIMEOUT;
break;
}
spin_unlock_irqrestore(&host_set->lock, flags);
+ ata_eh_qc_complete(qc);
+
DPRINTK("EXIT\n");
}
return qc;
}
-static void __ata_qc_complete(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned int tag;
-
- qc->flags = 0;
- tag = qc->tag;
- if (likely(ata_tag_valid(tag))) {
- if (tag == ap->active_tag)
- ap->active_tag = ATA_TAG_POISON;
- qc->tag = ATA_TAG_POISON;
- clear_bit(tag, &ap->qactive);
- }
-}
-
/**
* ata_qc_free - free unused ata_queued_cmd
* @qc: Command to complete
*/
void ata_qc_free(struct ata_queued_cmd *qc)
{
+ struct ata_port *ap = qc->ap;
+ unsigned int tag;
+
assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
- __ata_qc_complete(qc);
+ qc->flags = 0;
+ tag = qc->tag;
+ if (likely(ata_tag_valid(tag))) {
+ if (tag == ap->active_tag)
+ ap->active_tag = ATA_TAG_POISON;
+ qc->tag = ATA_TAG_POISON;
+ clear_bit(tag, &ap->qactive);
+ }
}
/**
void ata_qc_complete(struct ata_queued_cmd *qc)
{
- int rc;
-
assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
assert(qc->flags & ATA_QCFLAG_ACTIVE);
qc->flags &= ~ATA_QCFLAG_ACTIVE;
/* call completion callback */
- rc = qc->complete_fn(qc);
-
- /* if callback indicates not to complete command (non-zero),
- * return immediately
- */
- if (rc != 0)
- return;
-
- __ata_qc_complete(qc);
-
- VPRINTK("EXIT\n");
+ qc->complete_fn(qc);
}
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
- * Zero on success, negative on error.
+ * Zero on success, AC_ERR_* mask on failure
*/
-int ata_qc_issue(struct ata_queued_cmd *qc)
+unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if (ata_should_dma_map(qc)) {
if (qc->flags & ATA_QCFLAG_SG) {
if (ata_sg_setup(qc))
- goto err_out;
+ goto sg_err;
} else if (qc->flags & ATA_QCFLAG_SINGLE) {
if (ata_sg_setup_one(qc))
- goto err_out;
+ goto sg_err;
}
} else {
qc->flags &= ~ATA_QCFLAG_DMAMAP;
return ap->ops->qc_issue(qc);
-err_out:
- return -1;
+sg_err:
+ qc->flags &= ~ATA_QCFLAG_DMAMAP;
+ return AC_ERR_SYSTEM;
}
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
- * Zero on success, negative on error.
+ * Zero on success, AC_ERR_* mask on failure
*/
-int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ /* Use polling pio if the LLD doesn't handle
+ * interrupt driven pio and atapi CDB interrupt.
+ */
+ if (ap->flags & ATA_FLAG_PIO_POLLING) {
+ switch (qc->tf.protocol) {
+ case ATA_PROT_PIO:
+ case ATA_PROT_ATAPI:
+ case ATA_PROT_ATAPI_NODATA:
+ qc->tf.flags |= ATA_TFLAG_POLLING;
+ break;
+ case ATA_PROT_ATAPI_DMA:
+ if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+ BUG();
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
+ /* start the command */
switch (qc->tf.protocol) {
case ATA_PROT_NODATA:
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_qc_set_polling(qc);
+
ata_tf_to_host(ap, &qc->tf);
+ ap->hsm_task_state = HSM_ST_LAST;
+
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_queue_pio_task(ap);
+
break;
case ATA_PROT_DMA:
+ assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */
ap->ops->bmdma_start(qc); /* initiate bmdma */
+ ap->hsm_task_state = HSM_ST_LAST;
break;
- case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
- ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
- ap->hsm_task_state = HSM_ST;
- queue_work(ata_wq, &ap->pio_task);
- break;
+ case ATA_PROT_PIO:
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_qc_set_polling(qc);
- case ATA_PROT_ATAPI:
- ata_qc_set_polling(qc);
ata_tf_to_host(ap, &qc->tf);
- queue_work(ata_wq, &ap->packet_task);
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ /* PIO data out protocol */
+ ap->hsm_task_state = HSM_ST_FIRST;
+ ata_queue_pio_task(ap);
+
+ /* always send first data block using
+ * the ata_pio_task() codepath.
+ */
+ } else {
+ /* PIO data in protocol */
+ ap->hsm_task_state = HSM_ST;
+
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_queue_pio_task(ap);
+
+ /* if polling, ata_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+ }
+
break;
+ case ATA_PROT_ATAPI:
case ATA_PROT_ATAPI_NODATA:
- ap->flags |= ATA_FLAG_NOINTR;
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_qc_set_polling(qc);
+
ata_tf_to_host(ap, &qc->tf);
- queue_work(ata_wq, &ap->packet_task);
+
+ ap->hsm_task_state = HSM_ST_FIRST;
+
+ /* send cdb by polling if no cdb interrupt */
+ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+ (qc->tf.flags & ATA_TFLAG_POLLING))
+ ata_queue_pio_task(ap);
break;
case ATA_PROT_ATAPI_DMA:
- ap->flags |= ATA_FLAG_NOINTR;
+ assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */
- queue_work(ata_wq, &ap->packet_task);
+ ap->hsm_task_state = HSM_ST_FIRST;
+
+ /* send cdb by polling if no cdb interrupt */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ ata_queue_pio_task(ap);
break;
default:
WARN_ON(1);
- return -1;
+ return AC_ERR_SYSTEM;
}
return 0;
inline unsigned int ata_host_intr (struct ata_port *ap,
struct ata_queued_cmd *qc)
{
- u8 status, host_stat;
+ u8 status, host_stat = 0;
- switch (qc->tf.protocol) {
-
- case ATA_PROT_DMA:
- case ATA_PROT_ATAPI_DMA:
- case ATA_PROT_ATAPI:
- /* check status of DMA engine */
- host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+ VPRINTK("ata%u: protocol %d task_state %d\n",
+ ap->id, qc->tf.protocol, ap->hsm_task_state);
- /* if it's not our irq... */
- if (!(host_stat & ATA_DMA_INTR))
+ /* Check whether we are expecting interrupt in this state */
+ switch (ap->hsm_task_state) {
+ case HSM_ST_FIRST:
+ /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+ * The flag is set only for ATAPI devices, so there is
+ * no need to check is_atapi_taskfile(&qc->tf) again.
+ */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
goto idle_irq;
+ break;
+ case HSM_ST_LAST:
+ if (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+ /* check status of DMA engine */
+ host_stat = ap->ops->bmdma_status(ap);
+ VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ goto idle_irq;
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+ /* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+ break;
+ case HSM_ST:
+ break;
+ default:
+ goto idle_irq;
+ }
- /* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(qc);
+ /* check altstatus */
+ status = ata_altstatus(ap);
+ if (status & ATA_BUSY)
+ goto idle_irq;
- /* fall through */
+ /* check main status, clearing INTRQ */
+ status = ata_chk_status(ap);
+ if (unlikely(status & ATA_BUSY))
+ goto idle_irq;
- case ATA_PROT_ATAPI_NODATA:
- case ATA_PROT_NODATA:
- /* check altstatus */
- status = ata_altstatus(ap);
- if (status & ATA_BUSY)
- goto idle_irq;
+ DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+ ap->id, qc->tf.protocol, ap->hsm_task_state, status);
- /* check main status, clearing INTRQ */
- status = ata_chk_status(ap);
- if (unlikely(status & ATA_BUSY))
- goto idle_irq;
- DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->id, qc->tf.protocol, status);
+ /* ack bmdma irq events */
+ ap->ops->irq_clear(ap);
- /* ack bmdma irq events */
- ap->ops->irq_clear(ap);
+ /* check error */
+ if (unlikely(status & (ATA_ERR | ATA_DF))) {
+ qc->err_mask |= AC_ERR_DEV;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+
+fsm_start:
+ switch (ap->hsm_task_state) {
+ case HSM_ST_FIRST:
+ /* Some pre-ATAPI-4 devices assert INTRQ
+ * at this state when ready to receive CDB.
+ */
+
+ /* check device status */
+ if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+ /* Wrong status. Let EH handle this */
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ atapi_send_cdb(ap, qc);
+
+ break;
+
+ case HSM_ST:
+ /* complete command or read/write the data register */
+ if (qc->tf.protocol == ATA_PROT_ATAPI) {
+ /* ATAPI PIO protocol */
+ if ((status & ATA_DRQ) == 0) {
+ /* no more data to transfer */
+ ap->hsm_task_state = HSM_ST_LAST;
+ goto fsm_start;
+ }
+
+ atapi_pio_bytes(qc);
+
+ if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+ /* bad ireason reported by device */
+ goto fsm_start;
+
+ } else {
+ /* ATA PIO protocol */
+ if (unlikely((status & ATA_DRQ) == 0)) {
+ /* handle BSY=0, DRQ=0 as error */
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ ata_pio_sectors(qc);
+
+ if (ap->hsm_task_state == HSM_ST_LAST &&
+ (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+ /* all data read */
+ ata_altstatus(ap);
+ status = ata_chk_status(ap);
+ goto fsm_start;
+ }
+ }
+
+ ata_altstatus(ap); /* flush */
+ break;
+
+ case HSM_ST_LAST:
+ if (unlikely(status & ATA_DRQ)) {
+ /* handle DRQ=1 as error */
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ /* no more data to transfer */
+ DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
+ ap->id, status);
+
+ ap->hsm_task_state = HSM_ST_IDLE;
/* complete taskfile transaction */
qc->err_mask |= ac_err_mask(status);
ata_qc_complete(qc);
break;
+ case HSM_ST_ERR:
+ if (qc->tf.command != ATA_CMD_PACKET)
+ printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
+ ap->id, status, host_stat);
+
+ /* make sure qc->err_mask is available to
+ * know what's wrong and recover
+ */
+ assert(qc->err_mask);
+
+ ap->hsm_task_state = HSM_ST_IDLE;
+ ata_qc_complete(qc);
+ break;
default:
goto idle_irq;
}
ap = host_set->ports[i];
if (ap &&
- !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+ !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
- if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
(qc->flags & ATA_QCFLAG_ACTIVE))
handled |= ata_host_intr(ap, qc);
}
return IRQ_RETVAL(handled);
}
-/**
- * atapi_packet_task - Write CDB bytes to hardware
- * @_data: Port to which ATAPI device is attached.
- *
- * When device has indicated its readiness to accept
- * a CDB, this function is called. Send the CDB.
- * If DMA is to be performed, exit immediately.
- * Otherwise, we are in polling mode, so poll
- * status under operation succeeds or fails.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-
-static void atapi_packet_task(void *_data)
-{
- struct ata_port *ap = _data;
- struct ata_queued_cmd *qc;
- u8 status;
-
- qc = ata_qc_from_tag(ap, ap->active_tag);
- assert(qc != NULL);
- assert(qc->flags & ATA_QCFLAG_ACTIVE);
-
- /* sleep-wait for BSY to clear */
- DPRINTK("busy wait\n");
- if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
- qc->err_mask |= AC_ERR_ATA_BUS;
- goto err_out;
- }
-
- /* make sure DRQ is set */
- status = ata_chk_status(ap);
- if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
- qc->err_mask |= AC_ERR_ATA_BUS;
- goto err_out;
- }
-
- /* send SCSI cdb */
- DPRINTK("send cdb\n");
- assert(ap->cdb_len >= 12);
-
- if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
- qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
- unsigned long flags;
-
- /* Once we're done issuing command and kicking bmdma,
- * irq handler takes over. To not lose irq, we need
- * to clear NOINTR flag before sending cdb, but
- * interrupt handler shouldn't be invoked before we're
- * finished. Hence, the following locking.
- */
- spin_lock_irqsave(&ap->host_set->lock, flags);
- ap->flags &= ~ATA_FLAG_NOINTR;
- ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
- if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
- ap->ops->bmdma_start(qc); /* initiate bmdma */
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
- } else {
- ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-
- /* PIO commands are handled by polling */
- ap->hsm_task_state = HSM_ST;
- queue_work(ata_wq, &ap->pio_task);
- }
-
- return;
-
-err_out:
- ata_poll_qc_complete(qc);
-}
-
-
-/**
- * ata_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
/*
* Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
* without filling any other registers
/**
 * ata_device_resume - wake up a previously suspended device
+ * @ap: port the device is connected to
+ * @dev: the device to resume
*
* Kick the drive back into action, by sending it an idle immediate
* command and making sure its transfer mode matches between drive
/**
* ata_device_suspend - prepare a device for suspend
+ * @ap: port the device is connected to
+ * @dev: the device to suspend
*
* Flush the cache on the drive, if appropriate, then issue a
* standbynow command.
- *
*/
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
{
return 0;
}
+/**
+ * ata_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Allocates space for PRD table.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
int ata_port_start (struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
ap->active_tag = ATA_TAG_POISON;
ap->last_ctl = 0xFF;
- INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
INIT_WORK(&ap->pio_task, ata_pio_task, ap);
+ INIT_LIST_HEAD(&ap->eh_done_q);
for (i = 0; i < ATA_MAX_DEVICES; i++)
ap->device[i].devno = i;
ap = host_set->ports[i];
- DPRINTK("ata%u: probe begin\n", ap->id);
+ DPRINTK("ata%u: bus probe begin\n", ap->id);
rc = ata_bus_probe(ap);
- DPRINTK("ata%u: probe end\n", ap->id);
+ DPRINTK("ata%u: bus probe end\n", ap->id);
if (rc) {
/* FIXME: do something useful here?
}
/* probes are done, now scan each port's disk(s) */
- DPRINTK("probe begin\n");
+ DPRINTK("host probe begin\n");
for (i = 0; i < count; i++) {
struct ata_port *ap = host_set->ports[i];
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
+EXPORT_SYMBOL_GPL(ata_std_probeinit);
+EXPORT_SYMBOL_GPL(ata_std_softreset);
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
+EXPORT_SYMBOL_GPL(ata_std_postreset);
+EXPORT_SYMBOL_GPL(ata_std_probe_reset);
+EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
+EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_dev_config);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);