2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <scsi/scsi.h>
53 #include "scsi_priv.h"
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
62 static unsigned int ata_busy_sleep (struct ata_port *ap,
63 unsigned long tmout_pat,
64 unsigned long tmout);
65 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
68 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
69 static int fgb(u32 bitmap);
70 static int ata_choose_xfer_mode(struct ata_port *ap,
71 u8 *xfer_mode_out,
72 unsigned int *xfer_shift_out);
73 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
74 static void __ata_qc_complete(struct ata_queued_cmd *qc);
76 static unsigned int ata_unique_id = 1;
77 static struct workqueue_struct *ata_wq;
79 MODULE_AUTHOR("Jeff Garzik");
80 MODULE_DESCRIPTION("Library module for ATA devices");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_VERSION);
85 * ata_tf_load_pio - send taskfile registers to host controller
86 * @ap: Port to which output is sent
87 * @tf: ATA taskfile register set
89 * Outputs ATA taskfile to standard ATA host controller using PIO.
92 * Inherited from caller.
95 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
97 struct ata_ioports *ioaddr = &ap->ioaddr;
98 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
100 if (tf->ctl != ap->last_ctl) {
101 outb(tf->ctl, ioaddr->ctl_addr);
102 ap->last_ctl = tf->ctl;
106 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
107 outb(tf->hob_feature, ioaddr->feature_addr);
108 outb(tf->hob_nsect, ioaddr->nsect_addr);
109 outb(tf->hob_lbal, ioaddr->lbal_addr);
110 outb(tf->hob_lbam, ioaddr->lbam_addr);
111 outb(tf->hob_lbah, ioaddr->lbah_addr);
112 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
121 outb(tf->feature, ioaddr->feature_addr);
122 outb(tf->nsect, ioaddr->nsect_addr);
123 outb(tf->lbal, ioaddr->lbal_addr);
124 outb(tf->lbam, ioaddr->lbam_addr);
125 outb(tf->lbah, ioaddr->lbah_addr);
126 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
134 if (tf->flags & ATA_TFLAG_DEVICE) {
135 outb(tf->device, ioaddr->device_addr);
136 VPRINTK("device 0x%X\n", tf->device);
143 * ata_tf_load_mmio - send taskfile registers to host controller
144 * @ap: Port to which output is sent
145 * @tf: ATA taskfile register set
147 * Outputs ATA taskfile to standard ATA host controller using MMIO.
150 * Inherited from caller.
153 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
155 struct ata_ioports *ioaddr = &ap->ioaddr;
156 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
158 if (tf->ctl != ap->last_ctl) {
159 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
160 ap->last_ctl = tf->ctl;
164 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
165 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
166 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
167 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
168 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
169 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
170 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
179 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
180 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
181 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
182 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
183 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
184 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
192 if (tf->flags & ATA_TFLAG_DEVICE) {
193 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
194 VPRINTK("device 0x%X\n", tf->device);
202 * ata_tf_load - send taskfile registers to host controller
203 * @ap: Port to which output is sent
204 * @tf: ATA taskfile register set
206 * Outputs ATA taskfile to standard ATA host controller using MMIO
207 * or PIO as indicated by the ATA_FLAG_MMIO flag.
208 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
209 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
210 * hob_lbal, hob_lbam, and hob_lbah.
212 * This function waits for idle (!BUSY and !DRQ) after writing
213 * registers. If the control register has a new value, this
214 * function also waits for idle after writing control and before
215 * writing the remaining registers.
217 * May be used as the tf_load() entry in ata_port_operations.
220 * Inherited from caller.
222 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
224 if (ap->flags & ATA_FLAG_MMIO)
225 ata_tf_load_mmio(ap, tf);
227 ata_tf_load_pio(ap, tf);
231 * ata_exec_command_pio - issue ATA command to host controller
232 * @ap: port to which command is being issued
233 * @tf: ATA taskfile register set
235 * Issues PIO write to ATA command register, with proper
236 * synchronization with interrupt handler / other threads.
239 * spin_lock_irqsave(host_set lock)
242 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
244 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
246 outb(tf->command, ap->ioaddr.command_addr);
252 * ata_exec_command_mmio - issue ATA command to host controller
253 * @ap: port to which command is being issued
254 * @tf: ATA taskfile register set
256 * Issues MMIO write to ATA command register, with proper
257 * synchronization with interrupt handler / other threads.
260 * spin_lock_irqsave(host_set lock)
263 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
265 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
267 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
273 * ata_exec_command - issue ATA command to host controller
274 * @ap: port to which command is being issued
275 * @tf: ATA taskfile register set
277 * Issues PIO/MMIO write to ATA command register, with proper
278 * synchronization with interrupt handler / other threads.
281 * spin_lock_irqsave(host_set lock)
283 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
285 if (ap->flags & ATA_FLAG_MMIO)
286 ata_exec_command_mmio(ap, tf);
288 ata_exec_command_pio(ap, tf);
292 * ata_exec - issue ATA command to host controller
293 * @ap: port to which command is being issued
294 * @tf: ATA taskfile register set
296 * Issues PIO/MMIO write to ATA command register, with proper
297 * synchronization with interrupt handler / other threads.
300 * Obtains host_set lock.
303 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
307 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
308 spin_lock_irqsave(&ap->host_set->lock, flags);
309 ap->ops->exec_command(ap, tf);
310 spin_unlock_irqrestore(&ap->host_set->lock, flags);
314 * ata_tf_to_host - issue ATA taskfile to host controller
315 * @ap: port to which command is being issued
316 * @tf: ATA taskfile register set
318 * Issues ATA taskfile register set to ATA host controller,
319 * with proper synchronization with interrupt handler and
323 * Obtains host_set lock.
326 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
328 ap->ops->tf_load(ap, tf);
334 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
335 * @ap: port to which command is being issued
336 * @tf: ATA taskfile register set
338 * Issues ATA taskfile register set to ATA host controller,
339 * with proper synchronization with interrupt handler and
343 * spin_lock_irqsave(host_set lock)
346 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
348 ap->ops->tf_load(ap, tf);
349 ap->ops->exec_command(ap, tf);
353 * ata_tf_read_pio - input device's ATA taskfile shadow registers
354 * @ap: Port from which input is read
355 * @tf: ATA taskfile register set for storing input
357 * Reads ATA taskfile registers for currently-selected device
361 * Inherited from caller.
364 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
366 struct ata_ioports *ioaddr = &ap->ioaddr;
368 tf->nsect = inb(ioaddr->nsect_addr);
369 tf->lbal = inb(ioaddr->lbal_addr);
370 tf->lbam = inb(ioaddr->lbam_addr);
371 tf->lbah = inb(ioaddr->lbah_addr);
372 tf->device = inb(ioaddr->device_addr);
374 if (tf->flags & ATA_TFLAG_LBA48) {
375 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
376 tf->hob_feature = inb(ioaddr->error_addr);
377 tf->hob_nsect = inb(ioaddr->nsect_addr);
378 tf->hob_lbal = inb(ioaddr->lbal_addr);
379 tf->hob_lbam = inb(ioaddr->lbam_addr);
380 tf->hob_lbah = inb(ioaddr->lbah_addr);
385 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
386 * @ap: Port from which input is read
387 * @tf: ATA taskfile register set for storing input
389 * Reads ATA taskfile registers for currently-selected device
393 * Inherited from caller.
396 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
398 struct ata_ioports *ioaddr = &ap->ioaddr;
400 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
401 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
402 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
403 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
404 tf->device = readb((void __iomem *)ioaddr->device_addr);
406 if (tf->flags & ATA_TFLAG_LBA48) {
407 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
408 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
409 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
410 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
411 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
412 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
418 * ata_tf_read - input device's ATA taskfile shadow registers
419 * @ap: Port from which input is read
420 * @tf: ATA taskfile register set for storing input
422 * Reads ATA taskfile registers for currently-selected device
425 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
426 * is set, also reads the hob registers.
428 * May be used as the tf_read() entry in ata_port_operations.
431 * Inherited from caller.
433 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
435 if (ap->flags & ATA_FLAG_MMIO)
436 ata_tf_read_mmio(ap, tf);
438 ata_tf_read_pio(ap, tf);
442 * ata_check_status_pio - Read device status reg & clear interrupt
443 * @ap: port where the device is
445 * Reads ATA taskfile status register for currently-selected device
446 * and returns its value. This also clears pending interrupts from this device.
450 * Inherited from caller.
452 static u8 ata_check_status_pio(struct ata_port *ap)
454 return inb(ap->ioaddr.status_addr);
458 * ata_check_status_mmio - Read device status reg & clear interrupt
459 * @ap: port where the device is
461 * Reads ATA taskfile status register for currently-selected device
462 * via MMIO and returns its value. This also clears pending interrupts from this device.
466 * Inherited from caller.
468 static u8 ata_check_status_mmio(struct ata_port *ap)
470 return readb((void __iomem *) ap->ioaddr.status_addr);
475 * ata_check_status - Read device status reg & clear interrupt
476 * @ap: port where the device is
478 * Reads ATA taskfile status register for currently-selected device
479 * and returns its value. This also clears pending interrupts from this device.
482 * May be used as the check_status() entry in ata_port_operations.
485 * Inherited from caller.
487 u8 ata_check_status(struct ata_port *ap)
489 if (ap->flags & ATA_FLAG_MMIO)
490 return ata_check_status_mmio(ap);
491 return ata_check_status_pio(ap);
496 * ata_altstatus - Read device alternate status reg
497 * @ap: port where the device is
499 * Reads ATA taskfile alternate status register for
500 * currently-selected device and returns its value.
502 * Note: may NOT be used as the check_altstatus() entry in
503 * ata_port_operations.
506 * Inherited from caller.
508 u8 ata_altstatus(struct ata_port *ap)
510 if (ap->ops->check_altstatus)
511 return ap->ops->check_altstatus(ap);
513 if (ap->flags & ATA_FLAG_MMIO)
514 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
515 return inb(ap->ioaddr.altstatus_addr);
520 * ata_chk_err - Read device error reg
521 * @ap: port where the device is
523 * Reads ATA taskfile error register for
524 * currently-selected device and returns its value.
526 * Note: may NOT be used as the check_err() entry in
527 * ata_port_operations.
530 * Inherited from caller.
532 u8 ata_chk_err(struct ata_port *ap)
534 if (ap->ops->check_err)
535 return ap->ops->check_err(ap);
537 if (ap->flags & ATA_FLAG_MMIO) {
538 return readb((void __iomem *) ap->ioaddr.error_addr);
540 return inb(ap->ioaddr.error_addr);
544 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
545 * @tf: Taskfile to convert
546 * @fis: Buffer into which data will be output
547 * @pmp: Port multiplier port
549 * Converts a standard ATA taskfile to a Serial ATA
550 * FIS structure (Register - Host to Device).
553 * Inherited from caller.
556 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
558 fis[0] = 0x27; /* Register - Host to Device FIS */
559 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
560 bit 7 indicates Command FIS */
561 fis[2] = tf->command;
562 fis[3] = tf->feature;
569 fis[8] = tf->hob_lbal;
570 fis[9] = tf->hob_lbam;
571 fis[10] = tf->hob_lbah;
572 fis[11] = tf->hob_feature;
575 fis[13] = tf->hob_nsect;
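/*
 * For reference, a rough sketch of the Register - Host to Device FIS
 * assembled above (an illustrative summary, not a restatement of the
 * SATA spec):
 *
 *	byte  0		0x27 (FIS type)
 *	byte  1		pmp in bits 3:0, bit 7 set = Command FIS
 *	bytes 2-3	command, feature
 *	bytes 4-7	lbal, lbam, lbah, device
 *	bytes 8-11	hob_lbal, hob_lbam, hob_lbah, hob_feature
 *	bytes 12-13	nsect, hob_nsect
 *	byte  15	ctl, with the remaining bytes zeroed
 */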
586 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
587 * @fis: Buffer from which data will be input
588 * @tf: Taskfile to output
590 * Converts a Serial ATA FIS structure to a standard
591 * ATA taskfile.
594 * Inherited from caller.
597 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
599 tf->command = fis[2]; /* status */
600 tf->feature = fis[3]; /* error */
607 tf->hob_lbal = fis[8];
608 tf->hob_lbam = fis[9];
609 tf->hob_lbah = fis[10];
612 tf->hob_nsect = fis[13];
616 * ata_prot_to_cmd - determine which read/write opcodes to use
617 * @protocol: ATA_PROT_xxx taskfile protocol
618 * @lba48: true if lba48 is present
620 * Given necessary input, determine which read/write commands
621 * to use to transfer data.
626 static int ata_prot_to_cmd(int protocol, int lba48)
628 int rcmd = 0, wcmd = 0;
633 rcmd = ATA_CMD_PIO_READ_EXT;
634 wcmd = ATA_CMD_PIO_WRITE_EXT;
636 rcmd = ATA_CMD_PIO_READ;
637 wcmd = ATA_CMD_PIO_WRITE;
643 rcmd = ATA_CMD_READ_EXT;
644 wcmd = ATA_CMD_WRITE_EXT;
647 wcmd = ATA_CMD_WRITE;
655 return rcmd | (wcmd << 8);
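/*
 * Illustrative example: with ATA_PROT_DMA and lba48 set, the value
 * packed above is ATA_CMD_READ_EXT | (ATA_CMD_WRITE_EXT << 8);
 * ata_dev_set_protocol() below unpacks the low byte into read_cmd
 * and the high byte into write_cmd.
 */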
659 * ata_dev_set_protocol - set taskfile protocol and r/w commands
660 * @dev: device to examine and configure
662 * Examine the device configuration, after we have
663 * read the identify-device page and configured the
664 * data transfer mode. Set internal state related to
665 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
666 * and calculate the proper read/write commands to use.
671 static void ata_dev_set_protocol(struct ata_device *dev)
673 int pio = (dev->flags & ATA_DFLAG_PIO);
674 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
678 proto = dev->xfer_protocol = ATA_PROT_PIO;
680 proto = dev->xfer_protocol = ATA_PROT_DMA;
682 cmd = ata_prot_to_cmd(proto, lba48);
686 dev->read_cmd = cmd & 0xff;
687 dev->write_cmd = (cmd >> 8) & 0xff;
690 static const char * xfer_mode_str[] = {
710 * ata_mode_string - convert transfer mode bit mask to string
711 * @mask: mask of bits supported; only highest bit counts.
713 * Determine string which represents the highest speed
714 * (highest bit in @mask).
720 * Constant C string representing highest speed listed in
721 * @mask, or the constant C string "<n/a>".
724 static const char *ata_mode_string(unsigned int mask)
728 for (i = 7; i >= 0; i--)
731 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
734 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
741 return xfer_mode_str[i];
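/*
 * Note on the mask layout assumed above: all transfer modes share one
 * bitmap, with UDMA modes at bit ATA_SHIFT_UDMA + n, MWDMA modes at
 * ATA_SHIFT_MWDMA + n and PIO modes at ATA_SHIFT_PIO + n, and
 * xfer_mode_str[] indexed by the same bit positions.  The loops scan
 * from the highest mode downward, so the fastest supported mode is
 * the one reported.
 */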
745 * ata_pio_devchk - PATA device presence detection
746 * @ap: ATA channel to examine
747 * @device: Device to examine (starting at zero)
749 * This technique was originally described in
750 * Hale Landis's ATADRVR (www.ata-atapi.com), and
751 * later found its way into the ATA/ATAPI spec.
753 * Write a pattern to the ATA shadow registers,
754 * and if a device is present, it will respond by
755 * correctly storing and echoing back the
756 * ATA shadow register contents.
762 static unsigned int ata_pio_devchk(struct ata_port *ap,
765 struct ata_ioports *ioaddr = &ap->ioaddr;
768 ap->ops->dev_select(ap, device);
770 outb(0x55, ioaddr->nsect_addr);
771 outb(0xaa, ioaddr->lbal_addr);
773 outb(0xaa, ioaddr->nsect_addr);
774 outb(0x55, ioaddr->lbal_addr);
776 outb(0x55, ioaddr->nsect_addr);
777 outb(0xaa, ioaddr->lbal_addr);
779 nsect = inb(ioaddr->nsect_addr);
780 lbal = inb(ioaddr->lbal_addr);
782 if ((nsect == 0x55) && (lbal == 0xaa))
783 return 1; /* we found a device */
785 return 0; /* nothing found */
789 * ata_mmio_devchk - PATA device presence detection
790 * @ap: ATA channel to examine
791 * @device: Device to examine (starting at zero)
793 * This technique was originally described in
794 * Hale Landis's ATADRVR (www.ata-atapi.com), and
795 * later found its way into the ATA/ATAPI spec.
797 * Write a pattern to the ATA shadow registers,
798 * and if a device is present, it will respond by
799 * correctly storing and echoing back the
800 * ATA shadow register contents.
806 static unsigned int ata_mmio_devchk(struct ata_port *ap,
809 struct ata_ioports *ioaddr = &ap->ioaddr;
812 ap->ops->dev_select(ap, device);
814 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
815 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
817 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
818 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
820 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
821 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
823 nsect = readb((void __iomem *) ioaddr->nsect_addr);
824 lbal = readb((void __iomem *) ioaddr->lbal_addr);
826 if ((nsect == 0x55) && (lbal == 0xaa))
827 return 1; /* we found a device */
829 return 0; /* nothing found */
833 * ata_devchk - PATA device presence detection
834 * @ap: ATA channel to examine
835 * @device: Device to examine (starting at zero)
837 * Dispatch ATA device presence detection, depending
838 * on whether we are using PIO or MMIO to talk to the
839 * ATA shadow registers.
845 static unsigned int ata_devchk(struct ata_port *ap,
848 if (ap->flags & ATA_FLAG_MMIO)
849 return ata_mmio_devchk(ap, device);
850 return ata_pio_devchk(ap, device);
854 * ata_dev_classify - determine device type based on ATA-spec signature
855 * @tf: ATA taskfile register set for device to be identified
857 * Determine from taskfile register contents whether a device is
858 * ATA or ATAPI, as per "Signature and persistence" section
859 * of ATA/PI spec (volume 1, sect 5.14).
865 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
866 * in the event of failure.
869 unsigned int ata_dev_classify(struct ata_taskfile *tf)
871 /* Apple's open source Darwin code hints that some devices only
872 * put a proper signature into the LBA mid/high registers,
873 * so we only check those. It's sufficient for uniqueness.
876 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
877 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
878 DPRINTK("found ATA device by sig\n");
882 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
883 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
884 DPRINTK("found ATAPI device by sig\n");
885 return ATA_DEV_ATAPI;
888 DPRINTK("unknown device\n");
889 return ATA_DEV_UNKNOWN;
893 * ata_dev_try_classify - Parse returned ATA device signature
894 * @ap: ATA channel to examine
895 * @device: Device to examine (starting at zero)
897 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
898 * an ATA/ATAPI-defined set of values is placed in the ATA
899 * shadow registers, indicating the results of device detection
902 * Select the ATA device, and read the values from the ATA shadow
903 * registers. Then parse according to the Error register value,
904 * and the spec-defined values examined by ata_dev_classify().
910 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
912 struct ata_device *dev = &ap->device[device];
913 struct ata_taskfile tf;
917 ap->ops->dev_select(ap, device);
919 memset(&tf, 0, sizeof(tf));
921 err = ata_chk_err(ap);
922 ap->ops->tf_read(ap, &tf);
924 dev->class = ATA_DEV_NONE;
926 /* see if device passed diags */
929 else if ((device == 0) && (err == 0x81))
934 /* determine if device is ATA or ATAPI */
935 class = ata_dev_classify(&tf);
936 if (class == ATA_DEV_UNKNOWN)
938 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
947 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
948 * @id: IDENTIFY DEVICE results we will examine
949 * @s: string into which data is output
950 * @ofs: offset into identify device page
951 * @len: length of string to return. Must be an even number.
953 * The strings in the IDENTIFY DEVICE page are broken up into
954 * 16-bit chunks. Run through the string, and output each
955 * 8-bit chunk linearly, regardless of platform.
961 void ata_dev_id_string(u16 *id, unsigned char *s,
962 unsigned int ofs, unsigned int len)
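/*
 * Worked example (illustrative): IDENTIFY strings pack two ASCII
 * characters per 16-bit word, with the first character in the high
 * byte.  A model string starting with "ST" is stored as id[ofs] ==
 * 0x5354, and this helper emits 'S' then 'T' regardless of host
 * endianness.
 */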
982 * ata_noop_dev_select - Select device 0/1 on ATA bus
983 * @ap: ATA channel to manipulate
984 * @device: ATA device (numbered from zero) to select
986 * This function performs no action; it is a no-op for hosts that need no device selection.
988 * May be used as the dev_select() entry in ata_port_operations.
993 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
999 * ata_std_dev_select - Select device 0/1 on ATA bus
1000 * @ap: ATA channel to manipulate
1001 * @device: ATA device (numbered from zero) to select
1003 * Use the method defined in the ATA specification to
1004 * make either device 0, or device 1, active on the
1005 * ATA channel. Works with both PIO and MMIO.
1007 * May be used as the dev_select() entry in ata_port_operations.
1013 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1018 tmp = ATA_DEVICE_OBS;
1020 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1022 if (ap->flags & ATA_FLAG_MMIO) {
1023 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
1025 outb(tmp, ap->ioaddr.device_addr);
1027 ata_pause(ap); /* needed; also flushes, for mmio */
1031 * ata_dev_select - Select device 0/1 on ATA bus
1032 * @ap: ATA channel to manipulate
1033 * @device: ATA device (numbered from zero) to select
1034 * @wait: non-zero to wait for Status register BSY bit to clear
1035 * @can_sleep: non-zero if context allows sleeping
1037 * Use the method defined in the ATA specification to
1038 * make either device 0, or device 1, active on the
1041 * This is a high-level version of ata_std_dev_select(),
1042 * which additionally provides the services of inserting
1043 * the proper pauses and status polling, where needed.
1049 void ata_dev_select(struct ata_port *ap, unsigned int device,
1050 unsigned int wait, unsigned int can_sleep)
1052 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
1053 ap->id, device, wait);
1058 ap->ops->dev_select(ap, device);
1061 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1068 * ata_dump_id - IDENTIFY DEVICE info debugging output
1069 * @dev: Device whose IDENTIFY DEVICE page we will dump
1071 * Dump selected 16-bit words from a detected device's
1072 * IDENTIFY DEVICE page.
1078 static inline void ata_dump_id(struct ata_device *dev)
1080 DPRINTK("49==0x%04x "
1090 DPRINTK("80==0x%04x "
1100 DPRINTK("88==0x%04x "
1107 * ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
1108 * @ap: port on which device we wish to probe resides
1109 * @device: device bus address, starting at zero
1111 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1112 * command, and read back the 512-byte device information page.
1113 * The device information page is fed to us via the standard
1114 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1115 * using standard PIO-IN paths)
1117 * After reading the device information page, we use several
1118 * bits of information from it to initialize data structures
1119 * that will be used during the lifetime of the ata_device.
1120 * Other data from the info page is used to disqualify certain
1121 * older ATA devices we do not wish to support.
1124 * Inherited from caller. Some functions called by this function
1125 * obtain the host_set lock.
1128 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1130 struct ata_device *dev = &ap->device[device];
1131 unsigned int major_version;
1133 unsigned long xfer_modes;
1135 unsigned int using_edd;
1136 DECLARE_COMPLETION(wait);
1137 struct ata_queued_cmd *qc;
1138 unsigned long flags;
1141 if (!ata_dev_present(dev)) {
1142 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1147 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1152 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1154 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1155 dev->class == ATA_DEV_NONE);
1157 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
1159 qc = ata_qc_new_init(ap, dev);
1162 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1163 qc->dma_dir = DMA_FROM_DEVICE;
1164 qc->tf.protocol = ATA_PROT_PIO;
1168 if (dev->class == ATA_DEV_ATA) {
1169 qc->tf.command = ATA_CMD_ID_ATA;
1170 DPRINTK("do ATA identify\n");
1172 qc->tf.command = ATA_CMD_ID_ATAPI;
1173 DPRINTK("do ATAPI identify\n");
1176 qc->waiting = &wait;
1177 qc->complete_fn = ata_qc_complete_noop;
1179 spin_lock_irqsave(&ap->host_set->lock, flags);
1180 rc = ata_qc_issue(qc);
1181 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1186 wait_for_completion(&wait);
1188 status = ata_chk_status(ap);
1189 if (status & ATA_ERR) {
1191 * arg! EDD works for all test cases, but seems to return
1192 * the ATA signature for some ATAPI devices. Until the
1193 * reason for this is found and fixed, we fix up the mess
1194 * here. If IDENTIFY DEVICE returns command aborted
1195 * (as ATAPI devices do), then we issue an
1196 * IDENTIFY PACKET DEVICE.
1198 * ATA software reset (SRST, the default) does not appear
1199 * to have this problem.
1201 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1202 u8 err = ata_chk_err(ap);
1203 if (err & ATA_ABORTED) {
1204 dev->class = ATA_DEV_ATAPI;
1215 swap_buf_le16(dev->id, ATA_ID_WORDS);
1217 /* print device capabilities */
1218 printk(KERN_DEBUG "ata%u: dev %u cfg "
1219 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1220 ap->id, device, dev->id[49],
1221 dev->id[82], dev->id[83], dev->id[84],
1222 dev->id[85], dev->id[86], dev->id[87],
1226 * common ATA, ATAPI feature tests
1229 /* we require DMA support (bit 8 of word 49) */
1230 if (!ata_id_has_dma(dev->id)) {
1231 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1235 /* quick-n-dirty find max transfer mode; for printk only */
1236 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1238 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1240 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1241 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
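	/* Illustrative note: ATA_ID_PIO_MODES only advertises the
	 * advanced PIO modes (3 and up), so the 0x7 OR'd in above
	 * stands in for the always-implied PIO0-2; ata_get_mode_mask()
	 * below applies the same reasoning.
	 */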
1246 /* ATA-specific feature tests */
1247 if (dev->class == ATA_DEV_ATA) {
1248 if (!ata_id_is_ata(dev->id)) /* sanity check */
1251 /* get major version */
1252 tmp = dev->id[ATA_ID_MAJOR_VER];
1253 for (major_version = 14; major_version >= 1; major_version--)
1254 if (tmp & (1 << major_version))
1258 * The exact sequence expected by certain pre-ATA4 drives is:
1261 * INITIALIZE DEVICE PARAMETERS
1263 * Some drives were very specific about that exact sequence.
1265 if (major_version < 4 || (!ata_id_has_lba(dev->id)))
1266 ata_dev_init_params(ap, dev);
1268 if (ata_id_has_lba(dev->id)) {
1269 dev->flags |= ATA_DFLAG_LBA;
1271 if (ata_id_has_lba48(dev->id)) {
1272 dev->flags |= ATA_DFLAG_LBA48;
1273 dev->n_sectors = ata_id_u64(dev->id, 100);
1275 dev->n_sectors = ata_id_u32(dev->id, 60);
1278 /* print device info to dmesg */
1279 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1282 ata_mode_string(xfer_modes),
1283 (unsigned long long)dev->n_sectors,
1284 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1288 /* Default translation */
1289 dev->cylinders = dev->id[1];
1290 dev->heads = dev->id[3];
1291 dev->sectors = dev->id[6];
1292 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
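	/* Illustrative example: the classic default geometry 16383/16/63
	 * gives 16383 * 16 * 63 = 16514064 sectors, roughly 8.4 GB, the
	 * most that plain CHS addressing can describe.
	 */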
1294 if (ata_id_current_chs_valid(dev->id)) {
1295 /* Current CHS translation is valid. */
1296 dev->cylinders = dev->id[54];
1297 dev->heads = dev->id[55];
1298 dev->sectors = dev->id[56];
1300 dev->n_sectors = ata_id_u32(dev->id, 57);
1303 /* print device info to dmesg */
1304 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1307 ata_mode_string(xfer_modes),
1308 (unsigned long long)dev->n_sectors,
1309 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1313 ap->host->max_cmd_len = 16;
1316 /* ATAPI-specific feature tests */
1318 if (ata_id_is_ata(dev->id)) /* sanity check */
1321 rc = atapi_cdb_len(dev->id);
1322 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1323 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1326 ap->cdb_len = (unsigned int) rc;
1327 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1329 /* print device info to dmesg */
1330 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1332 ata_mode_string(xfer_modes));
1335 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1339 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1342 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1343 DPRINTK("EXIT, err\n");
1347 static inline u8 ata_dev_knobble(struct ata_port *ap)
1349 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1353 * ata_dev_config - Run device specific handlers and check for
1354 * SATA->PATA bridges
1361 void ata_dev_config(struct ata_port *ap, unsigned int i)
1363 /* limit bridge transfers to udma5, 200 sectors */
1364 if (ata_dev_knobble(ap)) {
1365 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1366 ap->id, ap->device->devno);
1367 ap->udma_mask &= ATA_UDMA5;
1368 ap->host->max_sectors = ATA_MAX_SECTORS;
1369 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1370 ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
1373 if (ap->ops->dev_config)
1374 ap->ops->dev_config(ap, &ap->device[i]);
1378 * ata_bus_probe - Reset and probe ATA bus
1381 * Master ATA bus probing function. Initiates a hardware-dependent
1382 * bus reset, then attempts to identify any devices found on
1386 * PCI/etc. bus probe sem.
1389 * Zero on success, non-zero on error.
1392 static int ata_bus_probe(struct ata_port *ap)
1394 unsigned int i, found = 0;
1396 ap->ops->phy_reset(ap);
1397 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1400 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1401 ata_dev_identify(ap, i);
1402 if (ata_dev_present(&ap->device[i])) {
1404 ata_dev_config(ap,i);
1408 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1409 goto err_out_disable;
1412 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1413 goto err_out_disable;
1418 ap->ops->port_disable(ap);
1424 * ata_port_probe - Mark port as enabled
1425 * @ap: Port for which we indicate enablement
1427 * Modify @ap data structure such that the system
1428 * thinks that the entire port is enabled.
1430 * LOCKING: host_set lock, or some other form of serialization.
1434 void ata_port_probe(struct ata_port *ap)
1436 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1440 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1441 * @ap: SATA port associated with target SATA PHY.
1443 * This function issues commands to standard SATA Sxxx
1444 * PHY registers, to wake up the phy (and device), and
1445 * clear any reset condition.
1448 * PCI/etc. bus probe sem.
1451 void __sata_phy_reset(struct ata_port *ap)
1454 unsigned long timeout = jiffies + (HZ * 5);
1456 if (ap->flags & ATA_FLAG_SATA_RESET) {
1457 /* issue phy wake/reset */
1458 scr_write_flush(ap, SCR_CONTROL, 0x301);
1459 /* Couldn't find anything in SATA I/II specs, but
1460 * AHCI-1.1 10.4.2 says at least 1 ms. */
1463 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1465 /* wait for phy to become ready, if necessary */
1468 sstatus = scr_read(ap, SCR_STATUS);
1469 if ((sstatus & 0xf) != 1)
1471 } while (time_before(jiffies, timeout));
1473 /* TODO: phy layer with polling, timeouts, etc. */
1474 if (sata_dev_present(ap))
1477 sstatus = scr_read(ap, SCR_STATUS);
1478 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1480 ata_port_disable(ap);
1483 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1486 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1487 ata_port_disable(ap);
1491 ap->cbl = ATA_CBL_SATA;
1495 * sata_phy_reset - Reset SATA bus.
1496 * @ap: SATA port associated with target SATA PHY.
1498 * This function resets the SATA bus, and then probes
1499 * the bus for devices.
1502 * PCI/etc. bus probe sem.
1505 void sata_phy_reset(struct ata_port *ap)
1507 __sata_phy_reset(ap);
1508 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1514 * ata_port_disable - Disable port.
1515 * @ap: Port to be disabled.
1517 * Modify @ap data structure such that the system
1518 * thinks that the entire port is disabled, and should
1519 * never attempt to probe or communicate with devices
1522 * LOCKING: host_set lock, or some other form of serialization.
1526 void ata_port_disable(struct ata_port *ap)
1528 ap->device[0].class = ATA_DEV_NONE;
1529 ap->device[1].class = ATA_DEV_NONE;
1530 ap->flags |= ATA_FLAG_PORT_DISABLED;
1536 } xfer_mode_classes[] = {
1537 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1538 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1539 { ATA_SHIFT_PIO, XFER_PIO_0 },
1542 static inline u8 base_from_shift(unsigned int shift)
1546 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1547 if (xfer_mode_classes[i].shift == shift)
1548 return xfer_mode_classes[i].base;
1553 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1558 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1561 if (dev->xfer_shift == ATA_SHIFT_PIO)
1562 dev->flags |= ATA_DFLAG_PIO;
1564 ata_dev_set_xfermode(ap, dev);
1566 base = base_from_shift(dev->xfer_shift);
1567 ofs = dev->xfer_mode - base;
1568 idx = ofs + dev->xfer_shift;
1569 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1571 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1572 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1574 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1575 ap->id, dev->devno, xfer_mode_str[idx]);
1578 static int ata_host_set_pio(struct ata_port *ap)
1584 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1587 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1591 base = base_from_shift(ATA_SHIFT_PIO);
1592 xfer_mode = base + x;
1594 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1595 (int)base, (int)xfer_mode, mask, x);
1597 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1598 struct ata_device *dev = &ap->device[i];
1599 if (ata_dev_present(dev)) {
1600 dev->pio_mode = xfer_mode;
1601 dev->xfer_mode = xfer_mode;
1602 dev->xfer_shift = ATA_SHIFT_PIO;
1603 if (ap->ops->set_piomode)
1604 ap->ops->set_piomode(ap, dev);
1611 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1612 unsigned int xfer_shift)
1616 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1617 struct ata_device *dev = &ap->device[i];
1618 if (ata_dev_present(dev)) {
1619 dev->dma_mode = xfer_mode;
1620 dev->xfer_mode = xfer_mode;
1621 dev->xfer_shift = xfer_shift;
1622 if (ap->ops->set_dmamode)
1623 ap->ops->set_dmamode(ap, dev);
1629 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1630 * @ap: port on which timings will be programmed
1632 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1635 * PCI/etc. bus probe sem.
1638 static void ata_set_mode(struct ata_port *ap)
1640 unsigned int i, xfer_shift;
1644 /* step 1: always set host PIO timings */
1645 rc = ata_host_set_pio(ap);
1649 /* step 2: choose the best data xfer mode */
1650 xfer_mode = xfer_shift = 0;
1651 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1655 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1656 if (xfer_shift != ATA_SHIFT_PIO)
1657 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1659 /* step 4: update devices' xfer mode */
1660 ata_dev_set_mode(ap, &ap->device[0]);
1661 ata_dev_set_mode(ap, &ap->device[1]);
1663 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1666 if (ap->ops->post_set_mode)
1667 ap->ops->post_set_mode(ap);
1669 for (i = 0; i < 2; i++) {
1670 struct ata_device *dev = &ap->device[i];
1671 ata_dev_set_protocol(dev);
1677 ata_port_disable(ap);
1681 * ata_busy_sleep - sleep until BSY clears, or timeout
1682 * @ap: port containing status register to be polled
1683 * @tmout_pat: impatience timeout
1684 * @tmout: overall timeout
1686 * Sleep until ATA Status register bit BSY clears,
1687 * or a timeout occurs.
1693 static unsigned int ata_busy_sleep (struct ata_port *ap,
1694 unsigned long tmout_pat,
1695 unsigned long tmout)
1697 unsigned long timer_start, timeout;
1700 status = ata_busy_wait(ap, ATA_BUSY, 300);
1701 timer_start = jiffies;
1702 timeout = timer_start + tmout_pat;
1703 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1705 status = ata_busy_wait(ap, ATA_BUSY, 3);
1708 if (status & ATA_BUSY)
1709 printk(KERN_WARNING "ata%u is slow to respond, "
1710 "please be patient\n", ap->id);
1712 timeout = timer_start + tmout;
1713 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1715 status = ata_chk_status(ap);
1718 if (status & ATA_BUSY) {
1719 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1720 ap->id, tmout / HZ);
1727 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1729 struct ata_ioports *ioaddr = &ap->ioaddr;
1730 unsigned int dev0 = devmask & (1 << 0);
1731 unsigned int dev1 = devmask & (1 << 1);
1732 unsigned long timeout;
1734 /* if device 0 was found in ata_devchk, wait for its
1738 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1740 /* if device 1 was found in ata_devchk, wait for
1741 * register access, then wait for BSY to clear
1743 timeout = jiffies + ATA_TMOUT_BOOT;
1747 ap->ops->dev_select(ap, 1);
1748 if (ap->flags & ATA_FLAG_MMIO) {
1749 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1750 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1752 nsect = inb(ioaddr->nsect_addr);
1753 lbal = inb(ioaddr->lbal_addr);
1755 if ((nsect == 1) && (lbal == 1))
1757 if (time_after(jiffies, timeout)) {
1761 msleep(50); /* give drive a breather */
1764 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1766 /* is all this really necessary? */
1767 ap->ops->dev_select(ap, 0);
1769 ap->ops->dev_select(ap, 1);
1771 ap->ops->dev_select(ap, 0);
1775 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1776 * @ap: Port to reset and probe
1778 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1779 * probe the bus. Not often used these days.
1782 * PCI/etc. bus probe sem.
1786 static unsigned int ata_bus_edd(struct ata_port *ap)
1788 struct ata_taskfile tf;
1790 /* set up execute-device-diag (bus reset) taskfile */
1791 /* also, take interrupts to a known state (disabled) */
1792 DPRINTK("execute-device-diag\n");
1793 ata_tf_init(ap, &tf, 0);
1795 tf.command = ATA_CMD_EDD;
1796 tf.protocol = ATA_PROT_NODATA;
1799 ata_tf_to_host(ap, &tf);
1801 /* spec says at least 2ms. but who knows with those
1802 * crazy ATAPI devices...
1806 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1809 static unsigned int ata_bus_softreset(struct ata_port *ap,
1810 unsigned int devmask)
1812 struct ata_ioports *ioaddr = &ap->ioaddr;
1814 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1816 /* software reset. causes dev0 to be selected */
1817 if (ap->flags & ATA_FLAG_MMIO) {
1818 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1819 udelay(20); /* FIXME: flush */
1820 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1821 udelay(20); /* FIXME: flush */
1822 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1824 outb(ap->ctl, ioaddr->ctl_addr);
1826 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1828 outb(ap->ctl, ioaddr->ctl_addr);
1831 /* spec mandates ">= 2ms" before checking status.
1832 * We wait 150ms, because that was the magic delay used for
1833 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1834 * between when the ATA command register is written, and then
1835 * status is checked. Because waiting for "a while" before
1836 * checking status is fine, post SRST, we perform this magic
1837 * delay here as well.
1841 ata_bus_post_reset(ap, devmask);
1847 * ata_bus_reset - reset host port and associated ATA channel
1848 * @ap: port to reset
1850 * This is typically the first time we actually start issuing
1851 * commands to the ATA channel. We wait for BSY to clear, then
1852 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1853 * result. Determine what devices, if any, are on the channel
1854 * by looking at the device 0/1 error register. Look at the signature
1855 * stored in each device's taskfile registers, to determine if
1856 * the device is ATA or ATAPI.
1859 * PCI/etc. bus probe sem.
1860 * Obtains host_set lock.
1863 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1866 void ata_bus_reset(struct ata_port *ap)
1868 struct ata_ioports *ioaddr = &ap->ioaddr;
1869 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1871 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1873 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1875 /* determine if device 0/1 are present */
1876 if (ap->flags & ATA_FLAG_SATA_RESET)
1879 dev0 = ata_devchk(ap, 0);
1881 dev1 = ata_devchk(ap, 1);
1885 devmask |= (1 << 0);
1887 devmask |= (1 << 1);
1889 /* select device 0 again */
1890 ap->ops->dev_select(ap, 0);
1892 /* issue bus reset */
1893 if (ap->flags & ATA_FLAG_SRST)
1894 rc = ata_bus_softreset(ap, devmask);
1895 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1896 /* set up device control */
1897 if (ap->flags & ATA_FLAG_MMIO)
1898 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1900 outb(ap->ctl, ioaddr->ctl_addr);
1901 rc = ata_bus_edd(ap);
1908 * determine by signature whether we have ATA or ATAPI devices
1910 err = ata_dev_try_classify(ap, 0);
1911 if ((slave_possible) && (err != 0x81))
1912 ata_dev_try_classify(ap, 1);
1914 /* re-enable interrupts */
1915 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1918 /* is double-select really necessary? */
1919 if (ap->device[1].class != ATA_DEV_NONE)
1920 ap->ops->dev_select(ap, 1);
1921 if (ap->device[0].class != ATA_DEV_NONE)
1922 ap->ops->dev_select(ap, 0);
1924 /* if no devices were detected, disable this port */
1925 if ((ap->device[0].class == ATA_DEV_NONE) &&
1926 (ap->device[1].class == ATA_DEV_NONE))
1929 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1930 /* set up device control for ATA_FLAG_SATA_RESET */
1931 if (ap->flags & ATA_FLAG_MMIO)
1932 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1934 outb(ap->ctl, ioaddr->ctl_addr);
1941 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1942 ap->ops->port_disable(ap);
1947 static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
1949 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1950 ap->id, dev->devno);
1953 static const char * ata_dma_blacklist [] = {
1972 "Toshiba CD-ROM XM-6202B",
1973 "TOSHIBA CD-ROM XM-1702BC",
1975 "E-IDE CD-ROM CR-840",
1978 "SAMSUNG CD-ROM SC-148C",
1979 "SAMSUNG CD-ROM SC",
1981 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
1985 static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1987 unsigned char model_num[40];
1992 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
1995 len = strnlen(s, sizeof(model_num));
1997 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1998 while ((len > 0) && (s[len - 1] == ' ')) {
2003 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2004 if (!strncmp(ata_dma_blacklist[i], s, len))
2010 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
2012 struct ata_device *master, *slave;
2015 master = &ap->device[0];
2016 slave = &ap->device[1];
2018 assert (ata_dev_present(master) || ata_dev_present(slave));
2020 if (shift == ATA_SHIFT_UDMA) {
2021 mask = ap->udma_mask;
2022 if (ata_dev_present(master)) {
2023 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2024 if (ata_dma_blacklisted(ap, master)) {
2026 ata_pr_blacklisted(ap, master);
2029 if (ata_dev_present(slave)) {
2030 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2031 if (ata_dma_blacklisted(ap, slave)) {
2033 ata_pr_blacklisted(ap, slave);
2037 else if (shift == ATA_SHIFT_MWDMA) {
2038 mask = ap->mwdma_mask;
2039 if (ata_dev_present(master)) {
2040 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2041 if (ata_dma_blacklisted(ap, master)) {
2043 ata_pr_blacklisted(ap, master);
2046 if (ata_dev_present(slave)) {
2047 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2048 if (ata_dma_blacklisted(ap, slave)) {
2050 ata_pr_blacklisted(ap, slave);
2054 else if (shift == ATA_SHIFT_PIO) {
2055 mask = ap->pio_mask;
2056 if (ata_dev_present(master)) {
2057 /* spec doesn't return explicit support for
2058 * PIO0-2, so we fake it
2060 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2065 if (ata_dev_present(slave)) {
2066 /* spec doesn't return explicit support for
2067 * PIO0-2, so we fake it
2069 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2076 mask = 0xffffffff; /* shut up compiler warning */
2083 /* find greatest bit */
2084 static int fgb(u32 bitmap)
2089 for (i = 0; i < 32; i++)
2090 if (bitmap & (1 << i))
2097 * ata_choose_xfer_mode - attempt to find best transfer mode
2098 * @ap: Port for which an xfer mode will be selected
2099 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2100 * @xfer_shift_out: (output) bit shift that selects this mode
2102 * Based on host and device capabilities, determine the
2103 * maximum transfer mode that is amenable to all.
2106 * PCI/etc. bus probe sem.
2109 * Zero on success, negative on error.
2112 static int ata_choose_xfer_mode(struct ata_port *ap,
2113 u8 *xfer_mode_out,
2114 unsigned int *xfer_shift_out)
2116 unsigned int mask, shift;
2119 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2120 shift = xfer_mode_classes[i].shift;
2121 mask = ata_get_mode_mask(ap, shift);
2125 *xfer_mode_out = xfer_mode_classes[i].base + x;
2126 *xfer_shift_out = shift;
2135 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2136 * @ap: Port associated with device @dev
2137 * @dev: Device to which command will be sent
2139 * Issue SET FEATURES - XFER MODE command to device @dev
2143 * PCI/etc. bus probe sem.
2146 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2148 DECLARE_COMPLETION(wait);
2149 struct ata_queued_cmd *qc;
2151 unsigned long flags;
2153 /* set up set-features taskfile */
2154 DPRINTK("set features - xfer mode\n");
2156 qc = ata_qc_new_init(ap, dev);
2159 qc->tf.command = ATA_CMD_SET_FEATURES;
2160 qc->tf.feature = SETFEATURES_XFER;
2161 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2162 qc->tf.protocol = ATA_PROT_NODATA;
2163 qc->tf.nsect = dev->xfer_mode;
2165 qc->waiting = &wait;
2166 qc->complete_fn = ata_qc_complete_noop;
2168 spin_lock_irqsave(&ap->host_set->lock, flags);
2169 rc = ata_qc_issue(qc);
2170 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2173 ata_port_disable(ap);
2175 wait_for_completion(&wait);
2181 * ata_dev_init_params - Issue INIT DEV PARAMS command
2182 * @ap: Port associated with device @dev
2183 * @dev: Device to which command will be sent
2188 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2190 DECLARE_COMPLETION(wait);
2191 struct ata_queued_cmd *qc;
2193 unsigned long flags;
2194 u16 sectors = dev->id[6];
2195 u16 heads = dev->id[3];
2197 /* Number of sectors per track 1-255. Number of heads 1-16 */
2198 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2201 /* set up init dev params taskfile */
2202 DPRINTK("init dev params \n");
2204 qc = ata_qc_new_init(ap, dev);
2207 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2208 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2209 qc->tf.protocol = ATA_PROT_NODATA;
2210 qc->tf.nsect = sectors;
2211 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2213 qc->waiting = &wait;
2214 qc->complete_fn = ata_qc_complete_noop;
2216 spin_lock_irqsave(&ap->host_set->lock, flags);
2217 rc = ata_qc_issue(qc);
2218 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2221 ata_port_disable(ap);
2223 wait_for_completion(&wait);
2229 * ata_sg_clean - Unmap DMA memory associated with command
2230 * @qc: Command containing DMA memory to be released
2232 * Unmap all mapped DMA memory associated with this command.
2235 * spin_lock_irqsave(host_set lock)
2238 static void ata_sg_clean(struct ata_queued_cmd *qc)
2240 struct ata_port *ap = qc->ap;
2241 struct scatterlist *sg = qc->sg;
2242 int dir = qc->dma_dir;
2244 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2247 if (qc->flags & ATA_QCFLAG_SINGLE)
2248 assert(qc->n_elem == 1);
2250 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
2252 if (qc->flags & ATA_QCFLAG_SG)
2253 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2255 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2256 sg_dma_len(&sg[0]), dir);
2258 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2263 * ata_fill_sg - Fill PCI IDE PRD table
2264 * @qc: Metadata associated with taskfile to be transferred
2266 * Fill PCI IDE PRD (scatter-gather) table with segments
2267 * associated with the current disk command.
2270 * spin_lock_irqsave(host_set lock)
2273 static void ata_fill_sg(struct ata_queued_cmd *qc)
2275 struct scatterlist *sg = qc->sg;
2276 struct ata_port *ap = qc->ap;
2277 unsigned int idx, nelem;
2280 assert(qc->n_elem > 0);
2283 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2287 /* determine if physical DMA addr spans 64K boundary.
2288 * Note h/w doesn't support 64-bit, so we unconditionally
2289 * truncate dma_addr_t to u32.
2291 addr = (u32) sg_dma_address(sg);
2292 sg_len = sg_dma_len(sg);
2295 offset = addr & 0xffff;
2297 if ((offset + sg_len) > 0x10000)
2298 len = 0x10000 - offset;
2300 ap->prd[idx].addr = cpu_to_le32(addr);
2301 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2302 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2311 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
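/*
 * Illustrative example of the boundary handling above: a 3 KB segment
 * at bus address 0x0000ff00 crosses a 64K boundary and is emitted as
 * two PRD entries, (0x0000ff00, 0x100) followed by (0x00010000, 0xb00),
 * with ATA_PRD_EOT set on the last entry of the table.
 */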
2314 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2315 * @qc: Metadata associated with taskfile to check
2317 * Allow low-level driver to filter ATA PACKET commands, returning
2318 * a status indicating whether or not it is OK to use DMA for the
2319 * supplied PACKET command.
2322 * spin_lock_irqsave(host_set lock)
2324 * RETURNS: 0 when ATAPI DMA can be used
2327 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2329 struct ata_port *ap = qc->ap;
2330 int rc = 0; /* Assume ATAPI DMA is OK by default */
2332 if (ap->ops->check_atapi_dma)
2333 rc = ap->ops->check_atapi_dma(qc);
2338 * ata_qc_prep - Prepare taskfile for submission
2339 * @qc: Metadata associated with taskfile to be prepared
2341 * Prepare ATA taskfile for submission.
2344 * spin_lock_irqsave(host_set lock)
2346 void ata_qc_prep(struct ata_queued_cmd *qc)
2348 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2355 * ata_sg_init_one - Associate command with memory buffer
2356 * @qc: Command to be associated
2357 * @buf: Memory buffer
2358 * @buflen: Length of memory buffer, in bytes.
2360 * Initialize the data-related elements of queued_cmd @qc
2361 * to point to a single memory buffer, @buf of byte length @buflen.
2364 * spin_lock_irqsave(host_set lock)
2367 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2369 struct scatterlist *sg;
2371 qc->flags |= ATA_QCFLAG_SINGLE;
2373 memset(&qc->sgent, 0, sizeof(qc->sgent));
2374 qc->sg = &qc->sgent;
2379 sg->page = virt_to_page(buf);
2380 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2381 sg->length = buflen;
2385 * ata_sg_init - Associate command with scatter-gather table.
2386 * @qc: Command to be associated
2387 * @sg: Scatter-gather table.
2388 * @n_elem: Number of elements in s/g table.
2390 * Initialize the data-related elements of queued_cmd @qc
2391 * to point to a scatter-gather table @sg, containing @n_elem
2395 * spin_lock_irqsave(host_set lock)
2398 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2399 unsigned int n_elem)
2401 qc->flags |= ATA_QCFLAG_SG;
2403 qc->n_elem = n_elem;
2407 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2408 * @qc: Command with memory buffer to be mapped.
2410 * DMA-map the memory buffer associated with queued_cmd @qc.
2413 * spin_lock_irqsave(host_set lock)
2416 * Zero on success, negative on error.
2419 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2421 struct ata_port *ap = qc->ap;
2422 int dir = qc->dma_dir;
2423 struct scatterlist *sg = qc->sg;
2424 dma_addr_t dma_address;
2426 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2428 if (dma_mapping_error(dma_address))
2431 sg_dma_address(sg) = dma_address;
2432 sg_dma_len(sg) = sg->length;
2434 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2435 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2441 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2442 * @qc: Command with scatter-gather table to be mapped.
2444 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2447 * spin_lock_irqsave(host_set lock)
2450 * Zero on success, negative on error.
2454 static int ata_sg_setup(struct ata_queued_cmd *qc)
2456 struct ata_port *ap = qc->ap;
2457 struct scatterlist *sg = qc->sg;
2460 VPRINTK("ENTER, ata%u\n", ap->id);
2461 assert(qc->flags & ATA_QCFLAG_SG);
2464 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2468 DPRINTK("%d sg elements mapped\n", n_elem);
2470 qc->n_elem = n_elem;
2476 * ata_poll_qc_complete - turn irq back on and finish qc
2477 * @qc: Command to complete
2478 * @drv_stat: ATA status register content
2481 * None. (grabs host lock)
2484 void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2486 struct ata_port *ap = qc->ap;
2487 unsigned long flags;
2489 spin_lock_irqsave(&ap->host_set->lock, flags);
2490 ap->flags &= ~ATA_FLAG_NOINTR;
2492 ata_qc_complete(qc, drv_stat);
2493 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2501 * None. (executing in kernel thread context)
2507 static unsigned long ata_pio_poll(struct ata_port *ap)
2510 unsigned int poll_state = PIO_ST_UNKNOWN;
2511 unsigned int reg_state = PIO_ST_UNKNOWN;
2512 const unsigned int tmout_state = PIO_ST_TMOUT;
2514 switch (ap->pio_task_state) {
2517 poll_state = PIO_ST_POLL;
2521 case PIO_ST_LAST_POLL:
2522 poll_state = PIO_ST_LAST_POLL;
2523 reg_state = PIO_ST_LAST;
2530 status = ata_chk_status(ap);
2531 if (status & ATA_BUSY) {
2532 if (time_after(jiffies, ap->pio_task_timeout)) {
2533 ap->pio_task_state = tmout_state;
2536 ap->pio_task_state = poll_state;
2537 return ATA_SHORT_PAUSE;
2540 ap->pio_task_state = reg_state;
2545 * ata_pio_complete - check status and finish a polled PIO command
2549 * None. (executing in kernel thread context)
2552 static void ata_pio_complete (struct ata_port *ap)
2554 struct ata_queued_cmd *qc;
2558 * This is purely heuristic. This is a fast path. Sometimes when
2559 * we enter, BSY will be cleared in a chk-status or two. If not,
2560 * the drive is probably seeking or something. Snooze for a couple
2561 * msecs, then chk-status again. If still busy, fall back to
2562 * PIO_ST_POLL state.
2564 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2565 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2567 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2568 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2569 ap->pio_task_state = PIO_ST_LAST_POLL;
2570 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2575 drv_stat = ata_wait_idle(ap);
2576 if (!ata_ok(drv_stat)) {
2577 ap->pio_task_state = PIO_ST_ERR;
2581 qc = ata_qc_from_tag(ap, ap->active_tag);
2584 ap->pio_task_state = PIO_ST_IDLE;
2586 ata_poll_qc_complete(qc, drv_stat);
2592 * @buf: Buffer to swap
2593 * @buf_words: Number of 16-bit words in buffer.
2595 * Swap halves of 16-bit words if needed to convert from
2596 * little-endian byte order to native cpu byte order, or vice versa.
2601 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2606 for (i = 0; i < buf_words; i++)
2607 buf[i] = le16_to_cpu(buf[i]);
2608 #endif /* __BIG_ENDIAN */
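
/*
 * Illustrative sketch (not part of the original file): IDENTIFY DEVICE
 * data comes back from the drive as 256 little-endian 16-bit words, so
 * a caller on a big-endian host fixes it up in place like this.  The
 * helper name is hypothetical.
 */
static inline void example_fixup_id(u16 *id)
{
	swap_buf_le16(id, 256);		/* 256 words per IDENTIFY page */
}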
2612 * ata_mmio_data_xfer - Transfer data by MMIO
2613 * @ap: port to read/write
2615 * @buflen: buffer length
2616 * @write_data: non-zero to write to the device, zero to read
2618 * Transfer data from/to the device data register by MMIO.
2621 * Inherited from caller.
2625 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2626 unsigned int buflen, int write_data)
2629 unsigned int words = buflen >> 1;
2630 u16 *buf16 = (u16 *) buf;
2631 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2633 /* Transfer multiple of 2 bytes */
2635 for (i = 0; i < words; i++)
2636 writew(le16_to_cpu(buf16[i]), mmio);
2638 for (i = 0; i < words; i++)
2639 buf16[i] = cpu_to_le16(readw(mmio));
2642 /* Transfer trailing 1 byte, if any. */
2643 if (unlikely(buflen & 0x01)) {
2644 u16 align_buf[1] = { 0 };
2645 unsigned char *trailing_buf = buf + buflen - 1;
2648 memcpy(align_buf, trailing_buf, 1);
2649 writew(le16_to_cpu(align_buf[0]), mmio);
2651 align_buf[0] = cpu_to_le16(readw(mmio));
2652 memcpy(trailing_buf, align_buf, 1);
2658 * ata_pio_data_xfer - Transfer data by PIO
2659 * @ap: port to read/write
2661 * @buflen: buffer length
2662 * @write_data: non-zero to write to the device, zero to read
2664 * Transfer data from/to the device data register by PIO.
2667 * Inherited from caller.
2671 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2672 unsigned int buflen, int write_data)
2674 unsigned int words = buflen >> 1;
2676 /* Transfer multiple of 2 bytes */
2678 outsw(ap->ioaddr.data_addr, buf, words);
2680 insw(ap->ioaddr.data_addr, buf, words);
2682 /* Transfer trailing 1 byte, if any. */
2683 if (unlikely(buflen & 0x01)) {
2684 u16 align_buf[1] = { 0 };
2685 unsigned char *trailing_buf = buf + buflen - 1;
2688 memcpy(align_buf, trailing_buf, 1);
2689 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
2691 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
2692 memcpy(trailing_buf, align_buf, 1);
2698 * ata_data_xfer - Transfer data from/to the data register.
2699 * @ap: port to read/write
2701 * @buflen: buffer length
2702 * @do_write: non-zero to write to the device, zero to read
2704 * Transfer data from/to the device data register.
2707 * Inherited from caller.
2711 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2712 unsigned int buflen, int do_write)
2714 if (ap->flags & ATA_FLAG_MMIO)
2715 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2717 ata_pio_data_xfer(ap, buf, buflen, do_write);
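
/*
 * Illustrative sketch (not part of the original file): pushing one
 * 512-byte sector, already mapped at @buf, through the dispatcher above,
 * taking the direction from the taskfile the way ata_pio_sector() does.
 */
static inline void example_xfer_sector(struct ata_queued_cmd *qc,
				       unsigned char *buf)
{
	ata_data_xfer(qc->ap, buf, ATA_SECT_SIZE,
		      qc->tf.flags & ATA_TFLAG_WRITE);
}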
2721 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
2722 * @qc: Command in progress
2724 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
2727 * Inherited from caller.
2730 static void ata_pio_sector(struct ata_queued_cmd *qc)
2732 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2733 struct scatterlist *sg = qc->sg;
2734 struct ata_port *ap = qc->ap;
2736 unsigned int offset;
2739 if (qc->cursect == (qc->nsect - 1))
2740 ap->pio_task_state = PIO_ST_LAST;
2742 page = sg[qc->cursg].page;
2743 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2745 /* get the current page and offset */
2746 page = nth_page(page, (offset >> PAGE_SHIFT));
2747 offset %= PAGE_SIZE;
2749 buf = kmap(page) + offset;
2754 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2759 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2761 /* do the actual data transfer */
2762 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2763 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
2769 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
2770 * @qc: Command in progress
2771 * @bytes: number of bytes
2773 * Transfer data from/to the ATAPI device.
2776 * Inherited from caller.
2780 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2782 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2783 struct scatterlist *sg = qc->sg;
2784 struct ata_port *ap = qc->ap;
2787 unsigned int offset, count;
2789 if (qc->curbytes + bytes >= qc->nbytes)
2790 ap->pio_task_state = PIO_ST_LAST;
2793 if (unlikely(qc->cursg >= qc->n_elem)) {
2795 * The end of qc->sg is reached and the device expects
2796 * more data to transfer. In order not to overrun qc->sg
2797 * and still fulfill the length specified in the byte count register,
2798 * - for the read case, discard trailing data from the device
2799 * - for the write case, pad the device with zero data
2801 u16 pad_buf[1] = { 0 };
2802 unsigned int words = bytes >> 1;
2805 if (words) /* warning if bytes > 1 */
2806 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
2809 for (i = 0; i < words; i++)
2810 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2812 ap->pio_task_state = PIO_ST_LAST;
2816 sg = &qc->sg[qc->cursg];
2819 offset = sg->offset + qc->cursg_ofs;
2821 /* get the current page and offset */
2822 page = nth_page(page, (offset >> PAGE_SHIFT));
2823 offset %= PAGE_SIZE;
2825 /* don't overrun current sg */
2826 count = min(sg->length - qc->cursg_ofs, bytes);
2828 /* don't cross page boundaries */
2829 count = min(count, (unsigned int)PAGE_SIZE - offset);
2831 buf = kmap(page) + offset;
2834 qc->curbytes += count;
2835 qc->cursg_ofs += count;
2837 if (qc->cursg_ofs == sg->length) {
2842 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2844 /* do the actual data transfer */
2845 ata_data_xfer(ap, buf, count, do_write);
2854 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
2855 * @qc: Command in progress
2857 * Transfer data from/to the ATAPI device.
2860 * Inherited from caller.
2864 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2866 struct ata_port *ap = qc->ap;
2867 struct ata_device *dev = qc->dev;
2868 unsigned int ireason, bc_lo, bc_hi, bytes;
2869 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2871 ap->ops->tf_read(ap, &qc->tf);
2872 ireason = qc->tf.nsect;
2873 bc_lo = qc->tf.lbam;
2874 bc_hi = qc->tf.lbah;
2875 bytes = (bc_hi << 8) | bc_lo;
2877 /* the CoD bit shall be cleared to zero, indicating transfer of data */
2878 if (ireason & (1 << 0))
2881 /* make sure transfer direction matches expected */
2882 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2883 if (do_write != i_write)
2886 __atapi_pio_bytes(qc, bytes);
2891 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2892 ap->id, dev->devno);
2893 ap->pio_task_state = PIO_ST_ERR;
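
/*
 * Illustrative sketch (not part of the original file): the interrupt
 * reason byte read into "ireason" above packs CoD in bit 0 and I/O in
 * bit 1.  The macro and helper names here are hypothetical, used only
 * to spell out the bit tests that atapi_pio_bytes() open-codes.
 */
#define EXAMPLE_IREASON_COD	(1 << 0)	/* 1 = command packet, 0 = data */
#define EXAMPLE_IREASON_IO	(1 << 1)	/* 1 = to host, 0 = to device */

static inline int example_device_expects_write(u8 ireason)
{
	/* I/O bit clear: the device wants data written *to* it */
	return (ireason & EXAMPLE_IREASON_IO) == 0;
}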
2901 * None. (executing in kernel thread context)
2904 static void ata_pio_block(struct ata_port *ap)
2906 struct ata_queued_cmd *qc;
2910 * This is purely heuristic. This is a fast path.
2911 * Sometimes when we enter, BSY will be cleared in
2912 * a chk-status or two. If not, the drive is probably seeking
2913 * or something. Snooze for a couple msecs, then
2914 * chk-status again. If still busy, fall back to
2915 * PIO_ST_POLL state.
2917 status = ata_busy_wait(ap, ATA_BUSY, 5);
2918 if (status & ATA_BUSY) {
2920 status = ata_busy_wait(ap, ATA_BUSY, 10);
2921 if (status & ATA_BUSY) {
2922 ap->pio_task_state = PIO_ST_POLL;
2923 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2928 qc = ata_qc_from_tag(ap, ap->active_tag);
2931 if (is_atapi_taskfile(&qc->tf)) {
2932 /* no more data to transfer or unsupported ATAPI command */
2933 if ((status & ATA_DRQ) == 0) {
2934 ap->pio_task_state = PIO_ST_IDLE;
2936 ata_poll_qc_complete(qc, status);
2940 atapi_pio_bytes(qc);
2942 /* handle BSY=0, DRQ=0 as error */
2943 if ((status & ATA_DRQ) == 0) {
2944 ap->pio_task_state = PIO_ST_ERR;
2952 static void ata_pio_error(struct ata_port *ap)
2954 struct ata_queued_cmd *qc;
2957 qc = ata_qc_from_tag(ap, ap->active_tag);
2960 drv_stat = ata_chk_status(ap);
2961 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2964 ap->pio_task_state = PIO_ST_IDLE;
2966 ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
2969 static void ata_pio_task(void *_data)
2971 struct ata_port *ap = _data;
2972 unsigned long timeout = 0;
2974 switch (ap->pio_task_state) {
2983 ata_pio_complete(ap);
2987 case PIO_ST_LAST_POLL:
2988 timeout = ata_pio_poll(ap);
2998 queue_delayed_work(ata_wq, &ap->pio_task,
3001 queue_work(ata_wq, &ap->pio_task);
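
/*
 * Illustrative sketch (not part of the original file): issuing code
 * kicks the polled-PIO state machine simply by queueing ap->pio_task on
 * libata's workqueue; ata_pio_task() above then reschedules itself as
 * shown until the state machine reaches PIO_ST_IDLE.  This assumes the
 * issue path has already set up ap->pio_task_state.
 */
static inline void example_start_pio_task(struct ata_port *ap)
{
	queue_work(ata_wq, &ap->pio_task);
}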
3004 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
3005 struct scsi_cmnd *cmd)
3007 DECLARE_COMPLETION(wait);
3008 struct ata_queued_cmd *qc;
3009 unsigned long flags;
3012 DPRINTK("ATAPI request sense\n");
3014 qc = ata_qc_new_init(ap, dev);
3017 /* FIXME: is this needed? */
3018 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
3020 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
3021 qc->dma_dir = DMA_FROM_DEVICE;
3023 memset(&qc->cdb, 0, ap->cdb_len);
3024 qc->cdb[0] = REQUEST_SENSE;
3025 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3027 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3028 qc->tf.command = ATA_CMD_PACKET;
3030 qc->tf.protocol = ATA_PROT_ATAPI;
3031 qc->tf.lbam = (8 * 1024) & 0xff;
3032 qc->tf.lbah = (8 * 1024) >> 8;
3033 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
3035 qc->waiting = &wait;
3036 qc->complete_fn = ata_qc_complete_noop;
3038 spin_lock_irqsave(&ap->host_set->lock, flags);
3039 rc = ata_qc_issue(qc);
3040 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3043 ata_port_disable(ap);
3045 wait_for_completion(&wait);
3051 * ata_qc_timeout - Handle timeout of queued command
3052 * @qc: Command that timed out
3054 * Some part of the kernel (currently, only the SCSI layer)
3055 * has noticed that the active command on the port has not
3056 * completed after a specified length of time. Handle this
3057 * condition by disabling DMA (if necessary) and completing
3058 * transactions, with error if necessary.
3060 * This also handles the case of the "lost interrupt", where
3061 * for some reason (possibly hardware bug, possibly driver bug)
3062 * an interrupt was not delivered to the driver, even though the
3063 * transaction completed successfully.
3066 * Inherited from SCSI layer (none, can sleep)
3069 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3071 struct ata_port *ap = qc->ap;
3072 struct ata_host_set *host_set = ap->host_set;
3073 struct ata_device *dev = qc->dev;
3074 u8 host_stat = 0, drv_stat;
3075 unsigned long flags;
3079 /* FIXME: doesn't this conflict with timeout handling? */
3080 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
3081 struct scsi_cmnd *cmd = qc->scsicmd;
3083 if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
3085 /* finish completing original command */
3086 spin_lock_irqsave(&host_set->lock, flags);
3087 __ata_qc_complete(qc);
3088 spin_unlock_irqrestore(&host_set->lock, flags);
3090 atapi_request_sense(ap, dev, cmd);
3092 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
3093 scsi_finish_command(cmd);
3099 spin_lock_irqsave(&host_set->lock, flags);
3101 /* hack alert! We cannot use the supplied completion
3102 * function from inside the ->eh_strategy_handler() thread.
3103 * libata is the only user of ->eh_strategy_handler() in
3104 * any kernel, so the default scsi_done() assumes it is
3105 * not being called from the SCSI EH.
3107 qc->scsidone = scsi_finish_command;
3109 switch (qc->tf.protocol) {
3112 case ATA_PROT_ATAPI_DMA:
3113 host_stat = ap->ops->bmdma_status(ap);
3115 /* before we do anything else, clear DMA-Start bit */
3116 ap->ops->bmdma_stop(qc);
3122 drv_stat = ata_chk_status(ap);
3124 /* ack bmdma irq events */
3125 ap->ops->irq_clear(ap);
3127 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3128 ap->id, qc->tf.command, drv_stat, host_stat);
3130 /* complete taskfile transaction */
3131 ata_qc_complete(qc, drv_stat);
3135 spin_unlock_irqrestore(&host_set->lock, flags);
3142 * ata_eng_timeout - Handle timeout of queued command
3143 * @ap: Port on which timed-out command is active
3145 * Some part of the kernel (currently, only the SCSI layer)
3146 * has noticed that the active command on port @ap has not
3147 * completed after a specified length of time. Handle this
3148 * condition by disabling DMA (if necessary) and completing
3149 * transactions, with error if necessary.
3151 * This also handles the case of the "lost interrupt", where
3152 * for some reason (possibly hardware bug, possibly driver bug)
3153 * an interrupt was not delivered to the driver, even though the
3154 * transaction completed successfully.
3157 * Inherited from SCSI layer (none, can sleep)
3160 void ata_eng_timeout(struct ata_port *ap)
3162 struct ata_queued_cmd *qc;
3166 qc = ata_qc_from_tag(ap, ap->active_tag);
3168 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3180 * ata_qc_new - Request an available ATA command, for queueing
3181 * @ap: Port from which a free command slot is allocated
3188 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3190 struct ata_queued_cmd *qc = NULL;
3193 for (i = 0; i < ATA_MAX_QUEUE; i++)
3194 if (!test_and_set_bit(i, &ap->qactive)) {
3195 qc = ata_qc_from_tag(ap, i);
3206 * ata_qc_new_init - Request an available ATA command, and initialize it
3207 * @ap: Port associated with device @dev
3208 * @dev: Device for which we request an available command structure
3214 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3215 struct ata_device *dev)
3217 struct ata_queued_cmd *qc;
3219 qc = ata_qc_new(ap);
3226 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
3228 qc->nbytes = qc->curbytes = 0;
3230 ata_tf_init(ap, &qc->tf, dev->devno);
3232 if (dev->flags & ATA_DFLAG_LBA) {
3233 qc->tf.flags |= ATA_TFLAG_LBA;
3235 if (dev->flags & ATA_DFLAG_LBA48)
3236 qc->tf.flags |= ATA_TFLAG_LBA48;
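
/*
 * Illustrative sketch (not part of the original file): a caller obtains
 * a freshly initialized qc and fills in a simple non-data taskfile.
 * Error handling and the actual issue are omitted; see
 * atapi_request_sense() above for a complete in-file user of
 * ata_qc_new_init().
 */
static inline struct ata_queued_cmd *
example_prep_nodata(struct ata_port *ap, struct ata_device *dev, u8 command)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(ap, dev);
	if (qc) {
		qc->tf.command = command;
		qc->tf.flags |= ATA_TFLAG_DEVICE;	/* write device register */
		qc->tf.protocol = ATA_PROT_NODATA;	/* no data transfer */
	}
	return qc;
}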
3243 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3248 static void __ata_qc_complete(struct ata_queued_cmd *qc)
3250 struct ata_port *ap = qc->ap;
3251 unsigned int tag, do_clear = 0;
3255 if (likely(ata_tag_valid(tag))) {
3256 if (tag == ap->active_tag)
3257 ap->active_tag = ATA_TAG_POISON;
3258 qc->tag = ATA_TAG_POISON;
3263 struct completion *waiting = qc->waiting;
3268 if (likely(do_clear))
3269 clear_bit(tag, &ap->qactive);
3273 * ata_qc_free - free unused ata_queued_cmd
3274 * @qc: Command to free
3276 * Free an unused ata_queued_cmd object
3277 * when something prevents it from being issued.
3280 * spin_lock_irqsave(host_set lock)
3283 void ata_qc_free(struct ata_queued_cmd *qc)
3285 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3286 assert(qc->waiting == NULL); /* nothing should be waiting */
3288 __ata_qc_complete(qc);
3292 * ata_qc_complete - Complete an active ATA command
3293 * @qc: Command to complete
3294 * @drv_stat: ATA Status register contents
3296 * Indicate to the mid and upper layers that an ATA
3297 * command has completed, with either an ok or not-ok status.
3300 * spin_lock_irqsave(host_set lock)
3304 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3308 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3309 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3311 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3314 /* atapi: mark qc as inactive to prevent the interrupt handler
3315 * from completing the command twice later, before the error handler
3316 * is called. (when rc != 0 and atapi request sense is needed)
3318 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3320 /* call completion callback */
3321 rc = qc->complete_fn(qc, drv_stat);
3323 /* if callback indicates not to complete command (non-zero),
3324 * return immediately
3329 __ata_qc_complete(qc);
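
/*
 * Illustrative sketch (not part of the original file): a complete_fn
 * hook with the same signature as ata_qc_complete_noop() above.
 * Returning non-zero tells ata_qc_complete() to return without calling
 * __ata_qc_complete(), leaving the qc alive for the caller to finish
 * (and free) itself later.
 */
static int example_defer_completion(struct ata_queued_cmd *qc, u8 drv_stat)
{
	/* caller keeps ownership of the qc and completes it elsewhere */
	return 1;
}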
3334 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3336 struct ata_port *ap = qc->ap;
3338 switch (qc->tf.protocol) {
3340 case ATA_PROT_ATAPI_DMA:
3343 case ATA_PROT_ATAPI:
3345 case ATA_PROT_PIO_MULT:
3346 if (ap->flags & ATA_FLAG_PIO_DMA)
3359 * ata_qc_issue - issue taskfile to device
3360 * @qc: command to issue to device
3362 * Prepare an ATA command for submission to the device.
3363 * This includes mapping the data into a DMA-able
3364 * area, filling in the S/G table, and finally
3365 * writing the taskfile to hardware, starting the command.
3368 * spin_lock_irqsave(host_set lock)
3371 * Zero on success, negative on error.
3374 int ata_qc_issue(struct ata_queued_cmd *qc)
3376 struct ata_port *ap = qc->ap;
3378 if (ata_should_dma_map(qc)) {
3379 if (qc->flags & ATA_QCFLAG_SG) {
3380 if (ata_sg_setup(qc))
3382 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3383 if (ata_sg_setup_one(qc))
3387 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3390 ap->ops->qc_prep(qc);
3392 qc->ap->active_tag = qc->tag;
3393 qc->flags |= ATA_QCFLAG_ACTIVE;
3395 return ap->ops->qc_issue(qc);
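
/*
 * Illustrative sketch (not part of the original file): as the LOCKING
 * note above says, ata_qc_issue() must be called with the host_set lock
 * held.  A sleeping caller would wrap it like this (hypothetical
 * helper), mirroring what atapi_request_sense() does above.
 */
static inline int example_issue_locked(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	return rc;
}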
3403 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3404 * @qc: command to issue to device
3406 * Using various libata functions and hooks, this function
3407 * starts an ATA command. ATA commands are grouped into
3408 * classes called "protocols", and issuing each type of protocol
3409 * is slightly different.
3411 * May be used as the qc_issue() entry in ata_port_operations.
3414 * spin_lock_irqsave(host_set lock)
3417 * Zero on success, negative on error.
3420 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3422 struct ata_port *ap = qc->ap;
3424 ata_dev_select(ap, qc->dev->devno, 1, 0);
3426 switch (qc->tf.protocol) {
3427 case ATA_PROT_NODATA:
3428 ata_tf_to_host_nolock(ap, &qc->tf);
3432 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3433 ap->ops->bmdma_setup(qc); /* set up bmdma */
3434 ap->ops->bmdma_start(qc); /* initiate bmdma */
3437 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */