Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
[linux-2.6.git] / drivers / scsi / ipr.c
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *      - Ultra 320 SCSI controller
38  *      - PCI-X host interface
39  *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *      - Non-Volatile Write Cache
41  *      - Supports attachment of non-RAID disks, tape, and optical devices
42  *      - RAID Levels 0, 5, 10
43  *      - Hot spare
44  *      - Background Parity Checking
45  *      - Background Data Scrubbing
46  *      - Ability to increase the capacity of an existing RAID 5 disk array
47  *              by adding disks
48  *
49  * Driver Features:
50  *      - Tagged command queuing
51  *      - Adapter microcode download
52  *      - PCI hot plug
53  *      - SCSI device hot plug
54  *
55  */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <linux/libata.h>
75 #include <linux/hdreg.h>
76 #include <linux/reboot.h>
77 #include <linux/stringify.h>
78 #include <asm/io.h>
79 #include <asm/irq.h>
80 #include <asm/processor.h>
81 #include <scsi/scsi.h>
82 #include <scsi/scsi_host.h>
83 #include <scsi/scsi_tcq.h>
84 #include <scsi/scsi_eh.h>
85 #include <scsi/scsi_cmnd.h>
86 #include "ipr.h"
87
/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);		/* list of all probed adapters, guarded by ipr_driver_lock */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* error log verbosity (module param) */
static unsigned int ipr_max_speed = 1;		/* index into ipr_max_bus_speeds[]; 1 == U160 */
static int ipr_testmode = 0;			/* allow unsupported configurations (dangerous) */
static unsigned int ipr_fastfail = 0;		/* reduce timeouts/retries for multipath setups */
static unsigned int ipr_transop_timeout = 0;	/* seconds to wait for adapter operational; 0 = driver default */
static unsigned int ipr_debug = 0;		/* extra debug logging */
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;	/* cap on physical devices supported */
static unsigned int ipr_dual_ioa_raid = 1;	/* enable dual-adapter RAID support */
static DEFINE_SPINLOCK(ipr_driver_lock);	/* protects ipr_ioa_head */
101
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			/* Positional initializer for the register-offset
			 * sub-struct (presumably the .regs member of
			 * ipr_chip_cfg_t — confirm against ipr.h). All
			 * values are BAR-relative MMIO offsets. */
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC (SIS64): distinct 64-bit and 32-bit register views,
	   * plus init-feedback and dump registers the older chips lack. */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};
174
/*
 * PCI ID -> chip description map. Each row is:
 * { vendor ID, device ID, interrupt type (LSI vs MSI), SIS interface
 *   revision (32- vs 64-bit), pointer into ipr_chip_cfg[] }.
 */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
};
186
/* Bus speed limits, indexed by the ipr_max_speed module parameter (0-2) */
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
190
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
/* NOTE(review): fastfail, transop_timeout, debug, dual_ioa_raid and
 * max_devs are declared "unsigned int" above but registered here with
 * the "int" param type. The backing variable type and module_param type
 * should agree — verify against ipr.h/ipr.c upstream before changing
 * either side, since several of these are used in comparisons elsewhere
 * in the file. */
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
212
/*  A constant array of IOASCs/URCs/Error Messages */
/*
 * Each entry is { ioasc, flag, log_level, message }. The first field is
 * the 32-bit IOASC reported by the adapter; the last is the message
 * logged for it, prefixed with its URC where one exists. The meaning of
 * the middle two fields (presumably "log the IOASA" and the minimum
 * ipr_log_level at which to log — confirm against struct
 * ipr_error_table_t in ipr.h) is not visible from this file section.
 * NOTE(review): the 0x04490000 entry below sits out of ascending IOASC
 * order (between 0x04448700 and 0x04449200) — harmless if lookup is a
 * linear scan, but worth confirming.
 */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
501
/*
 * SES (enclosure) table: { product ID match string, bus-position compare
 * mask, max bus speed in MB/s }. In the compare mask, 'X' presumably
 * matches any character and '*' marks positions handled specially —
 * confirm against the routine that consumes ipr_ses_table (outside this
 * section).
 */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
517
518 /*
519  *  Function Prototypes
520  */
521 static int ipr_reset_alert(struct ipr_cmnd *);
522 static void ipr_process_ccn(struct ipr_cmnd *);
523 static void ipr_process_error(struct ipr_cmnd *);
524 static void ipr_reset_ioa_job(struct ipr_cmnd *);
525 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
526                                    enum ipr_shutdown_type);
527
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Records the op code, command index, resource handle and caller-supplied
 * data into the adapter's in-memory trace ring.
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* NOTE(review): post-increment with no visible bound — this relies
	 * on trace_index's type/mask (declared in ipr.h, not visible here)
	 * wrapping within the trace array, and is not atomic under
	 * concurrent callers. Confirm the locking/typing in ipr.h. */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	/* ATA register layout differs between the 64-bit and 32-bit SIS */
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
559
560 /**
561  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
562  * @ipr_cmd:    ipr command struct
563  *
564  * Return value:
565  *      none
566  **/
567 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
568 {
569         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
570         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
571         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
572         dma_addr_t dma_addr = ipr_cmd->dma_addr;
573
574         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
575         ioarcb->data_transfer_length = 0;
576         ioarcb->read_data_transfer_length = 0;
577         ioarcb->ioadl_len = 0;
578         ioarcb->read_ioadl_len = 0;
579
580         if (ipr_cmd->ioa_cfg->sis64) {
581                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
582                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
583                 ioasa64->u.gata.status = 0;
584         } else {
585                 ioarcb->write_ioadl_addr =
586                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
587                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
588                 ioasa->u.gata.status = 0;
589         }
590
591         ioasa->hdr.ioasc = 0;
592         ioasa->hdr.residual_data_len = 0;
593         ipr_cmd->scsi_cmd = NULL;
594         ipr_cmd->qc = NULL;
595         ipr_cmd->sense_buffer[0] = 0;
596         ipr_cmd->dma_use_sg = 0;
597 }
598
599 /**
600  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
601  * @ipr_cmd:    ipr command struct
602  *
603  * Return value:
604  *      none
605  **/
606 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
607 {
608         ipr_reinit_ipr_cmnd(ipr_cmd);
609         ipr_cmd->u.scratch = 0;
610         ipr_cmd->sibling = NULL;
611         init_timer(&ipr_cmd->timer);
612 }
613
614 /**
615  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
616  * @ioa_cfg:    ioa config struct
617  *
618  * Return value:
619  *      pointer to ipr command struct
620  **/
621 static
622 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
623 {
624         struct ipr_cmnd *ipr_cmd;
625
626         ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
627         list_del(&ipr_cmd->queue);
628         ipr_init_ipr_cmnd(ipr_cmd);
629
630         return ipr_cmd;
631 }
632
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts.
	 * The ordering here is load-bearing: the mask must be written
	 * before the pending interrupts are cleared below. SIS64 chips
	 * take a 64-bit mask write; SIS32 a 32-bit one. */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts.
	 * SIS64 additionally clears the upper 32-bit clear register;
	 * both SIS levels then clear the caller-specified bits via the
	 * 32-bit clear register. */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	/* Read back to flush the posted MMIO writes before returning
	 * (value itself is unused). */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
664
665 /**
666  * ipr_save_pcix_cmd_reg - Save PCI-X command register
667  * @ioa_cfg:    ioa config struct
668  *
669  * Return value:
670  *      0 on success / -EIO on failure
671  **/
672 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
673 {
674         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
675
676         if (pcix_cmd_reg == 0)
677                 return 0;
678
679         if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
680                                  &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
681                 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
682                 return -EIO;
683         }
684
685         ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
686         return 0;
687 }
688
689 /**
690  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
691  * @ioa_cfg:    ioa config struct
692  *
693  * Return value:
694  *      0 on success / -EIO on failure
695  **/
696 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
697 {
698         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
699
700         if (pcix_cmd_reg) {
701                 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
702                                           ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
703                         dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
704                         return -EIO;
705                 }
706         }
707
708         return 0;
709 }
710
711 /**
712  * ipr_sata_eh_done - done function for aborted SATA commands
713  * @ipr_cmd:    ipr command struct
714  *
715  * This function is invoked for ops generated to SATA
716  * devices which are being aborted.
717  *
718  * Return value:
719  *      none
720  **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        /* Flag the op as failed for libata and mark the device busy. */
        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        /* Return the command block to the free list before completing
         * the qc back to libata. */
        list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
        ata_qc_complete(qc);
}
732
733 /**
734  * ipr_scsi_eh_done - mid-layer done function for aborted ops
735  * @ipr_cmd:    ipr command struct
736  *
737  * This function is invoked by the interrupt handler for
738  * ops generated by the SCSI mid-layer which are being aborted.
739  *
740  * Return value:
741  *      none
742  **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        /* Fail the command back to the midlayer. */
        scsi_cmd->result |= (DID_ERROR << 16);

        /* Unmap DMA, complete to the midlayer, then recycle the block. */
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
754
755 /**
756  * ipr_fail_all_ops - Fails all outstanding ops.
757  * @ioa_cfg:    ioa config struct
758  *
759  * This function fails all outstanding ops.
760  *
761  * Return value:
762  *      none
763  **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;

        ENTER;
        /* Use the _safe iterator: each op is unlinked here and may be
         * requeued by its done handler. */
        list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
                list_del(&ipr_cmd->queue);

                /* Fabricate an IOASA indicating the adapter was reset. */
                ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

                /* Route midlayer/libata ops through their EH done paths. */
                if (ipr_cmd->scsi_cmd)
                        ipr_cmd->done = ipr_scsi_eh_done;
                else if (ipr_cmd->qc)
                        ipr_cmd->done = ipr_sata_eh_done;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
                /* Cancel the op's timeout before completing it. */
                del_timer(&ipr_cmd->timer);
                ipr_cmd->done(ipr_cmd);
        }

        LEAVE;
}
787
788 /**
789  * ipr_send_command -  Send driver initiated requests.
790  * @ipr_cmd:            ipr command struct
791  *
792  * This function sends a command to the adapter using the correct write call.
793  * In the case of sis64, calculate the ioarcb size required. Then or in the
794  * appropriate bits.
795  *
796  * Return value:
797  *      none
798  **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* sis64 encodes the ioarcb size in the low bits of the
                 * address written to IOARRIN. */
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
816
817 /**
818  * ipr_do_req -  Send driver initiated requests.
819  * @ipr_cmd:            ipr command struct
820  * @done:                       done function
821  * @timeout_func:       timeout function
822  * @timeout:            timeout value
823  *
824  * This function sends the specified command to the adapter with the
825  * timeout given. The done function is invoked on command completion.
826  *
827  * Return value:
828  *      none
829  **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        /* Track the op as outstanding before handing it to the HW. */
        list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

        ipr_cmd->done = done;

        /* Arm the per-command timeout; the cast adapts the ipr_cmnd
         * handler to the timer's unsigned long callback signature. */
        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        /* Order all descriptor memory writes before ringing the adapter. */
        mb();

        ipr_send_command(ipr_cmd);
}
852
853 /**
854  * ipr_internal_cmd_done - Op done function for an internally generated op.
855  * @ipr_cmd:    ipr command struct
856  *
857  * This function is the op done function for an internally generated,
858  * blocking op. It simply wakes the sleeping thread.
859  *
860  * Return value:
861  *      none
862  **/
863 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
864 {
865         if (ipr_cmd->sibling)
866                 ipr_cmd->sibling = NULL;
867         else
868                 complete(&ipr_cmd->completion);
869 }
870
871 /**
872  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
873  * @ipr_cmd:    ipr command struct
874  * @dma_addr:   dma address
875  * @len:        transfer length
876  * @flags:      ioadl flag value
877  *
878  * This function initializes an ioadl in the case where there is only a single
879  * descriptor.
880  *
881  * Return value:
882  *      nothing
883  **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        /* Single-descriptor transfer. */
        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                /* 64-bit descriptor: flags and length are separate fields. */
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                /* 32-bit descriptor packs flags and length together. */
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        /* Reads use the dedicated read ioadl fields. */
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}
915
916 /**
917  * ipr_send_blocking_cmd - Send command and sleep on its completion.
918  * @ipr_cmd:    ipr command struct
919  * @timeout_func:       function to invoke if command times out
920  * @timeout:    timeout
921  *
922  * Return value:
923  *      none
924  **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        /* Drop the host lock while sleeping so the interrupt handler can
         * run and complete the op; caller holds host_lock on entry and
         * gets it back on return. */
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}
938
939 /**
940  * ipr_send_hcam - Send an HCAM to the adapter.
941  * @ioa_cfg:    ioa config struct
942  * @type:               HCAM type
943  * @hostrcb:    hostrcb struct
944  *
945  * This function will send a Host Controlled Async command to the adapter.
946  * If HCAMs are currently not allowed to be issued to the adapter, it will
947  * place the hostrcb on the free queue.
948  *
949  * Return value:
950  *      none
951  **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                /* Build the HCAM CDB: opcode, HCAM type, and the buffer
                 * length in bytes 7/8 (big-endian). */
                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                /* Completion routing depends on the HCAM type. */
                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                /* Order the CDB/ioadl writes before ringing the adapter. */
                mb();

                ipr_send_command(ipr_cmd);
        } else {
                /* Adapter not accepting commands: park the hostrcb. */
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}
990
991 /**
992  * ipr_update_ata_class - Update the ata class in the resource entry
993  * @res:        resource entry struct
994  * @proto:      cfgte device bus protocol value
995  *
996  * Return value:
997  *      none
998  **/
999 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1000 {
1001         switch(proto) {
1002         case IPR_PROTO_SATA:
1003         case IPR_PROTO_SAS_STP:
1004                 res->ata_class = ATA_DEV_ATA;
1005                 break;
1006         case IPR_PROTO_SATA_ATAPI:
1007         case IPR_PROTO_SAS_STP_ATAPI:
1008                 res->ata_class = ATA_DEV_ATAPI;
1009                 break;
1010         default:
1011                 res->ata_class = ATA_DEV_UNKNOWN;
1012                 break;
1013         };
1014 }
1015
1016 /**
1017  * ipr_init_res_entry - Initialize a resource entry struct.
1018  * @res:        resource entry struct
1019  * @cfgtew:     config table entry wrapper struct
1020  *
1021  * Return value:
1022  *      none
1023  **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        /* Reset per-device state before filling in the new identity. */
        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                /* NOTE(review): res->lun is derived from res->dev_lun here,
                 * before dev_lun.scsi_lun is updated below for generic SCSI
                 * devices — confirm this ordering is intended. */
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        /* Reuse the target id of another LUN on the same
                         * physical device (same dev_id), if one is known. */
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }

                        memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                sizeof(res->dev_lun.scsi_lun));
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        /* Arrays and volume sets live on their own virtual
                         * buses with ids allocated from separate bitmaps. */
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                /* Legacy (non-sis64) config table entry carries the
                 * bus/target/lun address directly. */
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
        }

        ipr_update_ata_class(res, proto);
}
1102
1103 /**
1104  * ipr_is_same_device - Determine if two devices are the same.
1105  * @res:        resource entry struct
1106  * @cfgtew:     config table entry wrapper struct
1107  *
1108  * Return value:
1109  *      1 if the devices are the same / 0 otherwise
1110  **/
1111 static int ipr_is_same_device(struct ipr_resource_entry *res,
1112                               struct ipr_config_table_entry_wrapper *cfgtew)
1113 {
1114         if (res->ioa_cfg->sis64) {
1115                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1116                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1117                         !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1118                                         sizeof(cfgtew->u.cfgte64->lun))) {
1119                         return 1;
1120                 }
1121         } else {
1122                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1123                     res->target == cfgtew->u.cfgte->res_addr.target &&
1124                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1125                         return 1;
1126         }
1127
1128         return 0;
1129 }
1130
1131 /**
1132  * ipr_format_res_path - Format the resource path for printing.
1133  * @res_path:   resource path
1134  * @buf:        buffer
1135  *
1136  * Return value:
1137  *      pointer to buffer
1138  **/
1139 static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1140 {
1141         int i;
1142         char *p = buffer;
1143
1144         res_path[0] = '\0';
1145         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1146         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1147                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1148
1149         return buffer;
1150 }
1151
1152 /**
1153  * ipr_update_res_entry - Update the resource entry.
1154  * @res:        resource entry struct
1155  * @cfgtew:     config table entry wrapper struct
1156  *
1157  * Return value:
1158  *      none
1159  **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
                                 struct ipr_config_table_entry_wrapper *cfgtew)
{
        char buffer[IPR_MAX_RES_PATH_LENGTH];
        unsigned int proto;
        int new_path = 0;

        if (res->ioa_cfg->sis64) {
                res->flags = cfgtew->u.cfgte64->flags;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->type = cfgtew->u.cfgte64->res_type & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL64(res);
                proto = cfgtew->u.cfgte64->proto;
                res->res_handle = cfgtew->u.cfgte64->res_handle;
                res->dev_id = cfgtew->u.cfgte64->dev_id;

                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));

                /* Detect a resource path change so it can be logged. */
                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
                                        sizeof(res->res_path))) {
                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                                sizeof(res->res_path));
                        new_path = 1;
                }

                /* Only announce the new path once the device is known
                 * to the midlayer (sdev attached). */
                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
                                    ipr_format_res_path(res->res_path, buffer,
                                                        sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL(res);
                proto = cfgtew->u.cfgte->proto;
                res->res_handle = cfgtew->u.cfgte->res_handle;
        }

        /* Refresh the ata class from the (possibly changed) protocol. */
        ipr_update_ata_class(res, proto);
}
1211
1212 /**
1213  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1214  *                        for the resource.
1215  * @res:        resource entry struct
1216  * @cfgtew:     config table entry wrapper struct
1217  *
1218  * Return value:
1219  *      none
1220  **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
        struct ipr_resource_entry *gscsi_res = NULL;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

        /* Target id bitmaps are only maintained for sis64 adapters. */
        if (!ioa_cfg->sis64)
                return;

        if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
                clear_bit(res->target, ioa_cfg->array_ids);
        else if (res->bus == IPR_VSET_VIRTUAL_BUS)
                clear_bit(res->target, ioa_cfg->vset_ids);
        else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                /* Other LUNs of the same physical device (same dev_id)
                 * share this target id — only release it when this is
                 * the last user. */
                list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
                        if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
                                return;
                clear_bit(res->target, ioa_cfg->target_ids);

        } else if (res->bus == 0)
                clear_bit(res->target, ioa_cfg->target_ids);
}
1242
1243 /**
1244  * ipr_handle_config_change - Handle a config change from the adapter
1245  * @ioa_cfg:    ioa config struct
1246  * @hostrcb:    hostrcb
1247  *
1248  * Return value:
1249  *      none
1250  **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
                                     struct ipr_hostrcb *hostrcb)
{
        struct ipr_resource_entry *res = NULL;
        struct ipr_config_table_entry_wrapper cfgtew;
        __be32 cc_res_handle;

        /* Treat as a "new device notification" until proven otherwise. */
        u32 is_ndn = 1;

        if (ioa_cfg->sis64) {
                cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
                cc_res_handle = cfgtew.u.cfgte64->res_handle;
        } else {
                cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
                cc_res_handle = cfgtew.u.cfgte->res_handle;
        }

        /* Is the change for a resource we already track? */
        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
                if (res->res_handle == cc_res_handle) {
                        is_ndn = 0;
                        break;
                }
        }

        if (is_ndn) {
                if (list_empty(&ioa_cfg->free_res_q)) {
                        /* No free resource entries: re-arm the HCAM and
                         * drop this notification. */
                        ipr_send_hcam(ioa_cfg,
                                      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
                                      hostrcb);
                        return;
                }

                res = list_entry(ioa_cfg->free_res_q.next,
                                 struct ipr_resource_entry, queue);

                list_del(&res->queue);
                ipr_init_res_entry(res, &cfgtew);
                list_add_tail(&res->queue, &ioa_cfg->used_res_q);
        }

        ipr_update_res_entry(res, &cfgtew);

        if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
                if (res->sdev) {
                        /* Device is exposed to the midlayer: mark it and
                         * let the worker thread remove it. */
                        res->del_from_ml = 1;
                        res->res_handle = IPR_INVALID_RES_HANDLE;
                        if (ioa_cfg->allow_ml_add_del)
                                schedule_work(&ioa_cfg->work_q);
                } else {
                        /* Never exposed: release it directly. */
                        ipr_clear_res_target(res);
                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
                }
        } else if (!res->sdev) {
                res->add_to_ml = 1;
                if (ioa_cfg->allow_ml_add_del)
                        schedule_work(&ioa_cfg->work_q);
        }

        /* Re-arm the config change HCAM with the adapter. */
        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
1311
1312 /**
1313  * ipr_process_ccn - Op done function for a CCN.
1314  * @ipr_cmd:    ipr command struct
1315  *
1316  * This function is the op done function for a configuration
1317  * change notification host controlled async from the adapter.
1318  *
1319  * Return value:
1320  *      none
1321  **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

        /* Unlink the hostrcb and recycle the command block first. */
        list_del(&hostrcb->queue);
        list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

        if (ioasc) {
                /* IOA-was-reset failures are expected; log anything else. */
                if (ioasc != IPR_IOASC_IOA_WAS_RESET)
                        dev_err(&ioa_cfg->pdev->dev,
                                "Host RCB failed with IOASC: 0x%08X\n", ioasc);

                /* Re-arm the HCAM without processing the failed buffer. */
                ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
        } else {
                ipr_handle_config_change(ioa_cfg, hostrcb);
        }
}
1341
1342 /**
1343  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1344  * @i:          index into buffer
1345  * @buf:                string to modify
1346  *
1347  * This function will strip all trailing whitespace, pad the end
1348  * of the string with a single space, and NULL terminate the string.
1349  *
1350  * Return value:
1351  *      new length of string
1352  **/
static int strip_and_pad_whitespace(int i, char *buf)
{
        /* Back up over trailing spaces, never moving past index 0. */
        int end = i;

        while (end && buf[end] == ' ')
                end--;

        /* Leave exactly one trailing space, then NUL-terminate. */
        buf[end + 1] = ' ';
        buf[end + 2] = '\0';

        return end + 2;
}
1361
1362 /**
1363  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1364  * @prefix:             string to print at start of printk
1365  * @hostrcb:    hostrcb pointer
1366  * @vpd:                vendor/product id/sn struct
1367  *
1368  * Return value:
1369  *      none
1370  **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
                                struct ipr_vpd *vpd)
{
        /* Room for all three fixed-width fields plus separators and NUL. */
        char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
        int i = 0;

        /* Append each field, stripping its trailing spaces and leaving a
         * single separating space; i tracks the next write offset. */
        memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
        i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

        memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
        i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

        memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
        buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

        ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
1388
1389 /**
1390  * ipr_log_vpd - Log the passed VPD to the error log.
1391  * @vpd:                vendor/product id/sn struct
1392  *
1393  * Return value:
1394  *      none
1395  **/
1396 static void ipr_log_vpd(struct ipr_vpd *vpd)
1397 {
1398         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1399                     + IPR_SERIAL_NUM_LEN];
1400
1401         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1402         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1403                IPR_PROD_ID_LEN);
1404         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1405         ipr_err("Vendor/Product ID: %s\n", buffer);
1406
1407         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1408         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1409         ipr_err("    Serial Number: %s\n", buffer);
1410 }
1411
1412 /**
1413  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1414  * @prefix:             string to print at start of printk
1415  * @hostrcb:    hostrcb pointer
1416  * @vpd:                vendor/product id/sn/wwn struct
1417  *
1418  * Return value:
1419  *      none
1420  **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
                                    struct ipr_ext_vpd *vpd)
{
        /* Log VPID/SN, then the WWN as two big-endian 32-bit halves. */
        ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
        ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
                     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
1428
1429 /**
1430  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1431  * @vpd:                vendor/product id/sn/wwn struct
1432  *
1433  * Return value:
1434  *      none
1435  **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
        /* Log VPD, then the WWN as two big-endian 32-bit halves. */
        ipr_log_vpd(&vpd->vpd);
        ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
                be32_to_cpu(vpd->wwid[1]));
}
1442
1443 /**
1444  * ipr_log_enhanced_cache_error - Log a cache error.
1445  * @ioa_cfg:    ioa config struct
1446  * @hostrcb:    hostrcb struct
1447  *
1448  * Return value:
1449  *      none
1450  **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
                                         struct ipr_hostrcb *hostrcb)
{
        struct ipr_hostrcb_type_12_error *error;

        /* The type 12 payload lives at a different offset in the sis64
         * hostrcb layout. */
        if (ioa_cfg->sis64)
                error = &hostrcb->hcam.u.error64.u.type_12_error;
        else
                error = &hostrcb->hcam.u.error.u.type_12_error;

        ipr_err("-----Current Configuration-----\n");
        ipr_err("Cache Directory Card Information:\n");
        ipr_log_ext_vpd(&error->ioa_vpd);
        ipr_err("Adapter Card Information:\n");
        ipr_log_ext_vpd(&error->cfc_vpd);

        ipr_err("-----Expected Configuration-----\n");
        ipr_err("Cache Directory Card Information:\n");
        ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
        ipr_err("Adapter Card Information:\n");
        ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

        ipr_err("Additional IOA Data: %08X %08X %08X\n",
                     be32_to_cpu(error->ioa_data[0]),
                     be32_to_cpu(error->ioa_data[1]),
                     be32_to_cpu(error->ioa_data[2]));
}
1478
1479 /**
1480  * ipr_log_cache_error - Log a cache error.
1481  * @ioa_cfg:    ioa config struct
1482  * @hostrcb:    hostrcb struct
1483  *
1484  * Return value:
1485  *      none
1486  **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
                                struct ipr_hostrcb *hostrcb)
{
        /* Legacy (non-enhanced) variant: type 02 payload, plain VPD. */
        struct ipr_hostrcb_type_02_error *error =
                &hostrcb->hcam.u.error.u.type_02_error;

        ipr_err("-----Current Configuration-----\n");
        ipr_err("Cache Directory Card Information:\n");
        ipr_log_vpd(&error->ioa_vpd);
        ipr_err("Adapter Card Information:\n");
        ipr_log_vpd(&error->cfc_vpd);

        ipr_err("-----Expected Configuration-----\n");
        ipr_err("Cache Directory Card Information:\n");
        ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
        ipr_err("Adapter Card Information:\n");
        ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

        ipr_err("Additional IOA Data: %08X %08X %08X\n",
                     be32_to_cpu(error->ioa_data[0]),
                     be32_to_cpu(error->ioa_data[1]),
                     be32_to_cpu(error->ioa_data[2]));
}
1510
1511 /**
1512  * ipr_log_enhanced_config_error - Log a configuration error.
1513  * @ioa_cfg:    ioa config struct
1514  * @hostrcb:    hostrcb struct
1515  *
1516  * Return value:
1517  *      none
1518  **/
1519 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1520                                           struct ipr_hostrcb *hostrcb)
1521 {
1522         int errors_logged, i;
1523         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1524         struct ipr_hostrcb_type_13_error *error;
1525
1526         error = &hostrcb->hcam.u.error.u.type_13_error;
1527         errors_logged = be32_to_cpu(error->errors_logged);
1528
1529         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1530                 be32_to_cpu(error->errors_detected), errors_logged);
1531
1532         dev_entry = error->dev;
1533
1534         for (i = 0; i < errors_logged; i++, dev_entry++) {
1535                 ipr_err_separator;
1536
1537                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1538                 ipr_log_ext_vpd(&dev_entry->vpd);
1539
1540                 ipr_err("-----New Device Information-----\n");
1541                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1542
1543                 ipr_err("Cache Directory Card Information:\n");
1544                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1545
1546                 ipr_err("Adapter Card Information:\n");
1547                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1548         }
1549 }
1550
1551 /**
1552  * ipr_log_sis64_config_error - Log a device error.
1553  * @ioa_cfg:    ioa config struct
1554  * @hostrcb:    hostrcb struct
1555  *
1556  * Return value:
1557  *      none
1558  **/
1559 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1560                                        struct ipr_hostrcb *hostrcb)
1561 {
1562         int errors_logged, i;
1563         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1564         struct ipr_hostrcb_type_23_error *error;
1565         char buffer[IPR_MAX_RES_PATH_LENGTH];
1566
1567         error = &hostrcb->hcam.u.error64.u.type_23_error;
1568         errors_logged = be32_to_cpu(error->errors_logged);
1569
1570         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1571                 be32_to_cpu(error->errors_detected), errors_logged);
1572
1573         dev_entry = error->dev;
1574
1575         for (i = 0; i < errors_logged; i++, dev_entry++) {
1576                 ipr_err_separator;
1577
1578                 ipr_err("Device %d : %s", i + 1,
1579                          ipr_format_res_path(dev_entry->res_path, buffer,
1580                                              sizeof(buffer)));
1581                 ipr_log_ext_vpd(&dev_entry->vpd);
1582
1583                 ipr_err("-----New Device Information-----\n");
1584                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1585
1586                 ipr_err("Cache Directory Card Information:\n");
1587                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1588
1589                 ipr_err("Adapter Card Information:\n");
1590                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1591         }
1592 }
1593
1594 /**
1595  * ipr_log_config_error - Log a configuration error.
1596  * @ioa_cfg:    ioa config struct
1597  * @hostrcb:    hostrcb struct
1598  *
1599  * Return value:
1600  *      none
1601  **/
1602 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1603                                  struct ipr_hostrcb *hostrcb)
1604 {
1605         int errors_logged, i;
1606         struct ipr_hostrcb_device_data_entry *dev_entry;
1607         struct ipr_hostrcb_type_03_error *error;
1608
1609         error = &hostrcb->hcam.u.error.u.type_03_error;
1610         errors_logged = be32_to_cpu(error->errors_logged);
1611
1612         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1613                 be32_to_cpu(error->errors_detected), errors_logged);
1614
1615         dev_entry = error->dev;
1616
1617         for (i = 0; i < errors_logged; i++, dev_entry++) {
1618                 ipr_err_separator;
1619
1620                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1621                 ipr_log_vpd(&dev_entry->vpd);
1622
1623                 ipr_err("-----New Device Information-----\n");
1624                 ipr_log_vpd(&dev_entry->new_vpd);
1625
1626                 ipr_err("Cache Directory Card Information:\n");
1627                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1628
1629                 ipr_err("Adapter Card Information:\n");
1630                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1631
1632                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1633                         be32_to_cpu(dev_entry->ioa_data[0]),
1634                         be32_to_cpu(dev_entry->ioa_data[1]),
1635                         be32_to_cpu(dev_entry->ioa_data[2]),
1636                         be32_to_cpu(dev_entry->ioa_data[3]),
1637                         be32_to_cpu(dev_entry->ioa_data[4]));
1638         }
1639 }
1640
1641 /**
1642  * ipr_log_enhanced_array_error - Log an array configuration error.
1643  * @ioa_cfg:    ioa config struct
1644  * @hostrcb:    hostrcb struct
1645  *
1646  * Return value:
1647  *      none
1648  **/
1649 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1650                                          struct ipr_hostrcb *hostrcb)
1651 {
1652         int i, num_entries;
1653         struct ipr_hostrcb_type_14_error *error;
1654         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1655         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1656
1657         error = &hostrcb->hcam.u.error.u.type_14_error;
1658
1659         ipr_err_separator;
1660
1661         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1662                 error->protection_level,
1663                 ioa_cfg->host->host_no,
1664                 error->last_func_vset_res_addr.bus,
1665                 error->last_func_vset_res_addr.target,
1666                 error->last_func_vset_res_addr.lun);
1667
1668         ipr_err_separator;
1669
1670         array_entry = error->array_member;
1671         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1672                             sizeof(error->array_member));
1673
1674         for (i = 0; i < num_entries; i++, array_entry++) {
1675                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1676                         continue;
1677
1678                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1679                         ipr_err("Exposed Array Member %d:\n", i);
1680                 else
1681                         ipr_err("Array Member %d:\n", i);
1682
1683                 ipr_log_ext_vpd(&array_entry->vpd);
1684                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1685                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1686                                  "Expected Location");
1687
1688                 ipr_err_separator;
1689         }
1690 }
1691
1692 /**
1693  * ipr_log_array_error - Log an array configuration error.
1694  * @ioa_cfg:    ioa config struct
1695  * @hostrcb:    hostrcb struct
1696  *
1697  * Return value:
1698  *      none
1699  **/
1700 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1701                                 struct ipr_hostrcb *hostrcb)
1702 {
1703         int i;
1704         struct ipr_hostrcb_type_04_error *error;
1705         struct ipr_hostrcb_array_data_entry *array_entry;
1706         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1707
1708         error = &hostrcb->hcam.u.error.u.type_04_error;
1709
1710         ipr_err_separator;
1711
1712         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1713                 error->protection_level,
1714                 ioa_cfg->host->host_no,
1715                 error->last_func_vset_res_addr.bus,
1716                 error->last_func_vset_res_addr.target,
1717                 error->last_func_vset_res_addr.lun);
1718
1719         ipr_err_separator;
1720
1721         array_entry = error->array_member;
1722
1723         for (i = 0; i < 18; i++) {
1724                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1725                         continue;
1726
1727                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1728                         ipr_err("Exposed Array Member %d:\n", i);
1729                 else
1730                         ipr_err("Array Member %d:\n", i);
1731
1732                 ipr_log_vpd(&array_entry->vpd);
1733
1734                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1735                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1736                                  "Expected Location");
1737
1738                 ipr_err_separator;
1739
1740                 if (i == 9)
1741                         array_entry = error->array_member2;
1742                 else
1743                         array_entry++;
1744         }
1745 }
1746
1747 /**
1748  * ipr_log_hex_data - Log additional hex IOA error data.
1749  * @ioa_cfg:    ioa config struct
1750  * @data:               IOA error data
1751  * @len:                data length
1752  *
1753  * Return value:
1754  *      none
1755  **/
1756 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1757 {
1758         int i;
1759
1760         if (len == 0)
1761                 return;
1762
1763         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1764                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1765
1766         for (i = 0; i < len / 4; i += 4) {
1767                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1768                         be32_to_cpu(data[i]),
1769                         be32_to_cpu(data[i+1]),
1770                         be32_to_cpu(data[i+2]),
1771                         be32_to_cpu(data[i+3]));
1772         }
1773 }
1774
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs the failure reason, PRC, and remote IOA VPD from the type 17
 * overlay, then hex-dumps whatever data follows the fixed portion.
 *
 * Return value:
 *      none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
                                            struct ipr_hostrcb *hostrcb)
{
        struct ipr_hostrcb_type_17_error *error;

        /* The type 17 overlay lives in a different union on SIS64 adapters */
        if (ioa_cfg->sis64)
                error = &hostrcb->hcam.u.error64.u.type_17_error;
        else
                error = &hostrcb->hcam.u.error.u.type_17_error;

        /* Force NUL termination before trimming trailing whitespace */
        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
        strim(error->failure_reason);

        /*
         * NOTE(review): the PRC is read from the 32-bit layout
         * (u.error.prc) even on sis64 — confirm the field sits at a
         * compatible offset in the 64-bit layout.
         */
        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
                     be32_to_cpu(hostrcb->hcam.u.error.prc));
        ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
        /* Everything past the fixed part of the overlay is raw hex data */
        ipr_log_hex_data(ioa_cfg, error->data,
                         be32_to_cpu(hostrcb->hcam.length) -
                         (offsetof(struct ipr_hostrcb_error, u) +
                          offsetof(struct ipr_hostrcb_type_17_error, data)));
}
1804
1805 /**
1806  * ipr_log_dual_ioa_error - Log a dual adapter error.
1807  * @ioa_cfg:    ioa config struct
1808  * @hostrcb:    hostrcb struct
1809  *
1810  * Return value:
1811  *      none
1812  **/
1813 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1814                                    struct ipr_hostrcb *hostrcb)
1815 {
1816         struct ipr_hostrcb_type_07_error *error;
1817
1818         error = &hostrcb->hcam.u.error.u.type_07_error;
1819         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1820         strim(error->failure_reason);
1821
1822         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1823                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1824         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1825         ipr_log_hex_data(ioa_cfg, error->data,
1826                          be32_to_cpu(hostrcb->hcam.length) -
1827                          (offsetof(struct ipr_hostrcb_error, u) +
1828                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1829 }
1830
/* Text for the IPR_PATH_ACTIVE_MASK half of a fabric descriptor's path_state */
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

/* Text for the IPR_PATH_STATE_MASK half of a fabric descriptor's path_state */
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
1849
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Translates the descriptor's packed path_state byte into readable
 * "active" and "state" text and logs the path location, omitting fields
 * that are not applicable. Falls back to a raw dump if the state byte
 * is not recognized.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	/* path_state packs two lookups: an "active" half and a "state" half */
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			/*
			 * 0xff in cascaded_expander or phy appears to be a
			 * "field not present" sentinel; such fields are left
			 * out of the message — TODO confirm against the
			 * hostrcb layout in ipr.h.
			 */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	/* Unrecognized active/state combination: log everything raw */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
1898
1899 /**
1900  * ipr_log64_fabric_path - Log a fabric path error
1901  * @hostrcb:    hostrcb struct
1902  * @fabric:             fabric descriptor
1903  *
1904  * Return value:
1905  *      none
1906  **/
1907 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1908                                   struct ipr_hostrcb64_fabric_desc *fabric)
1909 {
1910         int i, j;
1911         u8 path_state = fabric->path_state;
1912         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1913         u8 state = path_state & IPR_PATH_STATE_MASK;
1914         char buffer[IPR_MAX_RES_PATH_LENGTH];
1915
1916         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1917                 if (path_active_desc[i].active != active)
1918                         continue;
1919
1920                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1921                         if (path_state_desc[j].state != state)
1922                                 continue;
1923
1924                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1925                                      path_active_desc[i].desc, path_state_desc[j].desc,
1926                                      ipr_format_res_path(fabric->res_path, buffer,
1927                                                          sizeof(buffer)));
1928                         return;
1929                 }
1930         }
1931
1932         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1933                 ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1934 }
1935
/* Text for the IPR_PATH_CFG_TYPE_MASK half of a config element's type_status */
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

/* Text for the IPR_PATH_CFG_STATUS_MASK half of a config element's type_status */
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

/*
 * Link rate strings, indexed by a config element's link_rate field
 * masked with IPR_PHY_LINK_RATE_MASK (16 possible values).
 */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
1976
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Translates the element's packed type_status byte into readable type
 * and status text and logs its link rate, WWN, and location. Elements
 * marked non-existent are skipped; unrecognized type/status bytes fall
 * back to a raw dump.
 *
 * Return value:
 *      none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
                              struct ipr_hostrcb_config_element *cfg)
{
        int i, j;
        /* type_status packs two lookups: a "type" half and a "status" half */
        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

        if (type == IPR_PATH_CFG_NOT_EXIST)
                return;

        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
                if (path_type_desc[i].type != type)
                        continue;

                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
                        if (path_status_desc[j].status != status)
                                continue;

                        /* IOA ports have no cascade/expander location info */
                        if (type == IPR_PATH_CFG_IOA_PORT) {
                                ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
                                             path_status_desc[j].desc, path_type_desc[i].desc,
                                             cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                             be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                        } else {
                                /*
                                 * 0xff in cascaded_expander or phy appears to
                                 * be a "field not present" sentinel; such
                                 * fields are omitted from the message.
                                 */
                                if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
                                        ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
                                                     path_status_desc[j].desc, path_type_desc[i].desc,
                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                                } else if (cfg->cascaded_expander == 0xff) {
                                        ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
                                                     path_type_desc[i].desc, cfg->phy,
                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                                } else if (cfg->phy == 0xff) {
                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
                                                     path_type_desc[i].desc, cfg->cascaded_expander,
                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                                } else {
                                        ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
                                                     "WWN=%08X%08X\n", path_status_desc[j].desc,
                                                     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
                                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                                }
                        }
                        return;
                }
        }

        /* Type or status not in the tables: log the raw type_status byte */
        ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
                     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
2043
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Translates the element's packed type_status byte into readable type
 * and status text and logs its resource path, link rate, and WWN.
 * Elements marked non-existent or not in SIS64 descriptor format are
 * skipped; unrecognized type/status bytes fall back to a raw dump.
 *
 * Return value:
 *      none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
                                struct ipr_hostrcb64_config_element *cfg)
{
        int i, j;
        u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
        /* type_status packs two lookups: a "type" half and a "status" half */
        u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
        u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
        char buffer[IPR_MAX_RES_PATH_LENGTH];

        /* Only existing elements with SIS64-format descriptors are logged */
        if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
                return;

        for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
                if (path_type_desc[i].type != type)
                        continue;

                for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
                        if (path_status_desc[j].status != status)
                                continue;

                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
                                     path_status_desc[j].desc, path_type_desc[i].desc,
                                     ipr_format_res_path(cfg->res_path, buffer,
                                                         sizeof(buffer)),
                                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
                        return;
                }
        }
        /* Type or status not in the tables: log the raw type_status byte */
        ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
                     "WWN=%08X%08X\n", cfg->type_status,
                     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
                     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
                     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
2087
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs the failure reason, then walks each variable-length fabric
 * descriptor, logging its path and every config element inside it.
 * Any bytes left after the last descriptor are hex-dumped.
 *
 * Return value:
 *      none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
                                 struct ipr_hostrcb *hostrcb)
{
        struct ipr_hostrcb_type_20_error *error;
        struct ipr_hostrcb_fabric_desc *fabric;
        struct ipr_hostrcb_config_element *cfg;
        int i, add_len;

        error = &hostrcb->hcam.u.error.u.type_20_error;
        /* Force NUL termination; the adapter does not guarantee it */
        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

        /* Bytes remaining past the fixed header: descriptors + trailing data */
        add_len = be32_to_cpu(hostrcb->hcam.length) -
                (offsetof(struct ipr_hostrcb_error, u) +
                 offsetof(struct ipr_hostrcb_type_20_error, desc));

        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
                ipr_log_fabric_path(hostrcb, fabric);
                for_each_fabric_cfg(fabric, cfg)
                        ipr_log_path_elem(hostrcb, cfg);

                /* Descriptors are variable length; step by each one's length */
                add_len -= be16_to_cpu(fabric->length);
                fabric = (struct ipr_hostrcb_fabric_desc *)
                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
        }

        /* Whatever trails the last descriptor is dumped as hex */
        ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2124
2125 /**
2126  * ipr_log_sis64_array_error - Log a sis64 array error.
2127  * @ioa_cfg:    ioa config struct
2128  * @hostrcb:    hostrcb struct
2129  *
2130  * Return value:
2131  *      none
2132  **/
2133 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2134                                       struct ipr_hostrcb *hostrcb)
2135 {
2136         int i, num_entries;
2137         struct ipr_hostrcb_type_24_error *error;
2138         struct ipr_hostrcb64_array_data_entry *array_entry;
2139         char buffer[IPR_MAX_RES_PATH_LENGTH];
2140         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2141
2142         error = &hostrcb->hcam.u.error64.u.type_24_error;
2143
2144         ipr_err_separator;
2145
2146         ipr_err("RAID %s Array Configuration: %s\n",
2147                 error->protection_level,
2148                 ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2149
2150         ipr_err_separator;
2151
2152         array_entry = error->array_member;
2153         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2154                             sizeof(error->array_member));
2155
2156         for (i = 0; i < num_entries; i++, array_entry++) {
2157
2158                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2159                         continue;
2160
2161                 if (error->exposed_mode_adn == i)
2162                         ipr_err("Exposed Array Member %d:\n", i);
2163                 else
2164                         ipr_err("Array Member %d:\n", i);
2165
2166                 ipr_err("Array Member %d:\n", i);
2167                 ipr_log_ext_vpd(&array_entry->vpd);
2168                 ipr_err("Current Location: %s",
2169                          ipr_format_res_path(array_entry->res_path, buffer,
2170                                              sizeof(buffer)));
2171                 ipr_err("Expected Location: %s",
2172                          ipr_format_res_path(array_entry->expected_res_path,
2173                                              buffer, sizeof(buffer)));
2174
2175                 ipr_err_separator;
2176         }
2177 }
2178
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs the failure reason, then walks each variable-length SIS64 fabric
 * descriptor, logging its path and every config element inside it.
 * Any bytes left after the last descriptor are hex-dumped.
 *
 * Return value:
 *      none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
                                       struct ipr_hostrcb *hostrcb)
{
        struct ipr_hostrcb_type_30_error *error;
        struct ipr_hostrcb64_fabric_desc *fabric;
        struct ipr_hostrcb64_config_element *cfg;
        int i, add_len;

        error = &hostrcb->hcam.u.error64.u.type_30_error;

        /* Force NUL termination; the adapter does not guarantee it */
        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
        ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

        /* Bytes remaining past the fixed header: descriptors + trailing data */
        add_len = be32_to_cpu(hostrcb->hcam.length) -
                (offsetof(struct ipr_hostrcb64_error, u) +
                 offsetof(struct ipr_hostrcb_type_30_error, desc));

        for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
                ipr_log64_fabric_path(hostrcb, fabric);
                for_each_fabric_cfg(fabric, cfg)
                        ipr_log64_path_elem(hostrcb, cfg);

                /* Descriptors are variable length; step by each one's length */
                add_len -= be16_to_cpu(fabric->length);
                fabric = (struct ipr_hostrcb64_fabric_desc *)
                        ((unsigned long)fabric + be16_to_cpu(fabric->length));
        }

        /* Whatever trails the last descriptor is dumped as hex */
        ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2216
2217 /**
2218  * ipr_log_generic_error - Log an adapter error.
2219  * @ioa_cfg:    ioa config struct
2220  * @hostrcb:    hostrcb struct
2221  *
2222  * Return value:
2223  *      none
2224  **/
2225 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2226                                   struct ipr_hostrcb *hostrcb)
2227 {
2228         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2229                          be32_to_cpu(hostrcb->hcam.length));
2230 }
2231
2232 /**
2233  * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2234  * @ioasc:      IOASC
2235  *
2236  * This function will return the index of into the ipr_error_table
2237  * for the specified IOASC. If the IOASC is not in the table,
2238  * 0 will be returned, which points to the entry used for unknown errors.
2239  *
2240  * Return value:
2241  *      index into the ipr_error_table
2242  **/
2243 static u32 ipr_get_error(u32 ioasc)
2244 {
2245         int i;
2246
2247         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2248                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2249                         return i;
2250
2251         return 0;
2252 }
2253
2254 /**
2255  * ipr_handle_log_data - Log an adapter error.
2256  * @ioa_cfg:    ioa config struct
2257  * @hostrcb:    hostrcb struct
2258  *
2259  * This function logs an adapter error to the system.
2260  *
2261  * Return value:
2262  *      none
2263  **/
2264 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2265                                 struct ipr_hostrcb *hostrcb)
2266 {
2267         u32 ioasc;
2268         int error_index;
2269
2270         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2271                 return;
2272
2273         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2274                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2275
2276         if (ioa_cfg->sis64)
2277                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2278         else
2279                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2280
2281         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2282             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2283                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2284                 scsi_report_bus_reset(ioa_cfg->host,
2285                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2286         }
2287
2288         error_index = ipr_get_error(ioasc);
2289
2290         if (!ipr_error_table[error_index].log_hcam)
2291                 return;
2292
2293         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2294
2295         /* Set indication we have logged an error */
2296         ioa_cfg->errors_logged++;
2297
2298         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2299                 return;
2300         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2301                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2302
2303         switch (hostrcb->hcam.overlay_id) {
2304         case IPR_HOST_RCB_OVERLAY_ID_2:
2305                 ipr_log_cache_error(ioa_cfg, hostrcb);
2306                 break;
2307         case IPR_HOST_RCB_OVERLAY_ID_3:
2308                 ipr_log_config_error(ioa_cfg, hostrcb);
2309                 break;
2310         case IPR_HOST_RCB_OVERLAY_ID_4:
2311         case IPR_HOST_RCB_OVERLAY_ID_6:
2312                 ipr_log_array_error(ioa_cfg, hostrcb);
2313                 break;
2314         case IPR_HOST_RCB_OVERLAY_ID_7:
2315                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2316                 break;
2317         case IPR_HOST_RCB_OVERLAY_ID_12:
2318                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2319                 break;
2320         case IPR_HOST_RCB_OVERLAY_ID_13:
2321                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2322                 break;
2323         case IPR_HOST_RCB_OVERLAY_ID_14:
2324         case IPR_HOST_RCB_OVERLAY_ID_16:
2325                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2326                 break;
2327         case IPR_HOST_RCB_OVERLAY_ID_17:
2328                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2329                 break;
2330         case IPR_HOST_RCB_OVERLAY_ID_20:
2331                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2332                 break;
2333         case IPR_HOST_RCB_OVERLAY_ID_23:
2334                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2335                 break;
2336         case IPR_HOST_RCB_OVERLAY_ID_24:
2337         case IPR_HOST_RCB_OVERLAY_ID_26:
2338                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2339                 break;
2340         case IPR_HOST_RCB_OVERLAY_ID_30:
2341                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2342                 break;
2343         case IPR_HOST_RCB_OVERLAY_ID_1:
2344         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2345         default:
2346                 ipr_log_generic_error(ioa_cfg, hostrcb);
2347                 break;
2348         }
2349 }
2350
2351 /**
2352  * ipr_process_error - Op done function for an adapter error log.
2353  * @ipr_cmd:    ipr command struct
2354  *
2355  * This function is the op done function for an error log host
2356  * controlled async from the adapter. It will log the error and
2357  * send the HCAM back to the adapter.
2358  *
2359  * Return value:
2360  *      none
2361  **/
2362 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2363 {
2364         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2365         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2366         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2367         u32 fd_ioasc;
2368
2369         if (ioa_cfg->sis64)
2370                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2371         else
2372                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2373
2374         list_del(&hostrcb->queue);
2375         list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2376
2377         if (!ioasc) {
2378                 ipr_handle_log_data(ioa_cfg, hostrcb);
2379                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2380                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2381         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2382                 dev_err(&ioa_cfg->pdev->dev,
2383                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2384         }
2385
2386         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2387 }
2388
2389 /**
2390  * ipr_timeout -  An internally generated op has timed out.
2391  * @ipr_cmd:    ipr command struct
2392  *
2393  * This function blocks host requests and initiates an
2394  * adapter reset.
2395  *
2396  * Return value:
2397  *      none
2398  **/
2399 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2400 {
2401         unsigned long lock_flags = 0;
2402         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2403
2404         ENTER;
2405         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2406
2407         ioa_cfg->errors_logged++;
2408         dev_err(&ioa_cfg->pdev->dev,
2409                 "Adapter being reset due to command timeout.\n");
2410
2411         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2412                 ioa_cfg->sdt_state = GET_DUMP;
2413
2414         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2415                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2416
2417         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418         LEAVE;
2419 }
2420
2421 /**
2422  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2423  * @ipr_cmd:    ipr command struct
2424  *
2425  * This function blocks host requests and initiates an
2426  * adapter reset.
2427  *
2428  * Return value:
2429  *      none
2430  **/
2431 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2432 {
2433         unsigned long lock_flags = 0;
2434         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2435
2436         ENTER;
2437         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2438
2439         ioa_cfg->errors_logged++;
2440         dev_err(&ioa_cfg->pdev->dev,
2441                 "Adapter timed out transitioning to operational.\n");
2442
2443         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2444                 ioa_cfg->sdt_state = GET_DUMP;
2445
2446         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2447                 if (ipr_fastfail)
2448                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2449                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2450         }
2451
2452         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2453         LEAVE;
2454 }
2455
2456 /**
2457  * ipr_reset_reload - Reset/Reload the IOA
2458  * @ioa_cfg:            ioa config struct
2459  * @shutdown_type:      shutdown type
2460  *
2461  * This function resets the adapter and re-initializes it.
2462  * This function assumes that all new host commands have been stopped.
2463  * Return value:
2464  *      SUCCESS / FAILED
2465  **/
2466 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2467                             enum ipr_shutdown_type shutdown_type)
2468 {
2469         if (!ioa_cfg->in_reset_reload)
2470                 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2471
2472         spin_unlock_irq(ioa_cfg->host->host_lock);
2473         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2474         spin_lock_irq(ioa_cfg->host->host_lock);
2475
2476         /* If we got hit with a host reset while we were already resetting
2477          the adapter for some reason, and the reset failed. */
2478         if (ioa_cfg->ioa_is_dead) {
2479                 ipr_trace;
2480                 return FAILED;
2481         }
2482
2483         return SUCCESS;
2484 }
2485
2486 /**
2487  * ipr_find_ses_entry - Find matching SES in SES table
2488  * @res:        resource entry struct of SES
2489  *
2490  * Return value:
2491  *      pointer to SES table entry / NULL on failure
2492  **/
2493 static const struct ipr_ses_table_entry *
2494 ipr_find_ses_entry(struct ipr_resource_entry *res)
2495 {
2496         int i, j, matches;
2497         struct ipr_std_inq_vpids *vpids;
2498         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2499
2500         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2501                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2502                         if (ste->compare_product_id_byte[j] == 'X') {
2503                                 vpids = &res->std_inq_data.vpids;
2504                                 if (vpids->product_id[j] == ste->product_id[j])
2505                                         matches++;
2506                                 else
2507                                         break;
2508                         } else
2509                                 matches++;
2510                 }
2511
2512                 if (matches == IPR_PROD_ID_LEN)
2513                         return ste;
2514         }
2515
2516         return NULL;
2517 }
2518
2519 /**
2520  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2521  * @ioa_cfg:    ioa config struct
2522  * @bus:                SCSI bus
2523  * @bus_width:  bus width
2524  *
2525  * Return value:
2526  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2527  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2528  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2529  *      max 160MHz = max 320MB/sec).
2530  **/
2531 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2532 {
2533         struct ipr_resource_entry *res;
2534         const struct ipr_ses_table_entry *ste;
2535         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2536
2537         /* Loop through each config table entry in the config table buffer */
2538         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2539                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2540                         continue;
2541
2542                 if (bus != res->bus)
2543                         continue;
2544
2545                 if (!(ste = ipr_find_ses_entry(res)))
2546                         continue;
2547
2548                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2549         }
2550
2551         return max_xfer_rate;
2552 }
2553
2554 /**
2555  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2556  * @ioa_cfg:            ioa config struct
2557  * @max_delay:          max delay in micro-seconds to wait
2558  *
2559  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2560  *
2561  * Return value:
2562  *      0 on success / other on failure
2563  **/
2564 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2565 {
2566         volatile u32 pcii_reg;
2567         int delay = 1;
2568
2569         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2570         while (delay < max_delay) {
2571                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2572
2573                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2574                         return 0;
2575
2576                 /* udelay cannot be used if delay is more than a few milliseconds */
2577                 if ((delay / 1000) > MAX_UDELAY_MS)
2578                         mdelay(delay / 1000);
2579                 else
2580                         udelay(delay);
2581
2582                 delay += delay;
2583         }
2584         return -EIO;
2585 }
2586
2587 /**
2588  * ipr_get_sis64_dump_data_section - Dump IOA memory
2589  * @ioa_cfg:                    ioa config struct
2590  * @start_addr:                 adapter address to dump
2591  * @dest:                       destination kernel buffer
2592  * @length_in_words:            length to dump in 4 byte words
2593  *
2594  * Return value:
2595  *      0 on success
2596  **/
2597 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2598                                            u32 start_addr,
2599                                            __be32 *dest, u32 length_in_words)
2600 {
2601         int i;
2602
2603         for (i = 0; i < length_in_words; i++) {
2604                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2605                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2606                 dest++;
2607         }
2608
2609         return 0;
2610 }
2611
2612 /**
2613  * ipr_get_ldump_data_section - Dump IOA memory
2614  * @ioa_cfg:                    ioa config struct
2615  * @start_addr:                 adapter address to dump
2616  * @dest:                               destination kernel buffer
2617  * @length_in_words:    length to dump in 4 byte words
2618  *
2619  * Return value:
2620  *      0 on success / -EIO on failure
2621  **/
2622 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2623                                       u32 start_addr,
2624                                       __be32 *dest, u32 length_in_words)
2625 {
2626         volatile u32 temp_pcii_reg;
2627         int i, delay = 0;
2628
2629         if (ioa_cfg->sis64)
2630                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2631                                                        dest, length_in_words);
2632
2633         /* Write IOA interrupt reg starting LDUMP state  */
2634         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2635                ioa_cfg->regs.set_uproc_interrupt_reg32);
2636
2637         /* Wait for IO debug acknowledge */
2638         if (ipr_wait_iodbg_ack(ioa_cfg,
2639                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2640                 dev_err(&ioa_cfg->pdev->dev,
2641                         "IOA dump long data transfer timeout\n");
2642                 return -EIO;
2643         }
2644
2645         /* Signal LDUMP interlocked - clear IO debug ack */
2646         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2647                ioa_cfg->regs.clr_interrupt_reg);
2648
2649         /* Write Mailbox with starting address */
2650         writel(start_addr, ioa_cfg->ioa_mailbox);
2651
2652         /* Signal address valid - clear IOA Reset alert */
2653         writel(IPR_UPROCI_RESET_ALERT,
2654                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2655
2656         for (i = 0; i < length_in_words; i++) {
2657                 /* Wait for IO debug acknowledge */
2658                 if (ipr_wait_iodbg_ack(ioa_cfg,
2659                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2660                         dev_err(&ioa_cfg->pdev->dev,
2661                                 "IOA dump short data transfer timeout\n");
2662                         return -EIO;
2663                 }
2664
2665                 /* Read data from mailbox and increment destination pointer */
2666                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2667                 dest++;
2668
2669                 /* For all but the last word of data, signal data received */
2670                 if (i < (length_in_words - 1)) {
2671                         /* Signal dump data received - Clear IO debug Ack */
2672                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2673                                ioa_cfg->regs.clr_interrupt_reg);
2674                 }
2675         }
2676
2677         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2678         writel(IPR_UPROCI_RESET_ALERT,
2679                ioa_cfg->regs.set_uproc_interrupt_reg32);
2680
2681         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2682                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2683
2684         /* Signal dump data received - Clear IO debug Ack */
2685         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2686                ioa_cfg->regs.clr_interrupt_reg);
2687
2688         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2689         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2690                 temp_pcii_reg =
2691                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2692
2693                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2694                         return 0;
2695
2696                 udelay(10);
2697                 delay += 10;
2698         }
2699
2700         return 0;
2701 }
2702
2703 #ifdef CONFIG_SCSI_IPR_DUMP
2704 /**
2705  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2706  * @ioa_cfg:            ioa config struct
2707  * @pci_address:        adapter address
2708  * @length:                     length of data to copy
2709  *
2710  * Copy data from PCI adapter to kernel buffer.
2711  * Note: length MUST be a 4 byte multiple
2712  * Return value:
2713  *      0 on success / other on failure
2714  **/
2715 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2716                         unsigned long pci_address, u32 length)
2717 {
2718         int bytes_copied = 0;
2719         int cur_len, rc, rem_len, rem_page_len;
2720         __be32 *page;
2721         unsigned long lock_flags = 0;
2722         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2723
2724         while (bytes_copied < length &&
2725                (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2726                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2727                     ioa_dump->page_offset == 0) {
2728                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2729
2730                         if (!page) {
2731                                 ipr_trace;
2732                                 return bytes_copied;
2733                         }
2734
2735                         ioa_dump->page_offset = 0;
2736                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2737                         ioa_dump->next_page_index++;
2738                 } else
2739                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2740
2741                 rem_len = length - bytes_copied;
2742                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2743                 cur_len = min(rem_len, rem_page_len);
2744
2745                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2746                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2747                         rc = -EIO;
2748                 } else {
2749                         rc = ipr_get_ldump_data_section(ioa_cfg,
2750                                                         pci_address + bytes_copied,
2751                                                         &page[ioa_dump->page_offset / 4],
2752                                                         (cur_len / sizeof(u32)));
2753                 }
2754                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2755
2756                 if (!rc) {
2757                         ioa_dump->page_offset += cur_len;
2758                         bytes_copied += cur_len;
2759                 } else {
2760                         ipr_trace;
2761                         break;
2762                 }
2763                 schedule();
2764         }
2765
2766         return bytes_copied;
2767 }
2768
2769 /**
2770  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2771  * @hdr:        dump entry header struct
2772  *
2773  * Return value:
2774  *      nothing
2775  **/
2776 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2777 {
2778         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2779         hdr->num_elems = 1;
2780         hdr->offset = sizeof(*hdr);
2781         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2782 }
2783
2784 /**
2785  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2786  * @ioa_cfg:    ioa config struct
2787  * @driver_dump:        driver dump struct
2788  *
2789  * Return value:
2790  *      nothing
2791  **/
2792 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2793                                    struct ipr_driver_dump *driver_dump)
2794 {
2795         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2796
2797         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2798         driver_dump->ioa_type_entry.hdr.len =
2799                 sizeof(struct ipr_dump_ioa_type_entry) -
2800                 sizeof(struct ipr_dump_entry_header);
2801         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2802         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2803         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2804         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2805                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2806                 ucode_vpd->minor_release[1];
2807         driver_dump->hdr.num_entries++;
2808 }
2809
2810 /**
2811  * ipr_dump_version_data - Fill in the driver version in the dump.
2812  * @ioa_cfg:    ioa config struct
2813  * @driver_dump:        driver dump struct
2814  *
2815  * Return value:
2816  *      nothing
2817  **/
2818 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2819                                   struct ipr_driver_dump *driver_dump)
2820 {
2821         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2822         driver_dump->version_entry.hdr.len =
2823                 sizeof(struct ipr_dump_version_entry) -
2824                 sizeof(struct ipr_dump_entry_header);
2825         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2826         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2827         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2828         driver_dump->hdr.num_entries++;
2829 }
2830
2831 /**
2832  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2833  * @ioa_cfg:    ioa config struct
2834  * @driver_dump:        driver dump struct
2835  *
2836  * Return value:
2837  *      nothing
2838  **/
2839 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2840                                    struct ipr_driver_dump *driver_dump)
2841 {
2842         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2843         driver_dump->trace_entry.hdr.len =
2844                 sizeof(struct ipr_dump_trace_entry) -
2845                 sizeof(struct ipr_dump_entry_header);
2846         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2847         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2848         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2849         driver_dump->hdr.num_entries++;
2850 }
2851
2852 /**
2853  * ipr_dump_location_data - Fill in the IOA location in the dump.
2854  * @ioa_cfg:    ioa config struct
2855  * @driver_dump:        driver dump struct
2856  *
2857  * Return value:
2858  *      nothing
2859  **/
2860 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2861                                    struct ipr_driver_dump *driver_dump)
2862 {
2863         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2864         driver_dump->location_entry.hdr.len =
2865                 sizeof(struct ipr_dump_location_entry) -
2866                 sizeof(struct ipr_dump_entry_header);
2867         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2868         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2869         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2870         driver_dump->hdr.num_entries++;
2871 }
2872
2873 /**
2874  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2875  * @ioa_cfg:    ioa config struct
2876  * @dump:               dump struct
2877  *
2878  * Return value:
2879  *      nothing
2880  **/
2881 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2882 {
2883         unsigned long start_addr, sdt_word;
2884         unsigned long lock_flags = 0;
2885         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2886         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2887         u32 num_entries, start_off, end_off;
2888         u32 bytes_to_copy, bytes_copied, rc;
2889         struct ipr_sdt *sdt;
2890         int valid = 1;
2891         int i;
2892
2893         ENTER;
2894
2895         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2896
2897         if (ioa_cfg->sdt_state != GET_DUMP) {
2898                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2899                 return;
2900         }
2901
2902         start_addr = readl(ioa_cfg->ioa_mailbox);
2903
2904         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2905                 dev_err(&ioa_cfg->pdev->dev,
2906                         "Invalid dump table format: %lx\n", start_addr);
2907                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2908                 return;
2909         }
2910
2911         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2912
2913         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2914
2915         /* Initialize the overall dump header */
2916         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2917         driver_dump->hdr.num_entries = 1;
2918         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2919         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2920         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2921         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2922
2923         ipr_dump_version_data(ioa_cfg, driver_dump);
2924         ipr_dump_location_data(ioa_cfg, driver_dump);
2925         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2926         ipr_dump_trace_data(ioa_cfg, driver_dump);
2927
2928         /* Update dump_header */
2929         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2930
2931         /* IOA Dump entry */
2932         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2933         ioa_dump->hdr.len = 0;
2934         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2935         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2936
2937         /* First entries in sdt are actually a list of dump addresses and
2938          lengths to gather the real dump data.  sdt represents the pointer
2939          to the ioa generated dump table.  Dump data will be extracted based
2940          on entries in this table */
2941         sdt = &ioa_dump->sdt;
2942
2943         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2944                                         sizeof(struct ipr_sdt) / sizeof(__be32));
2945
2946         /* Smart Dump table is ready to use and the first entry is valid */
2947         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2948             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2949                 dev_err(&ioa_cfg->pdev->dev,
2950                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2951                         rc, be32_to_cpu(sdt->hdr.state));
2952                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2953                 ioa_cfg->sdt_state = DUMP_OBTAINED;
2954                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2955                 return;
2956         }
2957
2958         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2959
2960         if (num_entries > IPR_NUM_SDT_ENTRIES)
2961                 num_entries = IPR_NUM_SDT_ENTRIES;
2962
2963         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964
2965         for (i = 0; i < num_entries; i++) {
2966                 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2967                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2968                         break;
2969                 }
2970
2971                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2972                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2973                         if (ioa_cfg->sis64)
2974                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2975                         else {
2976                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2977                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
2978
2979                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2980                                         bytes_to_copy = end_off - start_off;
2981                                 else
2982                                         valid = 0;
2983                         }
2984                         if (valid) {
2985                                 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2986                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2987                                         continue;
2988                                 }
2989
2990                                 /* Copy data from adapter to driver buffers */
2991                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2992                                                             bytes_to_copy);
2993
2994                                 ioa_dump->hdr.len += bytes_copied;
2995
2996                                 if (bytes_copied != bytes_to_copy) {
2997                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2998                                         break;
2999                                 }
3000                         }
3001                 }
3002         }
3003
3004         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3005
3006         /* Update dump_header */
3007         driver_dump->hdr.len += ioa_dump->hdr.len;
3008         wmb();
3009         ioa_cfg->sdt_state = DUMP_OBTAINED;
3010         LEAVE;
3011 }
3012
3013 #else
3014 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3015 #endif
3016
3017 /**
3018  * ipr_release_dump - Free adapter dump memory
3019  * @kref:       kref struct
3020  *
3021  * Return value:
3022  *      nothing
3023  **/
3024 static void ipr_release_dump(struct kref *kref)
3025 {
3026         struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3027         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3028         unsigned long lock_flags = 0;
3029         int i;
3030
3031         ENTER;
3032         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3033         ioa_cfg->dump = NULL;
3034         ioa_cfg->sdt_state = INACTIVE;
3035         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3036
3037         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3038                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3039
3040         kfree(dump);
3041         LEAVE;
3042 }
3043
/**
 * ipr_worker_thread - Worker thread
 * @work:		ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter. If a dump has been requested
 * (sdt_state == GET_DUMP), it instead collects the adapter dump and
 * initiates the follow-up reset.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Dump path: take a reference so the dump cannot be freed while we
	 * collect it with the lock dropped. */
	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		/* Only reset if the dump actually completed */
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	/* Removal loop: each scsi_remove_device() call drops the host lock,
	 * so the resource list must be rescanned from the top after every
	 * removal (hence the break + did_work retry). */
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				/* scsi_device_get() fails if the device is
				 * already being torn down; skip it then. */
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while(did_work);

	/* Addition loop: scsi_add_device() also requires dropping the lock,
	 * so restart the whole scan after each device is added. */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Notify userspace that the device configuration changed */
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
3128
3129 #ifdef CONFIG_SCSI_IPR_TRACE
3130 /**
3131  * ipr_read_trace - Dump the adapter trace
3132  * @filp:               open sysfs file
3133  * @kobj:               kobject struct
3134  * @bin_attr:           bin_attribute struct
3135  * @buf:                buffer
3136  * @off:                offset
3137  * @count:              buffer size
3138  *
3139  * Return value:
3140  *      number of bytes printed to buffer
3141  **/
3142 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3143                               struct bin_attribute *bin_attr,
3144                               char *buf, loff_t off, size_t count)
3145 {
3146         struct device *dev = container_of(kobj, struct device, kobj);
3147         struct Scsi_Host *shost = class_to_shost(dev);
3148         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3149         unsigned long lock_flags = 0;
3150         ssize_t ret;
3151
3152         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3153         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3154                                 IPR_TRACE_SIZE);
3155         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3156
3157         return ret;
3158 }
3159
/* Read-only sysfs binary attribute ("trace") exposing the adapter trace
 * buffer via ipr_read_trace(). .size = 0 means "no fixed size". */
static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
3168 #endif
3169
3170 /**
3171  * ipr_show_fw_version - Show the firmware version
3172  * @dev:        class device struct
3173  * @buf:        buffer
3174  *
3175  * Return value:
3176  *      number of bytes printed to buffer
3177  **/
3178 static ssize_t ipr_show_fw_version(struct device *dev,
3179                                    struct device_attribute *attr, char *buf)
3180 {
3181         struct Scsi_Host *shost = class_to_shost(dev);
3182         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3183         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3184         unsigned long lock_flags = 0;
3185         int len;
3186
3187         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3188         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3189                        ucode_vpd->major_release, ucode_vpd->card_type,
3190                        ucode_vpd->minor_release[0],
3191                        ucode_vpd->minor_release[1]);
3192         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3193         return len;
3194 }
3195
/* Read-only sysfs attribute ("fw_version") backed by ipr_show_fw_version() */
static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
3203
3204 /**
3205  * ipr_show_log_level - Show the adapter's error logging level
3206  * @dev:        class device struct
3207  * @buf:        buffer
3208  *
3209  * Return value:
3210  *      number of bytes printed to buffer
3211  **/
3212 static ssize_t ipr_show_log_level(struct device *dev,
3213                                    struct device_attribute *attr, char *buf)
3214 {
3215         struct Scsi_Host *shost = class_to_shost(dev);
3216         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3217         unsigned long lock_flags = 0;
3218         int len;
3219
3220         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3221         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3222         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223         return len;
3224 }
3225
3226 /**
3227  * ipr_store_log_level - Change the adapter's error logging level
3228  * @dev:        class device struct
3229  * @buf:        buffer
3230  *
3231  * Return value:
3232  *      number of bytes printed to buffer
3233  **/
3234 static ssize_t ipr_store_log_level(struct device *dev,
3235                                    struct device_attribute *attr,
3236                                    const char *buf, size_t count)
3237 {
3238         struct Scsi_Host *shost = class_to_shost(dev);
3239         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3240         unsigned long lock_flags = 0;
3241
3242         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3243         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3244         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3245         return strlen(buf);
3246 }
3247
/* Read/write sysfs attribute ("log_level") for the error logging level */
static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
3256
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Wait out any reset/reload already in flight. The lock must be
	 * dropped around wait_event() and the flag rechecked after
	 * reacquiring it, hence the loop. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Clear the error counter so we can tell whether the reset we are
	 * about to trigger logged any new errors */
	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		/* The reset never started -- treat as failure */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Fail if another reset began meanwhile or the IOA logged errors */
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
3309
/* Write-only sysfs attribute ("run_diagnostics") triggering an IOA reset */
static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
3317
3318 /**
3319  * ipr_show_adapter_state - Show the adapter's state
3320  * @class_dev:  device struct
3321  * @buf:        buffer
3322  *
3323  * Return value:
3324  *      number of bytes printed to buffer
3325  **/
3326 static ssize_t ipr_show_adapter_state(struct device *dev,
3327                                       struct device_attribute *attr, char *buf)
3328 {
3329         struct Scsi_Host *shost = class_to_shost(dev);
3330         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3331         unsigned long lock_flags = 0;
3332         int len;
3333
3334         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335         if (ioa_cfg->ioa_is_dead)
3336                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3337         else
3338                 len = snprintf(buf, PAGE_SIZE, "online\n");
3339         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340         return len;
3341 }
3342
3343 /**
3344  * ipr_store_adapter_state - Change adapter state
3345  * @dev:        device struct
3346  * @buf:        buffer
3347  * @count:      buffer size
3348  *
3349  * This function will change the adapter's state.
3350  *
3351  * Return value:
3352  *      count on success / other on failure
3353  **/
3354 static ssize_t ipr_store_adapter_state(struct device *dev,
3355                                        struct device_attribute *attr,
3356                                        const char *buf, size_t count)
3357 {
3358         struct Scsi_Host *shost = class_to_shost(dev);
3359         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3360         unsigned long lock_flags;
3361         int result = count;
3362
3363         if (!capable(CAP_SYS_ADMIN))
3364                 return -EACCES;
3365
3366         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3367         if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3368                 ioa_cfg->ioa_is_dead = 0;
3369                 ioa_cfg->reset_retries = 0;
3370                 ioa_cfg->in_ioa_bringdown = 0;
3371                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3372         }
3373         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3375
3376         return result;
3377 }
3378
/* Read/write sysfs attribute ("online_state") to show or revive the IOA */
static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
3387
3388 /**
3389  * ipr_store_reset_adapter - Reset the adapter
3390  * @dev:        device struct
3391  * @buf:        buffer
3392  * @count:      buffer size
3393  *
3394  * This function will reset the adapter.
3395  *
3396  * Return value:
3397  *      count on success / other on failure
3398  **/
3399 static ssize_t ipr_store_reset_adapter(struct device *dev,
3400                                        struct device_attribute *attr,
3401                                        const char *buf, size_t count)
3402 {
3403         struct Scsi_Host *shost = class_to_shost(dev);
3404         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405         unsigned long lock_flags;
3406         int result = count;
3407
3408         if (!capable(CAP_SYS_ADMIN))
3409                 return -EACCES;
3410
3411         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3412         if (!ioa_cfg->in_reset_reload)
3413                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3414         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3415         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3416
3417         return result;
3418 }
3419
/* Write-only sysfs attribute ("reset_host") to force an adapter reset */
static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
3427
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element (rounded up to a page order) */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed
	 * (round up for any partial trailing element) */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA.
	 * NOTE(review): (num_elem - 1) assumes struct ipr_sglist already
	 * embeds one scatterlist entry -- verify against its definition. */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
3494
3495 /**
3496  * ipr_free_ucode_buffer - Frees a microcode download buffer
3497  * @p_dnld:             scatter/gather list pointer
3498  *
3499  * Free a DMA'able ucode download buffer previously allocated with
3500  * ipr_alloc_ucode_buffer
3501  *
3502  * Return value:
3503  *      nothing
3504  **/
3505 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3506 {
3507         int i;
3508
3509         for (i = 0; i < sglist->num_sg; i++)
3510                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3511
3512         kfree(sglist);
3513 }
3514
3515 /**
3516  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3517  * @sglist:             scatter/gather list pointer
3518  * @buffer:             buffer pointer
3519  * @len:                buffer length
3520  *
3521  * Copy a microcode image from a user buffer into a buffer allocated by
3522  * ipr_alloc_ucode_buffer
3523  *
3524  * Return value:
3525  *      0 on success / other on failure
3526  **/
3527 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3528                                  u8 *buffer, u32 len)
3529 {
3530         int bsize_elem, i, result = 0;
3531         struct scatterlist *scatterlist;
3532         void *kaddr;
3533
3534         /* Determine the actual number of bytes per element */
3535         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3536
3537         scatterlist = sglist->scatterlist;
3538
3539         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3540                 struct page *page = sg_page(&scatterlist[i]);
3541
3542                 kaddr = kmap(page);
3543                 memcpy(kaddr, buffer, bsize_elem);
3544                 kunmap(page);
3545
3546                 scatterlist[i].length = bsize_elem;
3547
3548                 if (result != 0) {
3549                         ipr_trace;
3550                         return result;
3551                 }
3552         }
3553
3554         if (len % bsize_elem) {
3555                 struct page *page = sg_page(&scatterlist[i]);
3556
3557                 kaddr = kmap(page);
3558                 memcpy(kaddr, buffer, len % bsize_elem);
3559                 kunmap(page);
3560
3561                 scatterlist[i].length = len % bsize_elem;
3562         }
3563
3564         sglist->buffer_len = len;
3565         return result;
3566 }
3567
3568 /**
3569  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3570  * @ipr_cmd:            ipr command struct
3571  * @sglist:             scatter/gather list
3572  *
3573  * Builds a microcode download IOA data list (IOADL).
3574  *
3575  **/
3576 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3577                                     struct ipr_sglist *sglist)
3578 {
3579         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3580         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3581         struct scatterlist *scatterlist = sglist->scatterlist;
3582         int i;
3583
3584         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3585         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3586         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3587
3588         ioarcb->ioadl_len =
3589                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3590         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3591                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3592                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3593                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3594         }
3595
3596         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3597 }
3598
3599 /**
3600  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3601  * @ipr_cmd:    ipr command struct
3602  * @sglist:             scatter/gather list
3603  *
3604  * Builds a microcode download IOA data list (IOADL).
3605  *
3606  **/
3607 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3608                                   struct ipr_sglist *sglist)
3609 {
3610         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3611         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3612         struct scatterlist *scatterlist = sglist->scatterlist;
3613         int i;
3614
3615         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3616         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3617         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3618
3619         ioarcb->ioadl_len =
3620                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3621
3622         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3623                 ioadl[i].flags_and_data_len =
3624                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3625                 ioadl[i].address =
3626                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3627         }
3628
3629         ioadl[i-1].flags_and_data_len |=
3630                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3631 }
3632
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:		scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	/* Wait out any reset/reload already in flight. The lock must be
	 * dropped around wait_event() and the flag rechecked after
	 * reacquiring it, hence the loop. */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Only one microcode download may be outstanding at a time */
	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	/* The reset path picks up ucode_sglist to perform the download;
	 * it is cleared again below once the reset/reload completes. */
	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
3682
3683 /**
3684  * ipr_store_update_fw - Update the firmware on the adapter
3685  * @class_dev:  device struct
3686  * @buf:        buffer
3687  * @count:      buffer size
3688  *
3689  * This function will update the firmware on the adapter.
3690  *
3691  * Return value:
3692  *      count on success / other on failure
3693  **/
3694 static ssize_t ipr_store_update_fw(struct device *dev,
3695                                    struct device_attribute *attr,
3696                                    const char *buf, size_t count)
3697 {
3698         struct Scsi_Host *shost = class_to_shost(dev);
3699         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700         struct ipr_ucode_image_header *image_hdr;
3701         const struct firmware *fw_entry;
3702         struct ipr_sglist *sglist;
3703         char fname[100];
3704         char *src;
3705         int len, result, dnld_size;
3706
3707         if (!capable(CAP_SYS_ADMIN))
3708                 return -EACCES;
3709
3710         len = snprintf(fname, 99, "%s", buf);
3711         fname[len-1] = '\0';
3712
3713         if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3714                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3715                 return -EIO;
3716         }
3717
3718         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3719
3720         if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3721             (ioa_cfg->vpd_cbs->page3_data.card_type &&
3722              ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3723                 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3724                 release_firmware(fw_entry);
3725                 return -EINVAL;
3726         }
3727
3728         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3729         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3730         sglist = ipr_alloc_ucode_buffer(dnld_size);
3731
3732         if (!sglist) {
3733                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3734                 release_firmware(fw_entry);
3735                 return -ENOMEM;
3736         }
3737
3738         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3739
3740         if (result) {
3741                 dev_err(&ioa_cfg->pdev->dev,
3742                         "Microcode buffer copy to DMA buffer failed\n");
3743                 goto out;
3744         }
3745
3746         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3747
3748         if (!result)
3749                 result = count;
3750 out:
3751         ipr_free_ucode_buffer(sglist);
3752         release_firmware(fw_entry);
3753         return result;
3754 }
3755
/* Write-only sysfs attribute ("update_fw") to trigger a microcode update */
static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
3763
/* NULL-terminated list of host sysfs attributes handed to the SCSI
 * mid-layer via the host template */
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	NULL,
};
3773
3774 #ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * The dump is laid out as three consecutive regions: the driver dump
 * header struct, the IOA dump header (up to its ioa_data member), and
 * then the IOA dump data pages. @off is walked through each region in
 * turn, copying whatever portion of @count falls inside it.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	/* Hold a reference so the dump can't be freed while we copy */
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Past the end of the dump -- EOF */
	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	/* Clamp the read to the dump's total length */
	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	/* Region 1: the driver dump header struct */
	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	/* Rebase off to the start of the IOA dump region */
	off -= sizeof(dump->driver_dump);

	/* Region 2: the IOA dump header, up to the data pages */
	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	/* Rebase off to the start of the IOA dump data pages */
	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	/* Region 3: the dump data itself, stored as individual pages --
	 * copy at most one page per iteration, never crossing a page
	 * boundary in a single memcpy */
	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
3867
3868 /**
3869  * ipr_alloc_dump - Prepare for adapter dump
3870  * @ioa_cfg:    ioa config struct
3871  *
3872  * Return value:
3873  *      0 on success / other on failure
3874  **/
3875 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3876 {
3877         struct ipr_dump *dump;
3878         unsigned long lock_flags = 0;
3879
3880         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3881
3882         if (!dump) {
3883                 ipr_err("Dump memory allocation failed\n");
3884                 return -ENOMEM;
3885         }
3886
3887         kref_init(&dump->kref);
3888         dump->ioa_cfg = ioa_cfg;
3889
3890         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3891
3892         if (INACTIVE != ioa_cfg->sdt_state) {
3893                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3894                 kfree(dump);
3895                 return 0;
3896         }
3897
3898         ioa_cfg->dump = dump;
3899         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3900         if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3901                 ioa_cfg->dump_taken = 1;
3902                 schedule_work(&ioa_cfg->work_q);
3903         }
3904         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3905
3906         return 0;
3907 }
3908
3909 /**
3910  * ipr_free_dump - Free adapter dump memory
3911  * @ioa_cfg:    ioa config struct
3912  *
3913  * Return value:
3914  *      0 on success / other on failure
3915  **/
3916 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3917 {
3918         struct ipr_dump *dump;
3919         unsigned long lock_flags = 0;
3920
3921         ENTER;
3922
3923         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3924         dump = ioa_cfg->dump;
3925         if (!dump) {
3926                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3927                 return 0;
3928         }
3929
3930         ioa_cfg->dump = NULL;
3931         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932
3933         kref_put(&dump->kref, ipr_release_dump);
3934
3935         LEAVE;
3936         return 0;
3937 }
3938
3939 /**
3940  * ipr_write_dump - Setup dump state of adapter
3941  * @filp:               open sysfs file
3942  * @kobj:               kobject struct
3943  * @bin_attr:           bin_attribute struct
3944  * @buf:                buffer
3945  * @off:                offset
3946  * @count:              buffer size
3947  *
3948  * Return value:
3949  *      number of bytes printed to buffer
3950  **/
3951 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3952                               struct bin_attribute *bin_attr,
3953                               char *buf, loff_t off, size_t count)
3954 {
3955         struct device *cdev = container_of(kobj, struct device, kobj);
3956         struct Scsi_Host *shost = class_to_shost(cdev);
3957         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3958         int rc;
3959
3960         if (!capable(CAP_SYS_ADMIN))
3961                 return -EACCES;
3962
3963         if (buf[0] == '1')
3964                 rc = ipr_alloc_dump(ioa_cfg);
3965         else if (buf[0] == '0')
3966                 rc = ipr_free_dump(ioa_cfg);
3967         else
3968                 return -EINVAL;
3969
3970         if (rc)
3971                 return rc;
3972         else
3973                 return count;
3974 }
3975
/* sysfs binary attribute ("dump", root read/write) used to stage and read
 * adapter dumps via ipr_read_dump()/ipr_write_dump(). */
static struct bin_attribute ipr_dump_attr = {
	.attr = {
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
3985 #else
/* No-op stub used when adapter dump support is compiled out. */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3987 #endif
3988
3989 /**
3990  * ipr_change_queue_depth - Change the device's queue depth
3991  * @sdev:       scsi device struct
3992  * @qdepth:     depth to set
3993  * @reason:     calling context
3994  *
3995  * Return value:
3996  *      actual depth set
3997  **/
3998 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3999                                   int reason)
4000 {
4001         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4002         struct ipr_resource_entry *res;
4003         unsigned long lock_flags = 0;
4004
4005         if (reason != SCSI_QDEPTH_DEFAULT)
4006                 return -EOPNOTSUPP;
4007
4008         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4009         res = (struct ipr_resource_entry *)sdev->hostdata;
4010
4011         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4012                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4013         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4014
4015         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4016         return sdev->queue_depth;
4017 }
4018
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Enables or disables tagged command queueing for the device when
 * it is a generic SCSI device that supports tagging; otherwise the
 * device is left (and reported as) untagged.
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;	/* device does not support tagging */
	} else
		tag_type = 0;	/* no resource entry: report untagged */

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
4056
4057 /**
4058  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4059  * @dev:        device struct
4060  * @buf:        buffer
4061  *
4062  * Return value:
4063  *      number of bytes printed to buffer
4064  **/
4065 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4066 {
4067         struct scsi_device *sdev = to_scsi_device(dev);
4068         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4069         struct ipr_resource_entry *res;
4070         unsigned long lock_flags = 0;
4071         ssize_t len = -ENXIO;
4072
4073         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4074         res = (struct ipr_resource_entry *)sdev->hostdata;
4075         if (res)
4076                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4077         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4078         return len;
4079 }
4080
/* Per-device sysfs attribute ("adapter_handle", root read-only). */
static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
4088
4089 /**
4090  * ipr_show_resource_path - Show the resource path or the resource address for
4091  *                          this device.
4092  * @dev:        device struct
4093  * @buf:        buffer
4094  *
4095  * Return value:
4096  *      number of bytes printed to buffer
4097  **/
4098 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4099 {
4100         struct scsi_device *sdev = to_scsi_device(dev);
4101         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4102         struct ipr_resource_entry *res;
4103         unsigned long lock_flags = 0;
4104         ssize_t len = -ENXIO;
4105         char buffer[IPR_MAX_RES_PATH_LENGTH];
4106
4107         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4108         res = (struct ipr_resource_entry *)sdev->hostdata;
4109         if (res && ioa_cfg->sis64)
4110                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4111                                ipr_format_res_path(res->res_path, buffer,
4112                                                    sizeof(buffer)));
4113         else if (res)
4114                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4115                                res->bus, res->target, res->lun);
4116
4117         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118         return len;
4119 }
4120
/* Per-device sysfs attribute ("resource_path", root read-only). */
static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = 	"resource_path",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_resource_path
};
4128
/* NULL-terminated list of per-device sysfs attributes registered with
 * the SCSI midlayer (scsi_host_template.sdev_attrs). */
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	NULL,
};
4134
4135 /**
4136  * ipr_biosparam - Return the HSC mapping
4137  * @sdev:                       scsi device struct
4138  * @block_device:       block device pointer
4139  * @capacity:           capacity of the device
4140  * @parm:                       Array containing returned HSC values.
4141  *
4142  * This function generates the HSC parms that fdisk uses.
4143  * We want to make sure we return something that places partitions
4144  * on 4k boundaries for best performance with the IOA.
4145  *
4146  * Return value:
4147  *      0 on success
4148  **/
4149 static int ipr_biosparam(struct scsi_device *sdev,
4150                          struct block_device *block_device,
4151                          sector_t capacity, int *parm)
4152 {
4153         int heads, sectors;
4154         sector_t cylinders;
4155
4156         heads = 128;
4157         sectors = 32;
4158
4159         cylinders = capacity;
4160         sector_div(cylinders, (128 * 32));
4161
4162         /* return result */
4163         parm[0] = heads;
4164         parm[1] = sectors;
4165         parm[2] = cylinders;
4166
4167         return 0;
4168 }
4169
4170 /**
4171  * ipr_find_starget - Find target based on bus/target.
4172  * @starget:    scsi target struct
4173  *
4174  * Return value:
4175  *      resource entry pointer if found / NULL if not found
4176  **/
4177 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4178 {
4179         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4180         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4181         struct ipr_resource_entry *res;
4182
4183         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4184                 if ((res->bus == starget->channel) &&
4185                     (res->target == starget->id) &&
4186                     (res->lun == 0)) {
4187                         return res;
4188                 }
4189         }
4190
4191         return NULL;
4192 }
4193
/* Forward declaration so ipr_target_alloc() can reference the libata
 * port info; presumably initialized later in this file — not visible here. */
static struct ata_port_info sata_port_info;
4195
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		/* Drop the lock: kzalloc/ata_sas_port_alloc may sleep. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;	/* lock already released above */

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			/* Re-take the lock before publishing the new port. */
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			/* NOTE(review): res was looked up before the lock was
			 * dropped — assumes it stays valid across the unlock;
			 * confirm against resource-list teardown paths. */
			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;	/* lock already released above */
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
4244
4245 /**
4246  * ipr_target_destroy - Destroy a SCSI target
4247  * @starget:    scsi target struct
4248  *
4249  * If the device was a SATA device, this function frees the libata
4250  * ATA port, else it does nothing.
4251  *
4252  **/
4253 static void ipr_target_destroy(struct scsi_target *starget)
4254 {
4255         struct ipr_sata_port *sata_port = starget->hostdata;
4256         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4257         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4258
4259         if (ioa_cfg->sis64) {
4260                 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4261                         clear_bit(starget->id, ioa_cfg->array_ids);
4262                 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4263                         clear_bit(starget->id, ioa_cfg->vset_ids);
4264                 else if (starget->channel == 0)
4265                         clear_bit(starget->id, ioa_cfg->target_ids);
4266         }
4267
4268         if (sata_port) {
4269                 starget->hostdata = NULL;
4270                 ata_sas_port_destroy(sata_port->ap);
4271                 kfree(sata_port);
4272         }
4273 }
4274
4275 /**
4276  * ipr_find_sdev - Find device based on bus/target/lun.
4277  * @sdev:       scsi device struct
4278  *
4279  * Return value:
4280  *      resource entry pointer if found / NULL if not found
4281  **/
4282 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4283 {
4284         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4285         struct ipr_resource_entry *res;
4286
4287         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4288                 if ((res->bus == sdev->channel) &&
4289                     (res->target == sdev->id) &&
4290                     (res->lun == sdev->lun))
4291                         return res;
4292         }
4293
4294         return NULL;
4295 }
4296
4297 /**
4298  * ipr_slave_destroy - Unconfigure a SCSI device
4299  * @sdev:       scsi device struct
4300  *
4301  * Return value:
4302  *      nothing
4303  **/
4304 static void ipr_slave_destroy(struct scsi_device *sdev)
4305 {
4306         struct ipr_resource_entry *res;
4307         struct ipr_ioa_cfg *ioa_cfg;
4308         unsigned long lock_flags = 0;
4309
4310         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4311
4312         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4313         res = (struct ipr_resource_entry *) sdev->hostdata;
4314         if (res) {
4315                 if (res->sata_port)
4316                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4317                 sdev->hostdata = NULL;
4318                 res->sdev = NULL;
4319                 res->sata_port = NULL;
4320         }
4321         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4322 }
4323
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device: device type,
 * queue limits, queue depth and, for SATA devices, the libata port.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			/* adapter-owned devices get no upper-level driver */
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			/* volume sets need longer timeouts and a transfer cap */
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		/* Drop the lock: the calls below may sleep. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
4375
4376 /**
4377  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4378  * @sdev:       scsi device struct
4379  *
4380  * This function initializes an ATA port so that future commands
4381  * sent through queuecommand will work.
4382  *
4383  * Return value:
4384  *      0 on success
4385  **/
4386 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4387 {
4388         struct ipr_sata_port *sata_port = NULL;
4389         int rc = -ENXIO;
4390
4391         ENTER;
4392         if (sdev->sdev_target)
4393                 sata_port = sdev->sdev_target->hostdata;
4394         if (sata_port)
4395                 rc = ata_sas_port_init(sata_port->ap);
4396         if (rc)
4397                 ipr_slave_destroy(sdev);
4398
4399         LEAVE;
4400         return rc;
4401 }
4402
4403 /**
4404  * ipr_slave_alloc - Prepare for commands to a device.
4405  * @sdev:       scsi device struct
4406  *
4407  * This function saves a pointer to the resource entry
4408  * in the scsi device struct if the device exists. We
4409  * can then use this pointer in ipr_queuecommand when
4410  * handling new commands.
4411  *
4412  * Return value:
4413  *      0 on success / -ENXIO if device does not exist
4414  **/
4415 static int ipr_slave_alloc(struct scsi_device *sdev)
4416 {
4417         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4418         struct ipr_resource_entry *res;
4419         unsigned long lock_flags;
4420         int rc = -ENXIO;
4421
4422         sdev->hostdata = NULL;
4423
4424         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4425
4426         res = ipr_find_sdev(sdev);
4427         if (res) {
4428                 res->sdev = sdev;
4429                 res->add_to_ml = 0;
4430                 res->in_erp = 0;
4431                 sdev->hostdata = res;
4432                 if (!ipr_is_naca_model(res))
4433                         res->needs_sync_complete = 1;
4434                 rc = 0;
4435                 if (ipr_is_gata(res)) {
4436                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4437                         return ipr_ata_slave_alloc(sdev);
4438                 }
4439         }
4440
4441         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4442
4443         return rc;
4444 }
4445
/**
 * __ipr_eh_host_reset - Reset the host adapter (host_lock held by caller)
 * @scsi_cmd:	scsi command struct
 *
 * Initiates a full adapter reset/reload as part of error recovery.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	/* A pending dump request is serviced as part of this reset. */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}
4472
4473 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4474 {
4475         int rc;
4476
4477         spin_lock_irq(cmd->device->host->host_lock);
4478         rc = __ipr_eh_host_reset(cmd);
4479         spin_unlock_irq(cmd->device->host->host_lock);
4480
4481         return rc;
4482 }
4483
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:		resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	/* SIS64 and older adapters keep the ATA register block in
	 * different places within the command structures. */
	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	/* Build the IPR_RESET_DEVICE adapter command. */
	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		/* SATA devices get a PHY reset instead of a LUN reset. */
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	/* Preserve the returned ATA status for the SATL layer, unless the
	 * whole adapter was reset (in which case it is meaningless). */
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
4542
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused here; part of the libata hardreset signature
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
				unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait out any in-flight adapter reset; the lock is dropped while
	 * sleeping and re-checked after each wakeup. */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		/* Report the device class back to libata. */
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}
4580
/**
 * __ipr_eh_dev_reset - Reset the device (host_lock held by caller)
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	/* Redirect completion of every command outstanding to this device
	 * to the EH done handlers, and mark queued ATA commands failed so
	 * libata's EH picks them up. */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		/* libata EH may sleep; drop the host lock around it. */
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		/* Any command still pending for this device means the
		 * reset did not clean up — report failure. */
		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}
4652
4653 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4654 {
4655         int rc;
4656
4657         spin_lock_irq(cmd->device->host->host_lock);
4658         rc = __ipr_eh_dev_reset(cmd);
4659         spin_unlock_irq(cmd->device->host->host_lock);
4660
4661         return rc;
4662 }
4663
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	/* On older (non-SIS64) adapters, tell the midlayer which bus was
	 * reset by matching the command's resource handle. */
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}
4699
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Nothing to do if the abort completed in the meantime or the
	 * adapter is already being reset. */
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	/* Link abort and reset commands so ipr_bus_reset_done() can tell
	 * whether the abort is still outstanding when the reset finishes. */
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
4739
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op by sending a Cancel All
 * Requests IOA command for the device the op was issued to,
 * then sleeping until the cancel completes.
 *
 * Called with the host lock held (see ipr_eh_abort).
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	/* Only generic SCSI devices can be aborted this way */
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	/* Hook the op's done routine so that, if the abort catches it,
	 * completion is routed through the eh done handler */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	/* Op is no longer pending -- it already completed */
	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	/* Sleeps until the cancel (or the bus reset issued by
	 * ipr_abort_timeout on timeout) completes */
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	/* Any remaining sense key in the IOASC means the cancel failed */
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}
4810
4811 /**
4812  * ipr_eh_abort - Abort a single op
4813  * @scsi_cmd:   scsi command struct
4814  *
4815  * Return value:
4816  *      SUCCESS / FAILED
4817  **/
4818 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4819 {
4820         unsigned long flags;
4821         int rc;
4822
4823         ENTER;
4824
4825         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4826         rc = ipr_cancel_op(scsi_cmd);
4827         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4828
4829         LEAVE;
4830         return rc;
4831 }
4832
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 *
 * Handles any interrupt condition other than an HRRQ update:
 * SIS64 IPL stage changes, the IOA transitioning to operational,
 * unit checks, and permanent IOA failures.
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
{
	irqreturn_t rc = IRQ_HANDLED;
	volatile u32 int_reg, int_mask_reg;

	/* Only consider interrupt bits that are not masked */
	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			/* Re-read via the full-width registers for SIS64 */
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				/* Advance the in-progress adapter reset job */
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		/* IOA is now operational: continue the reset state machine */
		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		/* Anything else here is an adapter error condition */
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		/* Quiesce the adapter and start a full reset */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
4897
4898 /**
4899  * ipr_isr_eh - Interrupt service routine error handler
4900  * @ioa_cfg:    ioa config struct
4901  * @msg:        message to log
4902  *
4903  * Return value:
4904  *      none
4905  **/
4906 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4907 {
4908         ioa_cfg->errors_logged++;
4909         dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4910
4911         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4912                 ioa_cfg->sdt_state = GET_DUMP;
4913
4914         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4915 }
4916
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Drains the host request/response queue (HRRQ): completes every
 * finished op, then clears the HRRQ-updated PCI interrupt and loops
 * in case more responses arrived meanwhile. Falls back to
 * ipr_handle_other_interrupt() if no HRRQ entries were found.
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		/* A valid HRRQ entry has its toggle bit equal to ours;
		 * the bit flips each time the ring wraps (see below) */
		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			/* Extract the command-block index from the response */
			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			/* Dequeue, cancel the op's timeout, and complete it */
			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			/* Advance the ring cursor, flipping the expected
			 * toggle bit on wrap-around */
			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

			/* Interrupt would not clear after bounded retries:
			 * treat as adapter failure */
			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

		} else
			break;
	}

	/* No HRRQ work found -- check for non-HRRQ interrupt causes */
	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
5003
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * DMA-maps the scsi command's buffer and fills in the 64-bit IOADL
 * (I/O address descriptor list) entries for the adapter.
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	/* No data transfer: nothing to map, IOADL stays empty */
	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	/* Direction determines the per-descriptor flags and, for writes,
	 * a command-packet flag as well */
	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	/* One 64-bit descriptor per mapped scatter/gather segment */
	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	/* Mark the final descriptor so the adapter knows where to stop */
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
5054
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * DMA-maps the scsi command's buffer and fills in the 32-bit IOADL
 * entries. Small lists are embedded directly in the IOARCB.
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	/* No data transfer: nothing to map, IOADL stays empty */
	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	/* Writes and reads use separate length/IOADL-length fields
	 * in the 32-bit IOARCB layout */
	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	/* If the list fits in the IOARCB's inline add_data area, embed it
	 * there and point the adapter at it (saves a fetch) */
	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	/* One descriptor per mapped scatter/gather segment */
	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	/* Mark the final descriptor so the adapter knows where to stop */
	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
5115
5116 /**
5117  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5118  * @scsi_cmd:   scsi command struct
5119  *
5120  * Return value:
5121  *      task attributes
5122  **/
5123 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5124 {
5125         u8 tag[2];
5126         u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5127
5128         if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5129                 switch (tag[0]) {
5130                 case MSG_SIMPLE_TAG:
5131                         rc = IPR_FLAGS_LO_SIMPLE_TASK;
5132                         break;
5133                 case MSG_HEAD_TAG:
5134                         rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5135                         break;
5136                 case MSG_ORDERED_TAG:
5137                         rc = IPR_FLAGS_LO_ORDERED_TASK;
5138                         break;
5139                 };
5140         }
5141
5142         return rc;
5143 }
5144
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/* Nonzero sense key means the Request Sense itself failed; fail
	 * the original command rather than returning bogus sense data */
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		/* ERP for this device is over */
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5180
5181 /**
5182  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5183  * @ipr_cmd:    ipr command struct
5184  *
5185  * Return value:
5186  *      none
5187  **/
5188 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5189 {
5190         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5191         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5192         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5193
5194         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5195         ioarcb->data_transfer_length = 0;
5196         ioarcb->read_data_transfer_length = 0;
5197         ioarcb->ioadl_len = 0;
5198         ioarcb->read_ioadl_len = 0;
5199         ioasa->hdr.ioasc = 0;
5200         ioasa->hdr.residual_data_len = 0;
5201
5202         if (ipr_cmd->ioa_cfg->sis64)
5203                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5204                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5205         else {
5206                 ioarcb->write_ioadl_addr =
5207                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5208                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5209         }
5210 }
5211
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/* If the preceding ERP step (e.g. cancel all) failed, give up
	 * and complete the original command now */
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	/* Reuse the same command block for the REQUEST SENSE CDB */
	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	/* Direct the sense data into this command block's DMA-able
	 * sense buffer */
	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
5247
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	/* Mark the device as in error recovery until ipr_erp_done() */
	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	/* Untagged devices have no queue to clear; go straight to
	 * fetching the sense data */
	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	/* Request sense follows once the cancel completes */
	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
5282
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	/* No error status: nothing to log */
	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	/* Prefer the failing-device IOASC when a bus reset masked it */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	/* Clamp the dump to the size of the IOASA for this adapter type */
	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	/* Dump 16 bytes (four big-endian words) per line, prefixed with
	 * the byte offset */
	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
5350
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Synthesizes a sense buffer in the scsi_cmd from the IOASA status,
 * using descriptor-format sense (0x72) when a 64-bit failing LBA must
 * be reported and fixed-format sense (0x70) otherwise.
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* Driver-generated IOASCs carry no device sense information */
	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		/* Failing LBA exceeds 32 bits: use descriptor-format sense
		 * with an information descriptor carrying the 64-bit LBA */
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;	/* additional sense length */
		sense_buf[8] = 0;	/* information descriptor type */
		sense_buf[9] = 0x0A;	/* descriptor additional length */
		sense_buf[10] = 0x80;	/* valid bit */

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		/* Fixed-format sense */
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			/* Field pointer bytes from the IOASC-specific data */
			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
5441
5442 /**
5443  * ipr_get_autosense - Copy autosense data to sense buffer
5444  * @ipr_cmd:    ipr command struct
5445  *
5446  * This function copies the autosense buffer to the buffer
5447  * in the scsi_cmd, if there is autosense available.
5448  *
5449  * Return value:
5450  *      1 if autosense was available / 0 if not
5451  **/
5452 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5453 {
5454         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5455         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5456
5457         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5458                 return 0;
5459
5460         if (ipr_cmd->ioa_cfg->sis64)
5461                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5462                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5463                            SCSI_SENSE_BUFFERSIZE));
5464         else
5465                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5466                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5467                            SCSI_SENSE_BUFFERSIZE));
5468         return 1;
5469 }
5470
5471 /**
5472  * ipr_erp_start - Process an error response for a SCSI op
5473  * @ioa_cfg:    ioa config struct
5474  * @ipr_cmd:    ipr command struct
5475  *
5476  * This function determines whether or not to initiate ERP
5477  * on the affected device.
5478  *
5479  * Return value:
5480  *      nothing
5481  **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	/* No resource attached to this device - complete via the EH done path */
	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	/* Non-GSCSI device without real device bus status - generate sense
	 * data from the IOASC before completing the op */
	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	/* Translate the masked IOASC into a midlayer result code and decide
	 * whether the next op to this resource needs a sync complete */
	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			/* No autosense was returned; for non-NACA devices,
			 * cancel all outstanding ops and return - completion
			 * happens later through the ERP path */
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	/* Return the op to the midlayer and put the command block back
	 * on the free queue */
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5564
5565 /**
5566  * ipr_scsi_done - mid-layer done function
5567  * @ipr_cmd:    ipr command struct
5568  *
5569  * This function is invoked by the interrupt handler for
5570  * ops generated by the SCSI mid-layer
5571  *
5572  * Return value:
5573  *      none
5574  **/
5575 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5576 {
5577         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5578         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5579         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5580
5581         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5582
5583         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5584                 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5585                 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5586                 scsi_cmd->scsi_done(scsi_cmd);
5587         } else
5588                 ipr_erp_start(ioa_cfg, ipr_cmd);
5589 }
5590
5591 /**
5592  * ipr_queuecommand - Queue a mid-layer request
5593  * @scsi_cmd:   scsi command struct
5594  * @done:               done function
5595  *
5596  * This function queues a request generated by the mid-layer.
5597  *
5598  * Return value:
5599  *      0 on success
5600  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5601  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
5602  **/
5603 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5604                             void (*done) (struct scsi_cmnd *))
5605 {
5606         struct ipr_ioa_cfg *ioa_cfg;
5607         struct ipr_resource_entry *res;
5608         struct ipr_ioarcb *ioarcb;
5609         struct ipr_cmnd *ipr_cmd;
5610         int rc = 0;
5611
5612         scsi_cmd->scsi_done = done;
5613         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5614         res = scsi_cmd->device->hostdata;
5615         scsi_cmd->result = (DID_OK << 16);
5616
5617         /*
5618          * We are currently blocking all devices due to a host reset
5619          * We have told the host to stop giving us new requests, but
5620          * ERP ops don't count. FIXME
5621          */
5622         if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5623                 return SCSI_MLQUEUE_HOST_BUSY;
5624
5625         /*
5626          * FIXME - Create scsi_set_host_offline interface
5627          *  and the ioa_is_dead check can be removed
5628          */
5629         if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5630                 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5631                 scsi_cmd->result = (DID_NO_CONNECT << 16);
5632                 scsi_cmd->scsi_done(scsi_cmd);
5633                 return 0;
5634         }
5635
5636         if (ipr_is_gata(res) && res->sata_port)
5637                 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5638
5639         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5640         ioarcb = &ipr_cmd->ioarcb;
5641         list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5642
5643         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5644         ipr_cmd->scsi_cmd = scsi_cmd;
5645         ioarcb->res_handle = res->res_handle;
5646         ipr_cmd->done = ipr_scsi_done;
5647         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5648
5649         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5650                 if (scsi_cmd->underflow == 0)
5651                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5652
5653                 if (res->needs_sync_complete) {
5654                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5655                         res->needs_sync_complete = 0;
5656                 }
5657
5658                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5659                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5660                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5661                 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5662         }
5663
5664         if (scsi_cmd->cmnd[0] >= 0xC0 &&
5665             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5666                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5667
5668         if (likely(rc == 0)) {
5669                 if (ioa_cfg->sis64)
5670                         rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5671                 else
5672                         rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5673         }
5674
5675         if (likely(rc == 0)) {
5676                 mb();
5677                 ipr_send_command(ipr_cmd);
5678         } else {
5679                  list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5680                  return SCSI_MLQUEUE_HOST_BUSY;
5681         }
5682
5683         return 0;
5684 }
5685
5686 /**
5687  * ipr_ioctl - IOCTL handler
5688  * @sdev:       scsi device struct
5689  * @cmd:        IOCTL cmd
5690  * @arg:        IOCTL arg
5691  *
5692  * Return value:
5693  *      0 on success / other on failure
5694  **/
5695 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5696 {
5697         struct ipr_resource_entry *res;
5698
5699         res = (struct ipr_resource_entry *)sdev->hostdata;
5700         if (res && ipr_is_gata(res)) {
5701                 if (cmd == HDIO_GET_IDENTITY)
5702                         return -ENOTTY;
5703                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5704         }
5705
5706         return -EINVAL;
5707 }
5708
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
5716 static const char * ipr_ioa_info(struct Scsi_Host *host)
5717 {
5718         static char buffer[512];
5719         struct ipr_ioa_cfg *ioa_cfg;
5720         unsigned long lock_flags = 0;
5721
5722         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5723
5724         spin_lock_irqsave(host->host_lock, lock_flags);
5725         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5726         spin_unlock_irqrestore(host->host_lock, lock_flags);
5727
5728         return buffer;
5729 }
5730
/* SCSI midlayer host template: entry points and limits for IPR adapters */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
5758
5759 /**
5760  * ipr_ata_phy_reset - libata phy_reset handler
5761  * @ap:         ata port to reset
5762  *
5763  **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Wait for any in-progress adapter reset/reload to finish; the
	 * host lock must be dropped while sleeping */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	/* Adapter is not accepting commands - nothing to do */
	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		/* Reset failed - report no attached device */
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	/* Publish the device class discovered by the adapter; treat
	 * "unknown" the same as no device */
	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}
5798
5799 /**
5800  * ipr_ata_post_internal - Cleanup after an internal command
5801  * @qc: ATA queued command
5802  *
5803  * Return value:
5804  *      none
5805  **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Wait out any adapter reset/reload in progress, dropping the
	 * host lock while sleeping */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	/* If this internal command is still outstanding on the adapter,
	 * reset the device to get it back */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
5828
5829 /**
5830  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5831  * @regs:       destination
5832  * @tf: source ATA taskfile
5833  *
5834  * Return value:
5835  *      none
5836  **/
5837 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5838                              struct ata_taskfile *tf)
5839 {
5840         regs->feature = tf->feature;
5841         regs->nsect = tf->nsect;
5842         regs->lbal = tf->lbal;
5843         regs->lbam = tf->lbam;
5844         regs->lbah = tf->lbah;
5845         regs->device = tf->device;
5846         regs->command = tf->command;
5847         regs->hob_feature = tf->hob_feature;
5848         regs->hob_nsect = tf->hob_nsect;
5849         regs->hob_lbal = tf->hob_lbal;
5850         regs->hob_lbam = tf->hob_lbam;
5851         regs->hob_lbah = tf->hob_lbah;
5852         regs->ctl = tf->ctl;
5853 }
5854
5855 /**
5856  * ipr_sata_done - done function for SATA commands
5857  * @ipr_cmd:    ipr command struct
5858  *
5859  * This function is invoked by the interrupt handler for
5860  * ops generated by the SCSI mid-layer to SATA devices
5861  *
5862  * Return value:
5863  *      none
5864  **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/* Save the ATA status area on the sata_port so ipr_qc_fill_rtf()
	 * can read it later; the IOASA layout differs on SIS-64 adapters */
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	/* Translate the completion status into a libata error mask */
	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
5891
5892 /**
5893  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5894  * @ipr_cmd:    ipr command struct
5895  * @qc:         ATA queued command
5896  *
5897  **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	/* Zero-length transfer - no IOADL to build */
	if (len == 0)
		return;

	/* Set descriptor flags and the IOARCB direction bit */
	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	/* Point the IOARCB at the ATA IOADL embedded in this command block */
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

	/* One 64-bit descriptor per scatter/gather element */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	/* Mark the final descriptor so the adapter knows the list end */
	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
5937
5938 /**
5939  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5940  * @ipr_cmd:    ipr command struct
5941  * @qc:         ATA queued command
5942  *
5943  **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	/* Zero-length transfer - no IOADL to build */
	if (len == 0)
		return;

	/* Writes and reads use distinct IOARCB length/IOADL-length fields
	 * on SIS-32 adapters */
	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	/* One descriptor per scatter/gather element; flags and length are
	 * packed into a single big-endian word */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	/* Mark the final descriptor so the adapter knows the list end */
	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
5982
5983 /**
5984  * ipr_qc_issue - Issue a SATA qc to a device
5985  * @qc: queued command
5986  *
5987  * Return value:
5988  *      0 if success
5989  **/
5990 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5991 {
5992         struct ata_port *ap = qc->ap;
5993         struct ipr_sata_port *sata_port = ap->private_data;
5994         struct ipr_resource_entry *res = sata_port->res;
5995         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5996         struct ipr_cmnd *ipr_cmd;
5997         struct ipr_ioarcb *ioarcb;
5998         struct ipr_ioarcb_ata_regs *regs;
5999
6000         if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6001                 return AC_ERR_SYSTEM;
6002
6003         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6004         ioarcb = &ipr_cmd->ioarcb;
6005
6006         if (ioa_cfg->sis64) {
6007                 regs = &ipr_cmd->i.ata_ioadl.regs;
6008                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6009         } else
6010                 regs = &ioarcb->u.add_data.u.regs;
6011
6012         memset(regs, 0, sizeof(*regs));
6013         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6014
6015         list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6016         ipr_cmd->qc = qc;
6017         ipr_cmd->done = ipr_sata_done;
6018         ipr_cmd->ioarcb.res_handle = res->res_handle;
6019         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6020         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6021         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6022         ipr_cmd->dma_use_sg = qc->n_elem;
6023
6024         if (ioa_cfg->sis64)
6025                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6026         else
6027                 ipr_build_ata_ioadl(ipr_cmd, qc);
6028
6029         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6030         ipr_copy_sata_tf(regs, &qc->tf);
6031         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6032         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6033
6034         switch (qc->tf.protocol) {
6035         case ATA_PROT_NODATA:
6036         case ATA_PROT_PIO:
6037                 break;
6038
6039         case ATA_PROT_DMA:
6040                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6041                 break;
6042
6043         case ATAPI_PROT_PIO:
6044         case ATAPI_PROT_NODATA:
6045                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6046                 break;
6047
6048         case ATAPI_PROT_DMA:
6049                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6050                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6051                 break;
6052
6053         default:
6054                 WARN_ON(1);
6055                 return AC_ERR_INVALID;
6056         }
6057
6058         mb();
6059
6060         ipr_send_command(ipr_cmd);
6061
6062         return 0;
6063 }
6064
6065 /**
6066  * ipr_qc_fill_rtf - Read result TF
6067  * @qc: ATA queued command
6068  *
6069  * Return value:
6070  *      true
6071  **/
6072 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6073 {
6074         struct ipr_sata_port *sata_port = qc->ap->private_data;
6075         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6076         struct ata_taskfile *tf = &qc->result_tf;
6077
6078         tf->feature = g->error;
6079         tf->nsect = g->nsect;
6080         tf->lbal = g->lbal;
6081         tf->lbam = g->lbam;
6082         tf->lbah = g->lbah;
6083         tf->device = g->device;
6084         tf->command = g->status;
6085         tf->hob_nsect = g->hob_nsect;
6086         tf->hob_lbal = g->hob_lbal;
6087         tf->hob_lbam = g->hob_lbam;
6088         tf->hob_lbah = g->hob_lbah;
6089         tf->ctl = g->alt_status;
6090
6091         return true;
6092 }
6093
/* libata port operations implemented by this driver */
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};
6104
/* Port capabilities advertised to libata for attached SATA devices */
static struct ata_port_info sata_port_info = {
	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
	.pio_mask	= 0x10, /* pio4 */
	.mwdma_mask = 0x07,
	.udma_mask	= 0x7f, /* udma0-6 */
	.port_ops	= &ipr_sata_ops
};
6113
#ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early Gemstone adapters do not work
 * reliably (see ipr_invalid_adapter below) */
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	/* Only type 0x5702 adapters below PCI revision 4 are affected */
	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
			if (__is_processor(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
/* Non-pSeries builds: every adapter is considered supported */
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
6152
6153 /**
6154  * ipr_ioa_bringdown_done - IOA bring down completion.
6155  * @ipr_cmd:    ipr command struct
6156  *
6157  * This function processes the completion of an adapter bring down.
6158  * It wakes any reset sleepers.
6159  *
6160  * Return value:
6161  *      IPR_RC_JOB_RETURN
6162  **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	/* Reset/reload is over: clear state, recycle the command block,
	 * and wake anyone sleeping on the reset wait queue */
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* Drop the host lock around scsi_unblock_requests() */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
6180
6181 /**
6182  * ipr_ioa_reset_done - IOA reset completion.
6183  * @ipr_cmd:    ipr command struct
6184  *
6185  * This function processes the completion of an adapter reset.
6186  * It schedules any necessary mid-layer add/removes and
6187  * wakes any reset sleepers.
6188  *
6189  * Return value:
6190  *      IPR_RC_JOB_RETURN
6191  **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	/* Adapter is back: allow new commands and note the runtime reset */
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	/* Trace if any resource is flagged for a midlayer add/remove;
	 * the work queue scheduled below performs the actual updates */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	/* Re-arm the HCAM buffers: the first IPR_NUM_LOG_HCAMS are sent as
	 * log-data HCAMs, the remainder as config-change HCAMs */
	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	/* Recycle the command block and wake reset sleepers */
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* Drop the host lock around scsi_unblock_requests() */
	spin_unlock(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock(ioa_cfg->host->host_lock);

	/* A new reset may have begun while the lock was dropped */
	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6238
6239 /**
6240  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6241  * @supported_dev:      supported device struct
6242  * @vpids:                      vendor product id struct
6243  *
6244  * Return value:
6245  *      none
6246  **/
6247 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6248                                  struct ipr_std_inq_vpids *vpids)
6249 {
6250         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6251         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6252         supported_dev->num_records = 1;
6253         supported_dev->data_length =
6254                 cpu_to_be16(sizeof(struct ipr_supported_device));
6255         supported_dev->reserved = 0;
6256 }
6257
6258 /**
6259  * ipr_set_supported_devs - Send Set Supported Devices for a device
6260  * @ipr_cmd:    ipr command struct
6261  *
6262  * This function sends a Set Supported Devices to the adapter
6263  *
6264  * Return value:
6265  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6266  **/
6267 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6268 {
6269         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6270         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6271         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6272         struct ipr_resource_entry *res = ipr_cmd->u.res;
6273
6274         ipr_cmd->job_step = ipr_ioa_reset_done;
6275
6276         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6277                 if (!ipr_is_scsi_disk(res))
6278                         continue;
6279
6280                 ipr_cmd->u.res = res;
6281                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6282
6283                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6284                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6285                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6286
6287                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6288                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6289                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6290                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6291
6292                 ipr_init_ioadl(ipr_cmd,