/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
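/*
 * The parameters above are load-time options, e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=4 enable_cache=0
 *
 * With a permission mask of 0 they are not exposed under
 * /sys/module/ipr/parameters after load.
 */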
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;

	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
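/*
 * Command block lifecycle: blocks live on ioa_cfg->free_q until taken
 * here, sit on ioa_cfg->pending_q while owned by the adapter, and are
 * returned to free_q by the done handlers.
 */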
/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
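/*
 * Note: the trailing readl of sense_interrupt_reg above is presumably
 * there to push the posted MMIO writes out to the adapter so the mask
 * takes effect before the caller proceeds.
 */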
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
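/*
 * PCI_X_CMD_DPERR_E (data parity error recovery enable) and
 * PCI_X_CMD_ERO (enable relaxed ordering) are OR'd into the saved value
 * so that both features end up enabled when the register is restored.
 */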
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
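/*
 * The done handlers invoked above (ipr_scsi_eh_done/ipr_sata_eh_done)
 * complete the op back to the mid-layer and return the command block
 * to ioa_cfg->free_q.
 */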
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
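/*
 * Writing the IOARCB bus address to the IOARRIN register is what hands
 * the command to the adapter; the memory barrier beforehand ensures the
 * IOARCB contents are globally visible before the doorbell write.
 */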
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
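/*
 * Note: the caller must hold the host lock; it is dropped here while
 * sleeping on the completion and re-acquired before returning.
 */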
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
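/*
 * HCAMs act as the adapter-to-host async notification channel: the
 * buffer posted here is completed by the adapter when it has a config
 * change or error to report, and the done routines
 * (ipr_process_ccn/ipr_process_error) log the event and re-post it.
 */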
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"--",
	"Reserved",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"Reserved",
	"Reserved",
	"Reserved",
	"1.5Gbps",
	"3.0Gbps",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, the adapter
	   is now marked dead, so report failure. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
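/*
 * The loop above doubles the delay on each iteration (a bounded
 * exponential backoff), switching from udelay to mdelay once the delay
 * grows past what udelay can safely handle.
 */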
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
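/*
 * Reading aid, summarizing the LDUMP mailbox handshake implemented
 * above (derived from the code, not an additional specification):
 *
 *   host: set RESET_ALERT + IO_DEBUG_ALERT        -> enter LDUMP state
 *   IOA:  raise IO_DEBUG_ACKNOWLEDGE
 *   host: clear the ack, write start address to the mailbox,
 *         clear RESET_ALERT                       -> address valid
 *   per word:
 *     IOA:  raise the ack when the mailbox holds the next word
 *     host: read the mailbox, clear the ack (except after the last word)
 *   host: set RESET_ALERT, clear IO_DEBUG_ALERT, clear the ack
 *   host: poll until the IOA drops RESET_ALERT    -> LDUMP exited
 */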
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
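/*
 * Worked example of the fw_version packing above (values hypothetical):
 * with major_release = 0x03, card_type = 0x02 and
 * minor_release = { 0x00, 0x1E },
 *
 *	fw_version = (0x03 << 24) | (0x02 << 16) | (0x00 << 8) | 0x1E
 *	           = 0x0302001E
 *
 * which matches the byte order ipr_show_fw_version() prints below as
 * "0302001E".
 */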
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/*
	 * First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data. sdt represents the pointer
	 * to the ioa generated dump table. Dump data will be extracted based
	 * on entries in this table.
	 */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
	LEAVE;
}
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while(did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->cfgte.res_addr.bus;
			target = res->cfgte.res_addr.target;
			lun = res->cfgte.res_addr.lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
	LEAVE;
}
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
			      loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int size = IPR_TRACE_SIZE;
	char *src = (char *)ioa_cfg->trace;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
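/*
 * Illustrative user-space sketch (not part of the driver) for reading
 * this binary trace attribute; the host number in the path is an
 * assumption and varies per system:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_host/host0/trace", "rb");
 *		char buf[4096];
 *		size_t n;
 *
 *		if (!f)
 *			return 1;
 *		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */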
static const struct {
	enum ipr_cache_state state;
	char *name;
} cache_state [] = {
	{ CACHE_NONE, "none" },
	{ CACHE_DISABLED, "disabled" },
	{ CACHE_ENABLED, "enabled" }
};
/**
 * ipr_show_write_caching - Show the write caching attribute
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int i, len = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (cache_state[i].state == ioa_cfg->cache_state) {
			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_write_caching - Enable/disable adapter write cache
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will enable/disable adapter write cache.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_write_caching(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	enum ipr_cache_state new_state = CACHE_INVALID;
	int i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (ioa_cfg->cache_state == CACHE_NONE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
			new_state = cache_state[i].state;
			break;
		}
	}

	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
		return -EINVAL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->cache_state == new_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	ioa_cfg->cache_state = new_state;
	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return count;
}

static struct class_device_attribute ipr_ioa_cache_attr = {
	.attr = {
		.name =		"write_cache",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_write_caching,
	.store = ipr_store_write_caching
};
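/*
 * Illustrative user-space use of this attribute (paths and the host
 * number are assumptions), e.g. to disable the adapter write cache:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/class/scsi_host/host0/write_cache",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "disabled", 8);
 *		close(fd);
 *		return 0;
 *	}
 *
 * Note that the store routine blocks until the adapter reset it
 * triggers has completed.
 */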
/**
 * ipr_show_fw_version - Show the firmware version
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct class_device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 * 	number of bytes consumed from buffer
 **/
static ssize_t ipr_store_log_level(struct class_device *class_dev,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct class_device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
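/*
 * Example (host number hypothetical): writing the string "2" to
 * /sys/class/scsi_host/host0/log_level sets ioa_cfg->log_level to 2
 * via simple_strtoul() above, and a subsequent read of the attribute
 * returns "2\n".
 */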
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct class_device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
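/*
 * Example (host number hypothetical): writing any string to
 * /sys/class/scsi_host/host0/run_diagnostics resets the adapter and
 * blocks for the reset time plus roughly one second of error-logging
 * grace; the write returns the byte count on a clean run and -EIO if
 * the adapter logged any errors or is still resetting.
 */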
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_adapter_state - Change adapter state
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct class_device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct class_device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
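/*
 * Example (host number hypothetical): writing any string to
 * /sys/class/scsi_host/host0/reset_host initiates a normal-shutdown
 * adapter reset; the write blocks until the reset/reload completes and
 * then returns the byte count.
 */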
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(scatterlist[j].page, order);
			kfree(sglist);
			return NULL;
		}

		scatterlist[i].page = page;
	}

	return sglist;
}
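/*
 * Worked sizing example for the math above (an IPR_MAX_SGLIST of 64
 * and 4K pages are assumptions): for buf_len = 614400 (600 KB),
 *
 *	sg_size    = 614400 / 63 = 9752
 *	order      = get_order(9752) = 2
 *	bsize_elem = 4096 << 2 = 16384
 *	num_elem   = (614400 / 16384) + 1 = 38
 *
 * since 614400 is not a multiple of 16384, so the image is staged in
 * 38 order-2 page allocations.
 */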
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sglist->scatterlist[i].page, sglist->order);

	kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	/* Copy the full elements first */
	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		kaddr = kmap(scatterlist[i].page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(scatterlist[i].page);

		scatterlist[i].length = bsize_elem;
	}

	/* Copy any remaining partial element */
	if (len % bsize_elem) {
		kaddr = kmap(scatterlist[i].page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(scatterlist[i].page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
	ioarcb->write_ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	/* Mark the final descriptor as the last in the list */
	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct class_device *class_dev,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
		release_firmware(fw_entry);
		return -EINVAL;
	}

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct class_device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
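/*
 * Illustrative firmware update from user space (file name and host
 * number are hypothetical): place the microcode image where the
 * firmware loader can find it, typically under /lib/firmware, then
 * write the file name to /sys/class/scsi_host/host0/update_fw. The
 * store routine above requests the image, validates its header,
 * stages it in the scatter/gather buffer, and resets the adapter to
 * apply it.
 */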
static struct class_device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_cache_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
			     loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes written on success / other on failure
 **/
static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
			      loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
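/*
 * Illustrative dump retrieval sequence from user space (host number is
 * an assumption): write "1" to /sys/class/scsi_host/host0/dump to
 * allocate dump memory and arm WAIT_FOR_DUMP, read the binary "dump"
 * attribute once the adapter has failed and the dump has been
 * obtained, then write "0" to release the dump memory again.
 */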
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_adapter_handle
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
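/*
 * Worked example of the geometry above (capacity value hypothetical):
 * for capacity = 8388608 sectors (4 GB at 512 bytes/sector),
 * cylinders = 8388608 / (128 * 32) = 2048, so fdisk sees 128 heads x
 * 32 sectors = 4096 sectors (2 MB) per cylinder, and cylinder-aligned
 * partitions therefore land on 4K boundaries as intended.
 */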
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->cfgte.res_addr.bus == starget->channel) &&
		    (res->cfgte.res_addr.target == starget->id) &&