/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
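
/*
 * Example (illustrative only): loading the driver with a lower maximum
 * bus speed and more verbose error logging, using the module parameters
 * declared above:
 *
 *	modprobe ipr max_speed=0 log_level=3
 */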

/*  A constant array of IOASCs/URCs/Error Messages */
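/*
 * Each entry: the IOASC, a flag controlling IOASA dumping elsewhere in
 * the driver, the log level at or above which the HCAM is logged
 * (0 = never logged; see ipr_handle_log_data), and the message text
 */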
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
217	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
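
/*
 * In the masks above, 'X' marks a product ID byte that must match the
 * enclosure's reported product ID; any other character (such as '*') is
 * a "don't care" position for the matching done elsewhere in the driver.
 * The final field is the maximum bus speed, in MB/s, allowed with that
 * enclosure.
 */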

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
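	/* Read back from the adapter to flush the posted MMIO writes above */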
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

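	/*
	 * Make sure data parity error recovery and relaxed ordering stay
	 * enabled when this saved value is later written back by
	 * ipr_set_pcix_cmd_reg()
	 */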
589	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
590	return 0;
591}
592
593/**
594 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
595 * @ioa_cfg:	ioa config struct
596 *
597 * Return value:
598 * 	0 on success / -EIO on failure
599 **/
600static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
601{
602	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
603
604	if (pcix_cmd_reg) {
605		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
606					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
607			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
608			return -EIO;
609		}
610	}
611
612	return 0;
613}
614
615/**
616 * ipr_sata_eh_done - done function for aborted SATA commands
617 * @ipr_cmd:	ipr command struct
618 *
619 * This function is invoked for ops generated to SATA
620 * devices which are being aborted.
621 *
622 * Return value:
623 * 	none
624 **/
625static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
626{
627	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
628	struct ata_queued_cmd *qc = ipr_cmd->qc;
629	struct ipr_sata_port *sata_port = qc->ap->private_data;
630
631	qc->err_mask |= AC_ERR_OTHER;
632	sata_port->ioasa.status |= ATA_BUSY;
633	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
634	ata_qc_complete(qc);
635}
636
637/**
638 * ipr_scsi_eh_done - mid-layer done function for aborted ops
639 * @ipr_cmd:	ipr command struct
640 *
641 * This function is invoked by the interrupt handler for
642 * ops generated by the SCSI mid-layer which are being aborted.
643 *
644 * Return value:
645 * 	none
646 **/
647static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
648{
649	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
650	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
651
652	scsi_cmd->result |= (DID_ERROR << 16);
653
654	scsi_dma_unmap(ipr_cmd->scsi_cmd);
655	scsi_cmd->scsi_done(scsi_cmd);
656	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
657}
658
659/**
660 * ipr_fail_all_ops - Fails all outstanding ops.
661 * @ioa_cfg:	ioa config struct
662 *
663 * This function fails all outstanding ops.
664 *
665 * Return value:
666 * 	none
667 **/
668static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
669{
670	struct ipr_cmnd *ipr_cmd, *temp;
671
672	ENTER;
673	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
674		list_del(&ipr_cmd->queue);
675
676		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
677		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
678
679		if (ipr_cmd->scsi_cmd)
680			ipr_cmd->done = ipr_scsi_eh_done;
681		else if (ipr_cmd->qc)
682			ipr_cmd->done = ipr_sata_eh_done;
683
684		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
685		del_timer(&ipr_cmd->timer);
686		ipr_cmd->done(ipr_cmd);
687	}
688
689	LEAVE;
690}
691
692/**
693 * ipr_do_req -  Send driver initiated requests.
694 * @ipr_cmd:		ipr command struct
695 * @done:			done function
696 * @timeout_func:	timeout function
697 * @timeout:		timeout value
698 *
699 * This function sends the specified command to the adapter with the
700 * timeout given. The done function is invoked on command completion.
701 *
702 * Return value:
703 * 	none
704 **/
705static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
706		       void (*done) (struct ipr_cmnd *),
707		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
708{
709	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
710
711	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
712
713	ipr_cmd->done = done;
714
715	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
716	ipr_cmd->timer.expires = jiffies + timeout;
717	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
718
719	add_timer(&ipr_cmd->timer);
720
721	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
722
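	/*
	 * Make sure the IOARCB updates are in memory before the adapter is
	 * told about the command
	 */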
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
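
/*
 * Typical caller pattern (illustrative sketch only, not a verbatim copy
 * of a caller in this file; the opcode and timeout below are
 * placeholders). With the host lock held, grab a free command block,
 * fill in the IOARCB, then sleep until the adapter completes the op:
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
 *	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 *	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = <opcode>;
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, <timeout in jiffies>);
 */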

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
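
/*
 * Example (illustrative): for buf = "IBM     " called with i = 7, the
 * trailing blanks are stripped, a single space and a terminator are
 * appended, buf becomes "IBM ", and 4 is returned as the offset at
 * which the caller should append the next field.
 */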

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

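	/*
	 * Walk all 18 possible members: entries 0-9 live in array_member,
	 * entries 10-17 in array_member2 (the pointer is switched below
	 * once entry 9 has been logged)
	 */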
1266	for (i = 0; i < 18; i++) {
1267		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1268			continue;
1269
1270		if (be32_to_cpu(error->exposed_mode_adn) == i)
1271			ipr_err("Exposed Array Member %d:\n", i);
1272		else
1273			ipr_err("Array Member %d:\n", i);
1274
1275		ipr_log_vpd(&array_entry->vpd);
1276
1277		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1278		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1279				 "Expected Location");
1280
1281		ipr_err_separator;
1282
1283		if (i == 9)
1284			array_entry = error->array_member2;
1285		else
1286			array_entry++;
1287	}
1288}
1289
1290/**
1291 * ipr_log_hex_data - Log additional hex IOA error data.
1292 * @ioa_cfg:	ioa config struct
1293 * @data:		IOA error data
1294 * @len:		data length
1295 *
1296 * Return value:
1297 * 	none
1298 **/
1299static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1300{
1301	int i;
1302
1303	if (len == 0)
1304		return;
1305
1306	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1307		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1308
1309	for (i = 0; i < len / 4; i += 4) {
1310		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1311			be32_to_cpu(data[i]),
1312			be32_to_cpu(data[i+1]),
1313			be32_to_cpu(data[i+2]),
1314			be32_to_cpu(data[i+3]));
1315	}
1316}
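
/*
 * Each output line covers 16 bytes of error data, with the leading
 * offset advancing by 0x10 per line, for example (illustrative values):
 *
 *	00000000: 00000001 DEADBEEF 00000000 0000FFFF
 */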

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1771 * @ipr_cmd:	ipr command struct
1772 *
1773 * This function blocks host requests and initiates an
1774 * adapter reset.
1775 *
1776 * Return value:
1777 * 	none
1778 **/
1779static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1780{
1781	unsigned long lock_flags = 0;
1782	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1783
1784	ENTER;
1785	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1786
1787	ioa_cfg->errors_logged++;
1788	dev_err(&ioa_cfg->pdev->dev,
1789		"Adapter timed out transitioning to operational.\n");
1790
1791	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1792		ioa_cfg->sdt_state = GET_DUMP;
1793
1794	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1795		if (ipr_fastfail)
1796			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1797		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1798	}
1799
1800	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1801	LEAVE;
1802}
1803
1804/**
1805 * ipr_reset_reload - Reset/Reload the IOA
1806 * @ioa_cfg:		ioa config struct
1807 * @shutdown_type:	shutdown type
1808 *
1809 * This function resets the adapter and re-initializes it.
1810 * This function assumes that all new host commands have been stopped.
1811 * Return value:
1812 * 	SUCCESS / FAILED
1813 **/
1814static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1815			    enum ipr_shutdown_type shutdown_type)
1816{
1817	if (!ioa_cfg->in_reset_reload)
1818		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1819
1820	spin_unlock_irq(ioa_cfg->host->host_lock);
1821	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1822	spin_lock_irq(ioa_cfg->host->host_lock);
1823
1824	/* If a host reset arrived while we were already resetting the
1825	 * adapter and that reset failed, the adapter is now dead. */
1826	if (ioa_cfg->ioa_is_dead) {
1827		ipr_trace;
1828		return FAILED;
1829	}
1830
1831	return SUCCESS;
1832}
1833
1834/**
1835 * ipr_find_ses_entry - Find matching SES in SES table
1836 * @res:	resource entry struct of SES
1837 *
1838 * Return value:
1839 * 	pointer to SES table entry / NULL on failure
1840 **/
1841static const struct ipr_ses_table_entry *
1842ipr_find_ses_entry(struct ipr_resource_entry *res)
1843{
1844	int i, j, matches;
1845	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1846
1847	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1848		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1849			if (ste->compare_product_id_byte[j] == 'X') {
1850				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1851					matches++;
1852				else
1853					break;
1854			} else
1855				matches++;
1856		}
1857
1858		if (matches == IPR_PROD_ID_LEN)
1859			return ste;
1860	}
1861
1862	return NULL;
1863}
1864
1865/**
1866 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1867 * @ioa_cfg:	ioa config struct
1868 * @bus:		SCSI bus
1869 * @bus_width:	bus width
1870 *
1871 * Return value:
1872 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz.
1873 *	For a 2-byte (16-bit) wide SCSI bus, the maximum data rate
1874 *	is twice the bus clock rate (e.g. a wide bus clocked at
1875 *	160 MHz transfers at most 320 MB/sec).
1876 **/
1877static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1878{
1879	struct ipr_resource_entry *res;
1880	const struct ipr_ses_table_entry *ste;
1881	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1882
1883	/* Loop through each config table entry in the config table buffer */
1884	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1885		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1886			continue;
1887
1888		if (bus != res->cfgte.res_addr.bus)
1889			continue;
1890
1891		if (!(ste = ipr_find_ses_entry(res)))
1892			continue;
1893
1894		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1895	}
1896
1897	return max_xfer_rate;
1898}
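
/*
 * A worked example of the conversion above, assuming the SES table
 * limit (max_bus_speed_limit) is expressed in MB/sec: a SES limited
 * to 160 MB/sec on a 16-bit (2-byte) wide bus yields
 *
 *	max_xfer_rate = (160 * 10) / (16 / 8) = 800
 *
 * i.e. 800 in 100KHz units, an 80 MHz bus clock. The same limit on
 * an 8-bit bus yields 1600, i.e. 160 MHz.
 */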
1899
1900/**
1901 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1902 * @ioa_cfg:		ioa config struct
1903 * @max_delay:		max delay in microseconds to wait
1904 *
1905 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1906 *
1907 * Return value:
1908 * 	0 on success / other on failure
1909 **/
1910static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1911{
1912	volatile u32 pcii_reg;
1913	int delay = 1;
1914
1915	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1916	while (delay < max_delay) {
1917		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1918
1919		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1920			return 0;
1921
1922		/* udelay cannot be used if delay is more than a few milliseconds */
1923		if ((delay / 1000) > MAX_UDELAY_MS)
1924			mdelay(delay / 1000);
1925		else
1926			udelay(delay);
1927
1928		delay += delay;
1929	}
1930	return -EIO;
1931}
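
/*
 * The loop above doubles its delay on every pass (1us, 2us, 4us, ...),
 * so the total busy-wait before giving up is bounded by
 *
 *	1 + 2 + 4 + ... + 2^n < 2 * max_delay microseconds
 *
 * for the largest 2^n below max_delay, i.e. roughly twice max_delay
 * in the worst case.
 */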
1932
1933/**
1934 * ipr_get_ldump_data_section - Dump IOA memory
1935 * @ioa_cfg:			ioa config struct
1936 * @start_addr:			adapter address to dump
1937 * @dest:				destination kernel buffer
1938 * @length_in_words:	length to dump in 4 byte words
1939 *
1940 * Return value:
1941 * 	0 on success / -EIO on failure
1942 **/
1943static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1944				      u32 start_addr,
1945				      __be32 *dest, u32 length_in_words)
1946{
1947	volatile u32 temp_pcii_reg;
1948	int i, delay = 0;
1949
1950	/* Write IOA interrupt reg starting LDUMP state  */
1951	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1952	       ioa_cfg->regs.set_uproc_interrupt_reg);
1953
1954	/* Wait for IO debug acknowledge */
1955	if (ipr_wait_iodbg_ack(ioa_cfg,
1956			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1957		dev_err(&ioa_cfg->pdev->dev,
1958			"IOA dump long data transfer timeout\n");
1959		return -EIO;
1960	}
1961
1962	/* Signal LDUMP interlocked - clear IO debug ack */
1963	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1964	       ioa_cfg->regs.clr_interrupt_reg);
1965
1966	/* Write Mailbox with starting address */
1967	writel(start_addr, ioa_cfg->ioa_mailbox);
1968
1969	/* Signal address valid - clear IOA Reset alert */
1970	writel(IPR_UPROCI_RESET_ALERT,
1971	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1972
1973	for (i = 0; i < length_in_words; i++) {
1974		/* Wait for IO debug acknowledge */
1975		if (ipr_wait_iodbg_ack(ioa_cfg,
1976				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1977			dev_err(&ioa_cfg->pdev->dev,
1978				"IOA dump short data transfer timeout\n");
1979			return -EIO;
1980		}
1981
1982		/* Read data from mailbox and increment destination pointer */
1983		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1984		dest++;
1985
1986		/* For all but the last word of data, signal data received */
1987		if (i < (length_in_words - 1)) {
1988			/* Signal dump data received - Clear IO debug Ack */
1989			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1990			       ioa_cfg->regs.clr_interrupt_reg);
1991		}
1992	}
1993
1994	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1995	writel(IPR_UPROCI_RESET_ALERT,
1996	       ioa_cfg->regs.set_uproc_interrupt_reg);
1997
1998	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1999	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2000
2001	/* Signal dump data received - Clear IO debug Ack */
2002	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2003	       ioa_cfg->regs.clr_interrupt_reg);
2004
2005	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2006	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2007		temp_pcii_reg =
2008		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2009
2010		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2011			return 0;
2012
2013		udelay(10);
2014		delay += 10;
2015	}
2016
2017	return 0;
2018}
2019
2020#ifdef CONFIG_SCSI_IPR_DUMP
2021/**
2022 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2023 * @ioa_cfg:		ioa config struct
2024 * @pci_address:	adapter address
2025 * @length:			length of data to copy
2026 *
2027 * Copy data from PCI adapter to kernel buffer.
2028 * Note: length MUST be a 4 byte multiple
2029 * Return value:
2030 * 	0 on success / other on failure
2031 **/
2032static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2033			unsigned long pci_address, u32 length)
2034{
2035	int bytes_copied = 0;
2036	int cur_len, rc, rem_len, rem_page_len;
2037	__be32 *page;
2038	unsigned long lock_flags = 0;
2039	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2040
2041	while (bytes_copied < length &&
2042	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2043		if (ioa_dump->page_offset >= PAGE_SIZE ||
2044		    ioa_dump->page_offset == 0) {
2045			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2046
2047			if (!page) {
2048				ipr_trace;
2049				return bytes_copied;
2050			}
2051
2052			ioa_dump->page_offset = 0;
2053			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2054			ioa_dump->next_page_index++;
2055		} else
2056			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2057
2058		rem_len = length - bytes_copied;
2059		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2060		cur_len = min(rem_len, rem_page_len);
2061
2062		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2063		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2064			rc = -EIO;
2065		} else {
2066			rc = ipr_get_ldump_data_section(ioa_cfg,
2067							pci_address + bytes_copied,
2068							&page[ioa_dump->page_offset / 4],
2069							(cur_len / sizeof(u32)));
2070		}
2071		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2072
2073		if (!rc) {
2074			ioa_dump->page_offset += cur_len;
2075			bytes_copied += cur_len;
2076		} else {
2077			ipr_trace;
2078			break;
2079		}
2080		schedule();
2081	}
2082
2083	return bytes_copied;
2084}
2085
2086/**
2087 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2088 * @hdr:	dump entry header struct
2089 *
2090 * Return value:
2091 * 	nothing
2092 **/
2093static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2094{
2095	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2096	hdr->num_elems = 1;
2097	hdr->offset = sizeof(*hdr);
2098	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2099}
2100
2101/**
2102 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2103 * @ioa_cfg:	ioa config struct
2104 * @driver_dump:	driver dump struct
2105 *
2106 * Return value:
2107 * 	nothing
2108 **/
2109static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2110				   struct ipr_driver_dump *driver_dump)
2111{
2112	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2113
2114	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2115	driver_dump->ioa_type_entry.hdr.len =
2116		sizeof(struct ipr_dump_ioa_type_entry) -
2117		sizeof(struct ipr_dump_entry_header);
2118	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2119	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2120	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2121	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2122		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2123		ucode_vpd->minor_release[1];
2124	driver_dump->hdr.num_entries++;
2125}
2126
2127/**
2128 * ipr_dump_version_data - Fill in the driver version in the dump.
2129 * @ioa_cfg:	ioa config struct
2130 * @driver_dump:	driver dump struct
2131 *
2132 * Return value:
2133 * 	nothing
2134 **/
2135static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2136				  struct ipr_driver_dump *driver_dump)
2137{
2138	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2139	driver_dump->version_entry.hdr.len =
2140		sizeof(struct ipr_dump_version_entry) -
2141		sizeof(struct ipr_dump_entry_header);
2142	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2143	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2144	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2145	driver_dump->hdr.num_entries++;
2146}
2147
2148/**
2149 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2150 * @ioa_cfg:	ioa config struct
2151 * @driver_dump:	driver dump struct
2152 *
2153 * Return value:
2154 * 	nothing
2155 **/
2156static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2157				   struct ipr_driver_dump *driver_dump)
2158{
2159	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2160	driver_dump->trace_entry.hdr.len =
2161		sizeof(struct ipr_dump_trace_entry) -
2162		sizeof(struct ipr_dump_entry_header);
2163	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2164	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2165	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2166	driver_dump->hdr.num_entries++;
2167}
2168
2169/**
2170 * ipr_dump_location_data - Fill in the IOA location in the dump.
2171 * @ioa_cfg:	ioa config struct
2172 * @driver_dump:	driver dump struct
2173 *
2174 * Return value:
2175 * 	nothing
2176 **/
2177static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2178				   struct ipr_driver_dump *driver_dump)
2179{
2180	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2181	driver_dump->location_entry.hdr.len =
2182		sizeof(struct ipr_dump_location_entry) -
2183		sizeof(struct ipr_dump_entry_header);
2184	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2185	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2186	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2187	driver_dump->hdr.num_entries++;
2188}
2189
2190/**
2191 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2192 * @ioa_cfg:	ioa config struct
2193 * @dump:		dump struct
2194 *
2195 * Return value:
2196 * 	nothing
2197 **/
2198static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2199{
2200	unsigned long start_addr, sdt_word;
2201	unsigned long lock_flags = 0;
2202	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2203	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2204	u32 num_entries, start_off, end_off;
2205	u32 bytes_to_copy, bytes_copied, rc;
2206	struct ipr_sdt *sdt;
2207	int i;
2208
2209	ENTER;
2210
2211	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2212
2213	if (ioa_cfg->sdt_state != GET_DUMP) {
2214		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2215		return;
2216	}
2217
2218	start_addr = readl(ioa_cfg->ioa_mailbox);
2219
2220	if (!ipr_sdt_is_fmt2(start_addr)) {
2221		dev_err(&ioa_cfg->pdev->dev,
2222			"Invalid dump table format: %lx\n", start_addr);
2223		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2224		return;
2225	}
2226
2227	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2228
2229	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2230
2231	/* Initialize the overall dump header */
2232	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2233	driver_dump->hdr.num_entries = 1;
2234	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2235	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2236	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2237	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2238
2239	ipr_dump_version_data(ioa_cfg, driver_dump);
2240	ipr_dump_location_data(ioa_cfg, driver_dump);
2241	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2242	ipr_dump_trace_data(ioa_cfg, driver_dump);
2243
2244	/* Update dump_header */
2245	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2246
2247	/* IOA Dump entry */
2248	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2249	ioa_dump->format = IPR_SDT_FMT2;
2250	ioa_dump->hdr.len = 0;
2251	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2252	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2253
2254	/* The first entries in the sdt are a list of dump addresses and
2255	 * lengths used to gather the real dump data.  sdt points to the
2256	 * IOA generated dump table.  Dump data will be extracted based
2257	 * on entries in this table. */
2258	sdt = &ioa_dump->sdt;
2259
2260	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2261					sizeof(struct ipr_sdt) / sizeof(__be32));
2262
2263	/* Smart Dump table is ready to use and the first entry is valid */
2264	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2265		dev_err(&ioa_cfg->pdev->dev,
2266			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2267			rc, be32_to_cpu(sdt->hdr.state));
2268		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2269		ioa_cfg->sdt_state = DUMP_OBTAINED;
2270		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2271		return;
2272	}
2273
2274	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2275
2276	if (num_entries > IPR_NUM_SDT_ENTRIES)
2277		num_entries = IPR_NUM_SDT_ENTRIES;
2278
2279	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2280
2281	for (i = 0; i < num_entries; i++) {
2282		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2283			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2284			break;
2285		}
2286
2287		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2288			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2289			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2290			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2291
2292			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2293				bytes_to_copy = end_off - start_off;
2294				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2295					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2296					continue;
2297				}
2298
2299				/* Copy data from adapter to driver buffers */
2300				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2301							    bytes_to_copy);
2302
2303				ioa_dump->hdr.len += bytes_copied;
2304
2305				if (bytes_copied != bytes_to_copy) {
2306					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2307					break;
2308				}
2309			}
2310		}
2311	}
2312
2313	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2314
2315	/* Update dump_header */
2316	driver_dump->hdr.len += ioa_dump->hdr.len;
2317	wmb();
2318	ioa_cfg->sdt_state = DUMP_OBTAINED;
2319	LEAVE;
2320}
2321
2322#else
2323#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2324#endif
2325
2326/**
2327 * ipr_release_dump - Free adapter dump memory
2328 * @kref:	kref struct
2329 *
2330 * Return value:
2331 *	nothing
2332 **/
2333static void ipr_release_dump(struct kref *kref)
2334{
2335	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2336	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2337	unsigned long lock_flags = 0;
2338	int i;
2339
2340	ENTER;
2341	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2342	ioa_cfg->dump = NULL;
2343	ioa_cfg->sdt_state = INACTIVE;
2344	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2345
2346	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2347		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2348
2349	kfree(dump);
2350	LEAVE;
2351}
2352
2353/**
2354 * ipr_worker_thread - Worker thread
2355 * @work:		work struct embedded in the ioa config struct
2356 *
2357 * Called at task level from a work thread. This function takes care
2358 * of adding and removing devices from the mid-layer as configuration
2359 * changes are detected by the adapter.
2360 *
2361 * Return value:
2362 * 	nothing
2363 **/
2364static void ipr_worker_thread(struct work_struct *work)
2365{
2366	unsigned long lock_flags;
2367	struct ipr_resource_entry *res;
2368	struct scsi_device *sdev;
2369	struct ipr_dump *dump;
2370	struct ipr_ioa_cfg *ioa_cfg =
2371		container_of(work, struct ipr_ioa_cfg, work_q);
2372	u8 bus, target, lun;
2373	int did_work;
2374
2375	ENTER;
2376	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2377
2378	if (ioa_cfg->sdt_state == GET_DUMP) {
2379		dump = ioa_cfg->dump;
2380		if (!dump) {
2381			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2382			return;
2383		}
2384		kref_get(&dump->kref);
2385		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2386		ipr_get_ioa_dump(ioa_cfg, dump);
2387		kref_put(&dump->kref, ipr_release_dump);
2388
2389		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2390		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2391			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2392		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2393		return;
2394	}
2395
2396restart:
2397	do {
2398		did_work = 0;
2399		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2400			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2401			return;
2402		}
2403
2404		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2405			if (res->del_from_ml && res->sdev) {
2406				did_work = 1;
2407				sdev = res->sdev;
2408				if (!scsi_device_get(sdev)) {
2409					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2410					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2411					scsi_remove_device(sdev);
2412					scsi_device_put(sdev);
2413					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2414				}
2415				break;
2416			}
2417		}
2418	} while (did_work);
2419
2420	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2421		if (res->add_to_ml) {
2422			bus = res->cfgte.res_addr.bus;
2423			target = res->cfgte.res_addr.target;
2424			lun = res->cfgte.res_addr.lun;
2425			res->add_to_ml = 0;
2426			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2427			scsi_add_device(ioa_cfg->host, bus, target, lun);
2428			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2429			goto restart;
2430		}
2431	}
2432
2433	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2434	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2435	LEAVE;
2436}
2437
2438#ifdef CONFIG_SCSI_IPR_TRACE
2439/**
2440 * ipr_read_trace - Dump the adapter trace
2441 * @kobj:		kobject struct
2442 * @buf:		buffer
2443 * @off:		offset
2444 * @count:		buffer size
2445 *
2446 * Return value:
2447 *	number of bytes printed to buffer
2448 **/
2449static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2450			      loff_t off, size_t count)
2451{
2452	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2453	struct Scsi_Host *shost = class_to_shost(cdev);
2454	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2455	unsigned long lock_flags = 0;
2456	int size = IPR_TRACE_SIZE;
2457	char *src = (char *)ioa_cfg->trace;
2458
2459	if (off > size)
2460		return 0;
2461	if (off + count > size) {
2462		size -= off;
2463		count = size;
2464	}
2465
2466	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2467	memcpy(buf, &src[off], count);
2468	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2469	return count;
2470}
2471
2472static struct bin_attribute ipr_trace_attr = {
2473	.attr =	{
2474		.name = "trace",
2475		.mode = S_IRUGO,
2476	},
2477	.size = 0,
2478	.read = ipr_read_trace,
2479};
2480#endif
2481
2482static const struct {
2483	enum ipr_cache_state state;
2484	char *name;
2485} cache_state[] = {
2486	{ CACHE_NONE, "none" },
2487	{ CACHE_DISABLED, "disabled" },
2488	{ CACHE_ENABLED, "enabled" }
2489};
2490
2491/**
2492 * ipr_show_write_caching - Show the write caching attribute
2493 * @class_dev:	class device struct
2494 * @buf:		buffer
2495 *
2496 * Return value:
2497 *	number of bytes printed to buffer
2498 **/
2499static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2500{
2501	struct Scsi_Host *shost = class_to_shost(class_dev);
2502	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2503	unsigned long lock_flags = 0;
2504	int i, len = 0;
2505
2506	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2507	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2508		if (cache_state[i].state == ioa_cfg->cache_state) {
2509			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2510			break;
2511		}
2512	}
2513	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514	return len;
2515}
2516
2517
2518/**
2519 * ipr_store_write_caching - Enable/disable adapter write cache
2520 * @class_dev:	class_device struct
2521 * @buf:		buffer
2522 * @count:		buffer size
2523 *
2524 * This function will enable/disable adapter write cache.
2525 *
2526 * Return value:
2527 * 	count on success / other on failure
2528 **/
2529static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2530					const char *buf, size_t count)
2531{
2532	struct Scsi_Host *shost = class_to_shost(class_dev);
2533	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2534	unsigned long lock_flags = 0;
2535	enum ipr_cache_state new_state = CACHE_INVALID;
2536	int i;
2537
2538	if (!capable(CAP_SYS_ADMIN))
2539		return -EACCES;
2540	if (ioa_cfg->cache_state == CACHE_NONE)
2541		return -EINVAL;
2542
2543	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2544		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2545			new_state = cache_state[i].state;
2546			break;
2547		}
2548	}
2549
2550	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2551		return -EINVAL;
2552
2553	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2554	if (ioa_cfg->cache_state == new_state) {
2555		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556		return count;
2557	}
2558
2559	ioa_cfg->cache_state = new_state;
2560	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2561		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2562	if (!ioa_cfg->in_reset_reload)
2563		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2564	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2565	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2566
2567	return count;
2568}
2569
2570static struct class_device_attribute ipr_ioa_cache_attr = {
2571	.attr = {
2572		.name =		"write_cache",
2573		.mode =		S_IRUGO | S_IWUSR,
2574	},
2575	.show = ipr_show_write_caching,
2576	.store = ipr_store_write_caching
2577};
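
/*
 * Illustrative sysfs usage (the host number is an assumption; it
 * depends on probe order):
 *
 *	# cat /sys/class/scsi_host/host0/write_cache
 *	enabled
 *	# echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * The store initiates a normal shutdown and reset so the new cache
 * state takes effect.
 */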
2578
2579/**
2580 * ipr_show_fw_version - Show the firmware version
2581 * @class_dev:	class device struct
2582 * @buf:		buffer
2583 *
2584 * Return value:
2585 *	number of bytes printed to buffer
2586 **/
2587static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2588{
2589	struct Scsi_Host *shost = class_to_shost(class_dev);
2590	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2591	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2592	unsigned long lock_flags = 0;
2593	int len;
2594
2595	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2596	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2597		       ucode_vpd->major_release, ucode_vpd->card_type,
2598		       ucode_vpd->minor_release[0],
2599		       ucode_vpd->minor_release[1]);
2600	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2601	return len;
2602}
2603
2604static struct class_device_attribute ipr_fw_version_attr = {
2605	.attr = {
2606		.name =		"fw_version",
2607		.mode =		S_IRUGO,
2608	},
2609	.show = ipr_show_fw_version,
2610};
2611
2612/**
2613 * ipr_show_log_level - Show the adapter's error logging level
2614 * @class_dev:	class device struct
2615 * @buf:		buffer
2616 *
2617 * Return value:
2618 * 	number of bytes printed to buffer
2619 **/
2620static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2621{
2622	struct Scsi_Host *shost = class_to_shost(class_dev);
2623	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2624	unsigned long lock_flags = 0;
2625	int len;
2626
2627	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2628	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2629	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2630	return len;
2631}
2632
2633/**
2634 * ipr_store_log_level - Change the adapter's error logging level
2635 * @class_dev:	class device struct
2636 * @buf:		buffer
2637 * @count:		buffer size
2638 * Return value:
2639 * 	number of bytes consumed from the buffer
2640 **/
2641static ssize_t ipr_store_log_level(struct class_device *class_dev,
2642				   const char *buf, size_t count)
2643{
2644	struct Scsi_Host *shost = class_to_shost(class_dev);
2645	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2646	unsigned long lock_flags = 0;
2647
2648	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2649	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2650	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2651	return strlen(buf);
2652}
2653
2654static struct class_device_attribute ipr_log_level_attr = {
2655	.attr = {
2656		.name =		"log_level",
2657		.mode =		S_IRUGO | S_IWUSR,
2658	},
2659	.show = ipr_show_log_level,
2660	.store = ipr_store_log_level
2661};
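
/*
 * Illustrative usage, assuming the adapter is host0. Error detail is
 * decoded only when log_level is at least the table entry's log_hcam
 * level, so raising it produces more verbose HCAM logging:
 *
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 */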
2662
2663/**
2664 * ipr_store_diagnostics - IOA Diagnostics interface
2665 * @class_dev:	class_device struct
2666 * @buf:		buffer
2667 * @count:		buffer size
2668 *
2669 * This function will reset the adapter and wait a reasonable
2670 * amount of time for any errors that the adapter might log.
2671 *
2672 * Return value:
2673 * 	count on success / other on failure
2674 **/
2675static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2676				     const char *buf, size_t count)
2677{
2678	struct Scsi_Host *shost = class_to_shost(class_dev);
2679	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2680	unsigned long lock_flags = 0;
2681	int rc = count;
2682
2683	if (!capable(CAP_SYS_ADMIN))
2684		return -EACCES;
2685
2686	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687	while (ioa_cfg->in_reset_reload) {
2688		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2689		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2690		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2691	}
2692
2693	ioa_cfg->errors_logged = 0;
2694	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2695
2696	if (ioa_cfg->in_reset_reload) {
2697		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2699
2700		/* Wait for a second for any errors to be logged */
2701		msleep(1000);
2702	} else {
2703		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2704		return -EIO;
2705	}
2706
2707	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2709		rc = -EIO;
2710	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711
2712	return rc;
2713}
2714
2715static struct class_device_attribute ipr_diagnostics_attr = {
2716	.attr = {
2717		.name =		"run_diagnostics",
2718		.mode =		S_IWUSR,
2719	},
2720	.store = ipr_store_diagnostics
2721};
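
/*
 * Illustrative usage, assuming host0. Any write starts a normal
 * adapter reset; the write fails with -EIO if the adapter logged
 * errors during the reset:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */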
2722
2723/**
2724 * ipr_show_adapter_state - Show the adapter's state
2725 * @class_dev:	class device struct
2726 * @buf:		buffer
2727 *
2728 * Return value:
2729 * 	number of bytes printed to buffer
2730 **/
2731static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2732{
2733	struct Scsi_Host *shost = class_to_shost(class_dev);
2734	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2735	unsigned long lock_flags = 0;
2736	int len;
2737
2738	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2739	if (ioa_cfg->ioa_is_dead)
2740		len = snprintf(buf, PAGE_SIZE, "offline\n");
2741	else
2742		len = snprintf(buf, PAGE_SIZE, "online\n");
2743	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2744	return len;
2745}
2746
2747/**
2748 * ipr_store_adapter_state - Change adapter state
2749 * @class_dev:	class_device struct
2750 * @buf:		buffer
2751 * @count:		buffer size
2752 *
2753 * This function will change the adapter's state.
2754 *
2755 * Return value:
2756 * 	count on success / other on failure
2757 **/
2758static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2759				       const char *buf, size_t count)
2760{
2761	struct Scsi_Host *shost = class_to_shost(class_dev);
2762	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2763	unsigned long lock_flags;
2764	int result = count;
2765
2766	if (!capable(CAP_SYS_ADMIN))
2767		return -EACCES;
2768
2769	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2770	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2771		ioa_cfg->ioa_is_dead = 0;
2772		ioa_cfg->reset_retries = 0;
2773		ioa_cfg->in_ioa_bringdown = 0;
2774		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2775	}
2776	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2777	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2778
2779	return result;
2780}
2781
2782static struct class_device_attribute ipr_ioa_state_attr = {
2783	.attr = {
2784		.name =		"state",
2785		.mode =		S_IRUGO | S_IWUSR,
2786	},
2787	.show = ipr_show_adapter_state,
2788	.store = ipr_store_adapter_state
2789};
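
/*
 * Illustrative usage, assuming host0. Only the string "online" is
 * acted upon, and only when the adapter is currently dead:
 *
 *	# cat /sys/class/scsi_host/host0/state
 *	offline
 *	# echo online > /sys/class/scsi_host/host0/state
 */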
2790
2791/**
2792 * ipr_store_reset_adapter - Reset the adapter
2793 * @class_dev:	class_device struct
2794 * @buf:		buffer
2795 * @count:		buffer size
2796 *
2797 * This function will reset the adapter.
2798 *
2799 * Return value:
2800 * 	count on success / other on failure
2801 **/
2802static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2803				       const char *buf, size_t count)
2804{
2805	struct Scsi_Host *shost = class_to_shost(class_dev);
2806	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2807	unsigned long lock_flags;
2808	int result = count;
2809
2810	if (!capable(CAP_SYS_ADMIN))
2811		return -EACCES;
2812
2813	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2814	if (!ioa_cfg->in_reset_reload)
2815		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2816	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2817	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2818
2819	return result;
2820}
2821
2822static struct class_device_attribute ipr_ioa_reset_attr = {
2823	.attr = {
2824		.name =		"reset_host",
2825		.mode =		S_IWUSR,
2826	},
2827	.store = ipr_store_reset_adapter
2828};
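
/*
 * Illustrative usage, assuming host0 (the value written is ignored;
 * any write requests a normal shutdown and reset):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 */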
2829
2830/**
2831 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2832 * @buf_len:		buffer length
2833 *
2834 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2835 * list to use for microcode download
2836 *
2837 * Return value:
2838 * 	pointer to sglist / NULL on failure
2839 **/
2840static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2841{
2842	int sg_size, order, bsize_elem, num_elem, i, j;
2843	struct ipr_sglist *sglist;
2844	struct scatterlist *scatterlist;
2845	struct page *page;
2846
2847	/* Get the minimum size per scatter/gather element */
2848	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2849
2850	/* Get the actual size per element */
2851	order = get_order(sg_size);
2852
2853	/* Determine the actual number of bytes per element */
2854	bsize_elem = PAGE_SIZE * (1 << order);
2855
2856	/* Determine the actual number of sg entries needed */
2857	if (buf_len % bsize_elem)
2858		num_elem = (buf_len / bsize_elem) + 1;
2859	else
2860		num_elem = buf_len / bsize_elem;
2861
2862	/* Allocate a scatter/gather list for the DMA */
2863	sglist = kzalloc(sizeof(struct ipr_sglist) +
2864			 (sizeof(struct scatterlist) * (num_elem - 1)),
2865			 GFP_KERNEL);
2866
2867	if (sglist == NULL) {
2868		ipr_trace;
2869		return NULL;
2870	}
2871
2872	scatterlist = sglist->scatterlist;
2873
2874	sglist->order = order;
2875	sglist->num_sg = num_elem;
2876
2877	/* Allocate a bunch of sg elements */
2878	for (i = 0; i < num_elem; i++) {
2879		page = alloc_pages(GFP_KERNEL, order);
2880		if (!page) {
2881			ipr_trace;
2882
2883			/* Free up what we already allocated */
2884			for (j = i - 1; j >= 0; j--)
2885				__free_pages(scatterlist[j].page, order);
2886			kfree(sglist);
2887			return NULL;
2888		}
2889
2890		scatterlist[i].page = page;
2891	}
2892
2893	return sglist;
2894}
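
/*
 * Sizing sketch for the allocator above, assuming a 4KB PAGE_SIZE,
 * IPR_MAX_SGLIST of 64, and a 600KB (614400 byte) microcode image:
 *
 *	sg_size    = 614400 / 63     = 9752 bytes
 *	order      = get_order(9752) = 2 (four pages, 16KB per element)
 *	bsize_elem = 4096 * (1 << 2) = 16384
 *	num_elem   = 37 full elements + 1 partial = 38
 */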
2895
2896/**
2897 * ipr_free_ucode_buffer - Frees a microcode download buffer
2898 * @sglist:		scatter/gather list pointer
2899 *
2900 * Free a DMA'able ucode download buffer previously allocated with
2901 * ipr_alloc_ucode_buffer
2902 *
2903 * Return value:
2904 * 	nothing
2905 **/
2906static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2907{
2908	int i;
2909
2910	for (i = 0; i < sglist->num_sg; i++)
2911		__free_pages(sglist->scatterlist[i].page, sglist->order);
2912
2913	kfree(sglist);
2914}
2915
2916/**
2917 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2918 * @sglist:		scatter/gather list pointer
2919 * @buffer:		buffer pointer
2920 * @len:		buffer length
2921 *
2922 * Copy a microcode image from a kernel buffer into a buffer allocated by
2923 * ipr_alloc_ucode_buffer
2924 *
2925 * Return value:
2926 * 	0 on success / other on failure
2927 **/
2928static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2929				 u8 *buffer, u32 len)
2930{
2931	int bsize_elem, i, result = 0;
2932	struct scatterlist *scatterlist;
2933	void *kaddr;
2934
2935	/* Determine the actual number of bytes per element */
2936	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2937
2938	scatterlist = sglist->scatterlist;
2939
2940	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2941		kaddr = kmap(scatterlist[i].page);
2942		memcpy(kaddr, buffer, bsize_elem);
2943		kunmap(scatterlist[i].page);
2944
2945		scatterlist[i].length = bsize_elem;
2946
2947		if (result != 0) {
2948			ipr_trace;
2949			return result;
2950		}
2951	}
2952
2953	if (len % bsize_elem) {
2954		kaddr = kmap(scatterlist[i].page);
2955		memcpy(kaddr, buffer, len % bsize_elem);
2956		kunmap(scatterlist[i].page);
2957
2958		scatterlist[i].length = len % bsize_elem;
2959	}
2960
2961	sglist->buffer_len = len;
2962	return result;
2963}
2964
2965/**
2966 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2967 * @ipr_cmd:	ipr command struct
2968 * @sglist:		scatter/gather list
2969 *
2970 * Builds a microcode download IOA data list (IOADL).
2971 *
2972 **/
2973static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2974				  struct ipr_sglist *sglist)
2975{
2976	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2977	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2978	struct scatterlist *scatterlist = sglist->scatterlist;
2979	int i;
2980
2981	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2982	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2983	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2984	ioarcb->write_ioadl_len =
2985		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2986
2987	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2988		ioadl[i].flags_and_data_len =
2989			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2990		ioadl[i].address =
2991			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2992	}
2993
2994	ioadl[i-1].flags_and_data_len |=
2995		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2996}
2997
2998/**
2999 * ipr_update_ioa_ucode - Update IOA's microcode
3000 * @ioa_cfg:	ioa config struct
3001 * @sglist:		scatter/gather list
3002 *
3003 * Initiate an adapter reset to update the IOA's microcode
3004 *
3005 * Return value:
3006 * 	0 on success / -EIO on failure
3007 **/
3008static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3009				struct ipr_sglist *sglist)
3010{
3011	unsigned long lock_flags;
3012
3013	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3014	while (ioa_cfg->in_reset_reload) {
3015		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3016		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3017		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3018	}
3019
3020	if (ioa_cfg->ucode_sglist) {
3021		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3022		dev_err(&ioa_cfg->pdev->dev,
3023			"Microcode download already in progress\n");
3024		return -EIO;
3025	}
3026
3027	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3028					sglist->num_sg, DMA_TO_DEVICE);
3029
3030	if (!sglist->num_dma_sg) {
3031		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3032		dev_err(&ioa_cfg->pdev->dev,
3033			"Failed to map microcode download buffer!\n");
3034		return -EIO;
3035	}
3036
3037	ioa_cfg->ucode_sglist = sglist;
3038	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3039	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3040	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3041
3042	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3043	ioa_cfg->ucode_sglist = NULL;
3044	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3045	return 0;
3046}
3047
3048/**
3049 * ipr_store_update_fw - Update the firmware on the adapter
3050 * @class_dev:	class_device struct
3051 * @buf:		buffer
3052 * @count:		buffer size
3053 *
3054 * This function will update the firmware on the adapter.
3055 *
3056 * Return value:
3057 * 	count on success / other on failure
3058 **/
3059static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3060				       const char *buf, size_t count)
3061{
3062	struct Scsi_Host *shost = class_to_shost(class_dev);
3063	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3064	struct ipr_ucode_image_header *image_hdr;
3065	const struct firmware *fw_entry;
3066	struct ipr_sglist *sglist;
3067	char fname[100];
3068	char *src;
3069	int len, result, dnld_size;
3070
3071	if (!capable(CAP_SYS_ADMIN))
3072		return -EACCES;
3073
3074	len = snprintf(fname, 99, "%s", buf);
3075	fname[len-1] = '\0';	/* chop the trailing newline from the sysfs write */
3076
3077	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3078		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3079		return -EIO;
3080	}
3081
3082	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3083
3084	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3085	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3086	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3087		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3088		release_firmware(fw_entry);
3089		return -EINVAL;
3090	}
3091
3092	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3093	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3094	sglist = ipr_alloc_ucode_buffer(dnld_size);
3095
3096	if (!sglist) {
3097		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3098		release_firmware(fw_entry);
3099		return -ENOMEM;
3100	}
3101
3102	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3103
3104	if (result) {
3105		dev_err(&ioa_cfg->pdev->dev,
3106			"Microcode buffer copy to DMA buffer failed\n");
3107		goto out;
3108	}
3109
3110	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3111
3112	if (!result)
3113		result = count;
3114out:
3115	ipr_free_ucode_buffer(sglist);
3116	release_firmware(fw_entry);
3117	return result;
3118}
3119
3120static struct class_device_attribute ipr_update_fw_attr = {
3121	.attr = {
3122		.name =		"update_fw",
3123		.mode =		S_IWUSR,
3124	},
3125	.store = ipr_store_update_fw
3126};
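
/*
 * Illustrative usage, assuming host0 and a hypothetical image name;
 * request_firmware() resolves the written name against the firmware
 * search path (typically /lib/firmware):
 *
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */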
3127
3128static struct class_device_attribute *ipr_ioa_attrs[] = {
3129	&ipr_fw_version_attr,
3130	&ipr_log_level_attr,
3131	&ipr_diagnostics_attr,
3132	&ipr_ioa_state_attr,
3133	&ipr_ioa_reset_attr,
3134	&ipr_update_fw_attr,
3135	&ipr_ioa_cache_attr,
3136	NULL,
3137};
3138
3139#ifdef CONFIG_SCSI_IPR_DUMP
3140/**
3141 * ipr_read_dump - Dump the adapter
3142 * @kobj:		kobject struct
3143 * @buf:		buffer
3144 * @off:		offset
3145 * @count:		buffer size
3146 *
3147 * Return value:
3148 *	number of bytes printed to buffer
3149 **/
3150static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3151			      loff_t off, size_t count)
3152{
3153	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
3154	struct Scsi_Host *shost = class_to_shost(cdev);
3155	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3156	struct ipr_dump *dump;
3157	unsigned long lock_flags = 0;
3158	char *src;
3159	int len;
3160	size_t rc = count;
3161
3162	if (!capable(CAP_SYS_ADMIN))
3163		return -EACCES;
3164
3165	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3166	dump = ioa_cfg->dump;
3167
3168	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3169		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3170		return 0;
3171	}
3172	kref_get(&dump->kref);
3173	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174
3175	if (off > dump->driver_dump.hdr.len) {
3176		kref_put(&dump->kref, ipr_release_dump);
3177		return 0;
3178	}
3179
3180	if (off + count > dump->driver_dump.hdr.len) {
3181		count = dump->driver_dump.hdr.len - off;
3182		rc = count;
3183	}
3184
3185	if (count && off < sizeof(dump->driver_dump)) {
3186		if (off + count > sizeof(dump->driver_dump))
3187			len = sizeof(dump->driver_dump) - off;
3188		else
3189			len = count;
3190		src = (u8 *)&dump->driver_dump + off;
3191		memcpy(buf, src, len);
3192		buf += len;
3193		off += len;
3194		count -= len;
3195	}
3196
3197	off -= sizeof(dump->driver_dump);
3198
3199	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3200		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3201			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3202		else
3203			len = count;
3204		src = (u8 *)&dump->ioa_dump + off;
3205		memcpy(buf, src, len);
3206		buf += len;
3207		off += len;
3208		count -= len;
3209	}
3210
3211	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3212
3213	while (count) {
3214		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3215			len = PAGE_ALIGN(off) - off;
3216		else
3217			len = count;
3218		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3219		src += off & ~PAGE_MASK;
3220		memcpy(buf, src, len);
3221		buf += len;
3222		off += len;
3223		count -= len;
3224	}
3225
3226	kref_put(&dump->kref, ipr_release_dump);
3227	return rc;
3228}
3229
3230/**
3231 * ipr_alloc_dump - Prepare for adapter dump
3232 * @ioa_cfg:	ioa config struct
3233 *
3234 * Return value:
3235 *	0 on success / other on failure
3236 **/
3237static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3238{
3239	struct ipr_dump *dump;
3240	unsigned long lock_flags = 0;
3241
3242	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3243
3244	if (!dump) {
3245		ipr_err("Dump memory allocation failed\n");
3246		return -ENOMEM;
3247	}
3248
3249	kref_init(&dump->kref);
3250	dump->ioa_cfg = ioa_cfg;
3251
3252	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3253
3254	if (INACTIVE != ioa_cfg->sdt_state) {
3255		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3256		kfree(dump);
3257		return 0;
3258	}
3259
3260	ioa_cfg->dump = dump;
3261	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3262	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3263		ioa_cfg->dump_taken = 1;
3264		schedule_work(&ioa_cfg->work_q);
3265	}
3266	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267
3268	return 0;
3269}
3270
3271/**
3272 * ipr_free_dump - Free adapter dump memory
3273 * @ioa_cfg:	ioa config struct
3274 *
3275 * Return value:
3276 *	0 on success / other on failure
3277 **/
3278static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3279{
3280	struct ipr_dump *dump;
3281	unsigned long lock_flags = 0;
3282
3283	ENTER;
3284
3285	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3286	dump = ioa_cfg->dump;
3287	if (!dump) {
3288		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289		return 0;
3290	}
3291
3292	ioa_cfg->dump = NULL;
3293	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294
3295	kref_put(&dump->kref, ipr_release_dump);
3296
3297	LEAVE;
3298	return 0;
3299}
3300
3301/**
3302 * ipr_write_dump - Setup dump state of adapter
3303 * @kobj:		kobject struct
3304 * @buf:		buffer
3305 * @off:		offset
3306 * @count:		buffer size
3307 *
3308 * Return value:
3309 *	number of bytes written on success / other on failure
3310 **/
3311static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3312			      loff_t off, size_t count)
3313{
3314	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
3315	struct Scsi_Host *shost = class_to_shost(cdev);
3316	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3317	int rc;
3318
3319	if (!capable(CAP_SYS_ADMIN))
3320		return -EACCES;
3321
3322	if (buf[0] == '1')
3323		rc = ipr_alloc_dump(ioa_cfg);
3324	else if (buf[0] == '0')
3325		rc = ipr_free_dump(ioa_cfg);
3326	else
3327		return -EINVAL;
3328
3329	if (rc)
3330		return rc;
3331	else
3332		return count;
3333}
3334
3335static struct bin_attribute ipr_dump_attr = {
3336	.attr =	{
3337		.name = "dump",
3338		.mode = S_IRUSR | S_IWUSR,
3339	},
3340	.size = 0,
3341	.read = ipr_read_dump,
3342	.write = ipr_write_dump
3343};
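
/*
 * Illustrative dump retrieval, assuming host0: writing '1' allocates
 * dump memory, the binary attribute is then read out, and writing
 * '0' frees it:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# dd if=/sys/class/scsi_host/host0/dump of=/tmp/ioa.dump
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */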
3344#else
3345static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3346#endif
3347
3348/**
3349 * ipr_change_queue_depth - Change the device's queue depth
3350 * @sdev:	scsi device struct
3351 * @qdepth:	depth to set
3352 *
3353 * Return value:
3354 * 	actual depth set
3355 **/
3356static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3357{
3358	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3359	struct ipr_resource_entry *res;
3360	unsigned long lock_flags = 0;
3361
3362	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3363	res = (struct ipr_resource_entry *)sdev->hostdata;
3364
3365	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3366		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3367	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3368
3369	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3370	return sdev->queue_depth;
3371}
3372
3373/**
3374 * ipr_change_queue_type - Change the device's queue type
3375 * @sdev:		scsi device struct
3376 * @tag_type:	type of tags to use
3377 *
3378 * Return value:
3379 * 	actual queue type set
3380 **/
3381static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3382{
3383	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3384	struct ipr_resource_entry *res;
3385	unsigned long lock_flags = 0;
3386
3387	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3388	res = (struct ipr_resource_entry *)sdev->hostdata;
3389
3390	if (res) {
3391		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3392			/*
3393			 * We don't bother quiescing the device here since the
3394			 * adapter firmware does it for us.
3395			 */
3396			scsi_set_tag_type(sdev, tag_type);
3397
3398			if (tag_type)
3399				scsi_activate_tcq(sdev, sdev->queue_depth);
3400			else
3401				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3402		} else
3403			tag_type = 0;
3404	} else
3405		tag_type = 0;
3406
3407	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408	return tag_type;
3409}
3410
3411/**
3412 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3413 * @dev:	device struct
3414 * @buf:	buffer
3415 *
3416 * Return value:
3417 * 	number of bytes printed to buffer
3418 **/
3419static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3420{
3421	struct scsi_device *sdev = to_scsi_device(dev);
3422	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3423	struct ipr_resource_entry *res;
3424	unsigned long lock_flags = 0;
3425	ssize_t len = -ENXIO;
3426
3427	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428	res = (struct ipr_resource_entry *)sdev->hostdata;
3429	if (res)
3430		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3431	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432	return len;
3433}
3434
3435static struct device_attribute ipr_adapter_handle_attr = {
3436	.attr = {
3437		.name = 	"adapter_handle",
3438		.mode =		S_IRUSR,
3439	},
3440	.show = ipr_show_adapter_handle
3441};
3442
3443static struct device_attribute *ipr_dev_attrs[] = {
3444	&ipr_adapter_handle_attr,
3445	NULL,
3446};
3447
3448/**
3449 * ipr_biosparam - Return the HSC mapping
3450 * @sdev:			scsi device struct
3451 * @block_device:	block device pointer
3452 * @capacity:		capacity of the device
3453 * @parm:			Array containing returned HSC values.
3454 *
3455 * This function generates the HSC parms that fdisk uses.
3456 * We want to make sure we return something that places partitions
3457 * on 4k boundaries for best performance with the IOA.
3458 *
3459 * Return value:
3460 * 	0 on success
3461 **/
3462static int ipr_biosparam(struct scsi_device *sdev,
3463			 struct block_device *block_device,
3464			 sector_t capacity, int *parm)
3465{
3466	int heads, sectors;
3467	sector_t cylinders;
3468
3469	heads = 128;
3470	sectors = 32;
3471
3472	cylinders = capacity;
3473	sector_div(cylinders, (128 * 32));
3474
3475	/* return result */
3476	parm[0] = heads;
3477	parm[1] = sectors;
3478	parm[2] = cylinders;
3479
3480	return 0;
3481}
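
/*
 * Worked example for the fixed 128/32 geometry above: an 8GB disk has
 * 16777216 512-byte sectors, so
 *
 *	cylinders = 16777216 / (128 * 32) = 4096
 *
 * Each cylinder is 128 * 32 = 4096 sectors (2MB), so partitions that
 * start on a cylinder boundary always land on a 4KB boundary.
 */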
3482
3483/**
3484 * ipr_find_starget - Find target based on bus/target.
3485 * @starget:	scsi target struct
3486 *
3487 * Return value:
3488 * 	resource entry pointer if found / NULL if not found
3489 **/
3490static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3491{
3492	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3493	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3494	struct ipr_resource_entry *res;
3495
3496	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3497		if ((res->cfgte.res_addr.bus == starget->channel) &&
3498		    (res->cfgte.res_addr.target == starget->id) &&
3499		    (res->cfgte.res_addr.lun == 0)) {
3500			return res;
3501		}
3502	}
3503
3504	return NULL;
3505}
3506
3507static struct ata_port_info sata_port_info;
3508
3509/**
3510 * ipr_target_alloc - Prepare for commands to a SCSI target
3511 * @starget:	scsi target struct
3512 *
3513 * If the device is a SATA device, this function allocates an
3514 * ATA port with libata, else it does nothing.
3515 *
3516 * Return value:
3517 * 	0 on success / non-0 on failure
3518 **/
3519static int ipr_target_alloc(struct scsi_target *starget)
3520{
3521	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3522	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3523	struct ipr_sata_port *sata_port;
3524	struct ata_port *ap;
3525	struct ipr_resource_entry *res;
3526	unsigned long lock_flags;
3527
3528	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3529	res = ipr_find_starget(starget);
3530	starget->hostdata = NULL;
3531
3532	if (res && ipr_is_gata(res)) {
3533		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3534		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3535		if (!sata_port)
3536			return -ENOMEM;
3537
3538		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3539		if (ap) {
3540			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3541			sata_port->ioa_cfg = ioa_cfg;
3542			sata_port->ap = ap;
3543			sata_port->res = res;
3544
3545			res->sata_port = sata_port;
3546			ap->private_data = sata_port;
3547			starget->hostdata = sata_port;
3548		} else {
3549			kfree(sata_port);
3550			return -ENOMEM;
3551		}
3552	}
3553	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3554
3555	return 0;
3556}
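
/*
 * Locking note (illustrative sketch): kzalloc(GFP_KERNEL) and
 * ata_sas_port_alloc() may sleep, so ipr_target_alloc() drops the host
 * lock around the allocations and retakes it only to publish the new
 * sata_port, following the usual pattern:
 *
 *	spin_lock_irqsave(lock, flags);
 *	... look up state under the lock ...
 *	spin_unlock_irqrestore(lock, flags);
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);	(may sleep)
 *	spin_lock_irqsave(lock, flags);
 *	... publish obj under the lock ...
 *	spin_unlock_irqrestore(lock, flags);
 */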
3557
3558/**
3559 * ipr_target_destroy - Destroy a SCSI target
3560 * @starget:	scsi target struct
3561 *
3562 * If the device was a SATA device, this function frees the libata
3563 * ATA port, else it does nothing.
3564 *
3565 **/
3566static void ipr_target_destroy(struct scsi_target *starget)
3567{
3568	struct ipr_sata_port *sata_port = starget->hostdata;
3569
3570	if (sata_port) {
3571		starget->hostdata = NULL;
3572		ata_sas_port_destroy(sata_port->ap);
3573		kfree(sata_port);
3574	}
3575}
3576
3577/**
3578 * ipr_find_sdev - Find device based on bus/target/lun.
3579 * @sdev:	scsi device struct
3580 *
3581 * Return value:
3582 * 	resource entry pointer if found / NULL if not found
3583 **/
3584static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3585{
3586	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3587	struct ipr_resource_entry *res;
3588
3589	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3590		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3591		    (res->cfgte.res_addr.target == sdev->id) &&
3592		    (res->cfgte.res_addr.lun == sdev->lun))
3593			return res;
3594	}
3595
3596	return NULL;
3597}
3598
3599/**
3600 * ipr_slave_destroy - Unconfigure a SCSI device
3601 * @sdev:	scsi device struct
3602 *
3603 * Return value:
3604 * 	nothing
3605 **/
3606static void ipr_slave_destroy(struct scsi_device *sdev)
3607{
3608	struct ipr_resource_entry *res;
3609	struct ipr_ioa_cfg *ioa_cfg;
3610	unsigned long lock_flags = 0;
3611
3612	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3613
3614	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3615	res = (struct ipr_resource_entry *) sdev->hostdata;
3616	if (res) {
3617		if (res->sata_port)
3618			ata_port_disable(res->sata_port->ap);
3619		sdev->hostdata = NULL;
3620		res->sdev = NULL;
3621		res->sata_port = NULL;
3622	}
3623	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624}
3625
3626/**
3627 * ipr_slave_configure - Configure a SCSI device
3628 * @sdev:	scsi device struct
3629 *
3630 * This function configures the specified scsi device.
3631 *
3632 * Return value:
3633 * 	0 on success
3634 **/
3635static int ipr_slave_configure(struct scsi_device *sdev)
3636{
3637	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3638	struct ipr_resource_entry *res;
3639	unsigned long lock_flags = 0;
3640
3641	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3642	res = sdev->hostdata;
3643	if (res) {
3644		if (ipr_is_af_dasd_device(res))
3645			sdev->type = TYPE_RAID;
3646		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3647			sdev->scsi_level = 4;
3648			sdev->no_uld_attach = 1;
3649		}
3650		if (ipr_is_vset_device(res)) {
3651			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3652			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3653		}
3654		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3655			sdev->allow_restart = 1;
3656		if (ipr_is_gata(res) && res->sata_port) {
3657			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3658			ata_sas_slave_configure(sdev, res->sata_port->ap);
3659		} else {
3660			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3661		}
3662	}
3663	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3664	return 0;
3665}
3666
3667/**
3668 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3669 * @sdev:	scsi device struct
3670 *
3671 * This function initializes an ATA port so that future commands
3672 * sent through queuecommand will work.
3673 *
3674 * Return value:
3675 * 	0 on success
3676 **/
3677static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3678{
3679	struct ipr_sata_port *sata_port = NULL;
3680	int rc = -ENXIO;
3681
3682	ENTER;
3683	if (sdev->sdev_target)
3684		sata_port = sdev->sdev_target->hostdata;
3685	if (sata_port)
3686		rc = ata_sas_port_init(sata_port->ap);
3687	if (rc)
3688		ipr_slave_destroy(sdev);
3689
3690	LEAVE;
3691	return rc;
3692}
3693
3694/**
3695 * ipr_slave_alloc - Prepare for commands to a device.
3696 * @sdev:	scsi device struct
3697 *
3698 * This function saves a pointer to the resource entry
3699 * in the scsi device struct if the device exists. We
3700 * can then use this pointer in ipr_queuecommand when
3701 * handling new commands.
3702 *
3703 * Return value:
3704 * 	0 on success / -ENXIO if device does not exist
3705 **/
3706static int ipr_slave_alloc(struct scsi_device *sdev)
3707{
3708	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3709	struct ipr_resource_entry *res;
3710	unsigned long lock_flags;
3711	int rc = -ENXIO;
3712
3713	sdev->hostdata = NULL;
3714
3715	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3716
3717	res = ipr_find_sdev(sdev);
3718	if (res) {
3719		res->sdev = sdev;
3720		res->add_to_ml = 0;
3721		res->in_erp = 0;
3722		sdev->hostdata = res;
3723		if (!ipr_is_naca_model(res))
3724			res->needs_sync_complete = 1;
3725		rc = 0;
3726		if (ipr_is_gata(res)) {
3727			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3728			return ipr_ata_slave_alloc(sdev);
3729		}
3730	}
3731
3732	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3733
3734	return rc;
3735}
3736
3737/**
3738 * __ipr_eh_host_reset - Reset the host adapter
3739 * @scsi_cmd:	scsi command struct
3740 *
3741 * Return value:
3742 * 	SUCCESS / FAILED
3743 **/
3744static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3745{
3746	struct ipr_ioa_cfg *ioa_cfg;
3747	int rc;
3748
3749	ENTER;
3750	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3751
3752	dev_err(&ioa_cfg->pdev->dev,
3753		"Adapter being reset as a result of error recovery.\n");
3754
3755	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3756		ioa_cfg->sdt_state = GET_DUMP;
3757
3758	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3759
3760	LEAVE;
3761	return rc;
3762}
3763
3764static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3765{
3766	int rc;
3767
3768	spin_lock_irq(cmd->device->host->host_lock);
3769	rc = __ipr_eh_host_reset(cmd);
3770	spin_unlock_irq(cmd->device->host->host_lock);
3771
3772	return rc;
3773}
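
/*
 * Wrapper pattern (illustrative sketch): the midlayer invokes the EH
 * handlers without the host lock held, so the locked logic lives in the
 * double-underscore helper and the registered entry point is just:
 *
 *	spin_lock_irq(host->host_lock);
 *	rc = __helper(cmd);
 *	spin_unlock_irq(host->host_lock);
 *
 * ipr_eh_dev_reset() below follows the same scheme.
 */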
3774
3775/**
3776 * ipr_device_reset - Reset the device
3777 * @ioa_cfg:	ioa config struct
3778 * @res:		resource entry struct
3779 *
3780 * This function issues a device reset to the affected device.
3781 * If the device is a SCSI device, a LUN reset will be sent
3782 * to the device first. If that does not work, a target reset
3783 * will be sent. If the device is a SATA device, a PHY reset will
3784 * be sent.
3785 *
3786 * Return value:
3787 *	0 on success / non-zero on failure
3788 **/
3789static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3790			    struct ipr_resource_entry *res)
3791{
3792	struct ipr_cmnd *ipr_cmd;
3793	struct ipr_ioarcb *ioarcb;
3794	struct ipr_cmd_pkt *cmd_pkt;
3795	struct ipr_ioarcb_ata_regs *regs;
3796	u32 ioasc;
3797
3798	ENTER;
3799	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3800	ioarcb = &ipr_cmd->ioarcb;
3801	cmd_pkt = &ioarcb->cmd_pkt;
3802	regs = &ioarcb->add_data.u.regs;
3803
3804	ioarcb->res_handle = res->cfgte.res_handle;
3805	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3806	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3807	if (ipr_is_gata(res)) {
3808		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3809		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3810		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3811	}
3812
3813	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3814	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3815	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3816	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3817		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3818		       sizeof(struct ipr_ioasa_gata));
3819
3820	LEAVE;
3821	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3822}
3823
3824/**
3825 * ipr_sata_reset - Reset the SATA port
3826 * @ap:		SATA port to reset
3827 * @classes:	class of the attached device
 * @deadline:	unused
3828 *
3829 * This function issues a SATA phy reset to the affected ATA port.
3830 *
3831 * Return value:
3832 *	0 on success / non-zero on failure
3833 **/
3834static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3835				unsigned long deadline)
3836{
3837	struct ipr_sata_port *sata_port = ap->private_data;
3838	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3839	struct ipr_resource_entry *res;
3840	unsigned long lock_flags = 0;
3841	int rc = -ENXIO;
3842
3843	ENTER;
3844	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3845	while (ioa_cfg->in_reset_reload) {
3846		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3847		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3848		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3849	}
3850
3851	res = sata_port->res;
3852	if (res) {
3853		rc = ipr_device_reset(ioa_cfg, res);
3854		switch (res->cfgte.proto) {
3855		case IPR_PROTO_SATA:
3856		case IPR_PROTO_SAS_STP:
3857			*classes = ATA_DEV_ATA;
3858			break;
3859		case IPR_PROTO_SATA_ATAPI:
3860		case IPR_PROTO_SAS_STP_ATAPI:
3861			*classes = ATA_DEV_ATAPI;
3862			break;
3863		default:
3864			*classes = ATA_DEV_UNKNOWN;
3865			break;
3866		}
3867	}
3868
3869	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3870	LEAVE;
3871	return rc;
3872}
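
/*
 * Wait-loop sketch (illustrative): an adapter reset/reload in flight owns
 * the hardware, so entry points like this one spin on the in_reset_reload
 * flag and drop the host lock while sleeping:
 *
 *	while (ioa_cfg->in_reset_reload) {
 *		spin_unlock_irqrestore(host_lock, flags);
 *		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 *		spin_lock_irqsave(host_lock, flags);
 *	}
 *
 * The flag must be rechecked after the lock is retaken, hence the loop.
 */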
3873
3874/**
3875 * __ipr_eh_dev_reset - Reset the device
3876 * @scsi_cmd:	scsi command struct
3877 *
3878 * This function issues a device reset to the affected device.
3879 * A LUN reset will be sent to the device first. If that does
3880 * not work, a target reset will be sent.
3881 *
3882 * Return value:
3883 *	SUCCESS / FAILED
3884 **/
3885static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3886{
3887	struct ipr_cmnd *ipr_cmd;
3888	struct ipr_ioa_cfg *ioa_cfg;
3889	struct ipr_resource_entry *res;
3890	struct ata_port *ap;
3891	int rc = 0;
3892
3893	ENTER;
3894	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3895	res = scsi_cmd->device->hostdata;
3896
3897	if (!res)
3898		return FAILED;
3899
3900	/*
3901	 * If we are currently going through reset/reload, return failed. This will force the
3902	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3903	 * reset to complete
3904	 */
3905	if (ioa_cfg->in_reset_reload)
3906		return FAILED;
3907	if (ioa_cfg->ioa_is_dead)
3908		return FAILED;
3909
3910	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3911		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3912			if (ipr_cmd->scsi_cmd)
3913				ipr_cmd->done = ipr_scsi_eh_done;
3914			if (ipr_cmd->qc)
3915				ipr_cmd->done = ipr_sata_eh_done;
3916			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3917				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3918				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3919			}
3920		}
3921	}
3922
3923	res->resetting_device = 1;
3924	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3925
3926	if (ipr_is_gata(res) && res->sata_port) {
3927		ap = res->sata_port->ap;
3928		spin_unlock_irq(scsi_cmd->device->host->host_lock);
3929		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3930		spin_lock_irq(scsi_cmd->device->host->host_lock);
3931	} else
3932		rc = ipr_device_reset(ioa_cfg, res);
3933	res->resetting_device = 0;
3934
3935	LEAVE;
3936	return (rc ? FAILED : SUCCESS);
3937}
3938
3939static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3940{
3941	int rc;
3942
3943	spin_lock_irq(cmd->device->host->host_lock);
3944	rc = __ipr_eh_dev_reset(cmd);
3945	spin_unlock_irq(cmd->device->host->host_lock);
3946
3947	return rc;
3948}
3949
3950/**
3951 * ipr_bus_reset_done - Op done function for bus reset.
3952 * @ipr_cmd:	ipr command struct
3953 *
3954 * This function is the op done function for a bus reset
3955 *
3956 * Return value:
3957 * 	none
3958 **/
3959static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3960{
3961	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3962	struct ipr_resource_entry *res;
3963
3964	ENTER;
3965	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3966		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3967			    sizeof(res->cfgte.res_handle))) {
3968			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3969			break;
3970		}
3971	}
3972
3973	/*
3974	 * If abort has not completed, indicate the reset has, else call the
3975	 * abort's done function to wake the sleeping eh thread
3976	 */
3977	if (ipr_cmd->sibling->sibling)
3978		ipr_cmd->sibling->sibling = NULL;
3979	else
3980		ipr_cmd->sibling->done(ipr_cmd->sibling);
3981
3982	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3983	LEAVE;
3984}
3985
3986/**
3987 * ipr_abort_timeout - An abort task has timed out
3988 * @ipr_cmd:	ipr command struct
3989 *
3990 * This function handles when an abort task times out. If this
3991 * happens we issue a bus reset since we have resources tied
3992 * up that must be freed before returning to the midlayer.
3993 *
3994 * Return value:
3995 *	none
3996 **/
3997static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3998{
3999	struct ipr_cmnd *reset_cmd;
4000	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4001	struct ipr_cmd_pkt *cmd_pkt;
4002	unsigned long lock_flags = 0;
4003
4004	ENTER;
4005	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4006	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4007		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4008		return;
4009	}
4010
4011	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4012	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4013	ipr_cmd->sibling = reset_cmd;
4014	reset_cmd->sibling = ipr_cmd;
4015	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4016	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4017	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4018	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4019	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4020
4021	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4022	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4023	LEAVE;
4024}
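
/*
 * Sibling handshake sketch (illustrative): the timed-out abort and the bus
 * reset it spawns are cross-linked so that exactly one completion wakes the
 * sleeping EH thread:
 *
 *	ipr_cmd->sibling = reset_cmd;
 *	reset_cmd->sibling = ipr_cmd;
 *
 * If the reset finishes while the abort is still outstanding, its done
 * routine just clears the abort's back-link; if the abort has already
 * completed, the reset's done routine calls the abort's done() to perform
 * the wakeup (see ipr_bus_reset_done() above).
 */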
4025
4026/**
4027 * ipr_cancel_op - Cancel specified op
4028 * @scsi_cmd:	scsi command struct
4029 *
4030 * This function cancels specified op.
4031 *
4032 * Return value:
4033 *	SUCCESS / FAILED
4034 **/
4035static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4036{
4037	struct ipr_cmnd *ipr_cmd;
4038	struct ipr_ioa_cfg *ioa_cfg;
4039	struct ipr_resource_entry *res;
4040	struct ipr_cmd_pkt *cmd_pkt;
4041	u32 ioasc;
4042	int op_found = 0;
4043
4044	ENTER;
4045	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4046	res = scsi_cmd->device->hostdata;
4047
4048	/* If we are currently going through reset/reload, return failed.
4049	 * This will force the mid-layer to call ipr_eh_host_reset,
4050	 * which will then go to sleep and wait for the reset to complete
4051	 */
4052	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4053		return FAILED;
4054	if (!res || !ipr_is_gscsi(res))
4055		return FAILED;
4056
4057	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4058		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4059			ipr_cmd->done = ipr_scsi_eh_done;
4060			op_found = 1;
4061			break;
4062		}
4063	}
4064
4065	if (!op_found)
4066		return SUCCESS;
4067
4068	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4069	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4070	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4071	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4072	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4073	ipr_cmd->u.sdev = scsi_cmd->device;
4074
4075	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4076		    scsi_cmd->cmnd[0]);
4077	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4078	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4079
4080	/*
4081	 * If the abort task timed out and we sent a bus reset, we will get
4082	 * one of the following responses to the abort
4083	 */
4084	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4085		ioasc = 0;
4086		ipr_trace;
4087	}
4088
4089	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4090	if (!ipr_is_naca_model(res))
4091		res->needs_sync_complete = 1;
4092
4093	LEAVE;
4094	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4095}
4096
4097/**
4098 * ipr_eh_abort - Abort a single op
4099 * @scsi_cmd:	scsi command struct
4100 *
4101 * Return value:
4102 * 	SUCCESS / FAILED
4103 **/
4104static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4105{
4106	unsigned long flags;
4107	int rc;
4108
4109	ENTER;
4110
4111	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4112	rc = ipr_cancel_op(scsi_cmd);
4113	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4114
4115	LEAVE;
4116	return rc;
4117}
4118
4119/**
4120 * ipr_handle_other_interrupt - Handle "other" interrupts
4121 * @ioa_cfg:	ioa config struct
4122 * @int_reg:	interrupt register
4123 *
4124 * Return value:
4125 * 	IRQ_NONE / IRQ_HANDLED
4126 **/
4127static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4128					      volatile u32 int_reg)
4129{
4130	irqreturn_t rc = IRQ_HANDLED;
4131
4132	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4133		/* Mask the interrupt */
4134		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4135
4136		/* Clear the interrupt */
4137		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4138		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4139
4140		list_del(&ioa_cfg->reset_cmd->queue);
4141		del_timer(&ioa_cfg->reset_cmd->timer);
4142		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4143	} else {
4144		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4145			ioa_cfg->ioa_unit_checked = 1;
4146		else
4147			dev_err(&ioa_cfg->pdev->dev,
4148				"Permanent IOA failure. 0x%08X\n", int_reg);
4149
4150		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4151			ioa_cfg->sdt_state = GET_DUMP;
4152
4153		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4154		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4155	}
4156
4157	return rc;
4158}
4159
4160/**
4161 * ipr_isr - Interrupt service routine
4162 * @irq:	irq number
4163 * @devp:	pointer to ioa config struct
4164 *
4165 * Return value:
4166 * 	IRQ_NONE / IRQ_HANDLED
4167 **/
4168static irqreturn_t ipr_isr(int irq, void *devp)
4169{
4170	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4171	unsigned long lock_flags = 0;
4172	volatile u32 int_reg, int_mask_reg;
4173	u32 ioasc;
4174	u16 cmd_index;
4175	struct ipr_cmnd *ipr_cmd;
4176	irqreturn_t rc = IRQ_NONE;
4177
4178	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4179
4180	/* If interrupts are disabled, ignore the interrupt */
4181	if (!ioa_cfg->allow_interrupts) {
4182		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4183		return IRQ_NONE;
4184	}
4185
4186	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4187	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4188
4189	/* If the adapter is not signaling an interrupt, it is not ours */
4190	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4191		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4192		return IRQ_NONE;
4193	}
4194
4195	while (1) {
4196		ipr_cmd = NULL;
4197
4198		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4199		       ioa_cfg->toggle_bit) {
4200
4201			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4202				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4203
4204			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4205				ioa_cfg->errors_logged++;
4206				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4207
4208				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4209					ioa_cfg->sdt_state = GET_DUMP;
4210
4211				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4212				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4213				return IRQ_HANDLED;
4214			}
4215
4216			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4217
4218			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4219
4220			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4221
4222			list_del(&ipr_cmd->queue);
4223			del_timer(&ipr_cmd->timer);
4224			ipr_cmd->done(ipr_cmd);
4225
4226			rc = IRQ_HANDLED;
4227
4228			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4229				ioa_cfg->hrrq_curr++;
4230			} else {
4231				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4232				ioa_cfg->toggle_bit ^= 1u;
4233			}
4234		}
4235
4236		if (ipr_cmd != NULL) {
4237			/* Clear the PCI interrupt */
4238			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4239			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4240		} else
4241			break;
4242	}
4243
4244	if (unlikely(rc == IRQ_NONE))
4245		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4246
4247	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4248	return rc;
4249}
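
/*
 * HRRQ sketch (illustrative): the host request/response queue is a ring of
 * 32-bit entries the adapter fills with completed command indices.  Instead
 * of exchanging producer/consumer pointers, each entry carries the
 * adapter's current toggle bit, which flips on every wrap, so the ISR
 * consumes entries while
 *
 *	(be32_to_cpu(*hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == toggle_bit
 *
 * holds, and flips its own toggle_bit when hrrq_curr wraps from hrrq_end
 * back to hrrq_start.
 */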
4250
4251/**
4252 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4253 * @ioa_cfg:	ioa config struct
4254 * @ipr_cmd:	ipr command struct
4255 *
4256 * Return value:
4257 * 	0 on success / -1 on failure
4258 **/
4259static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4260			   struct ipr_cmnd *ipr_cmd)
4261{
4262	int i, nseg;
4263	struct scatterlist *sg;
4264	u32 length;
4265	u32 ioadl_flags = 0;
4266	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4267	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4268	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4269
4270	length = scsi_bufflen(scsi_cmd);
4271	if (!length)
4272		return 0;
4273
4274	nseg = scsi_dma_map(scsi_cmd);
4275	if (nseg < 0) {
4276		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
4277		return -1;
4278	}
4279
4280	ipr_cmd->dma_use_sg = nseg;
4281
4282	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4283		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4284		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4285		ioarcb->write_data_transfer_length = cpu_to_be32(length);
4286		ioarcb->write_ioadl_len =
4287			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4288	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4289		ioadl_flags = IPR_IOADL_FLAGS_READ;
4290		ioarcb->read_data_transfer_length = cpu_to_be32(length);
4291		ioarcb->read_ioadl_len =
4292			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4293	}
4294
4295	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4296		ioadl = ioarcb->add_data.u.ioadl;
4297		ioarcb->write_ioadl_addr =
4298			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4299				    offsetof(struct ipr_ioarcb, add_data));
4300		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4301	}
4302
4303	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4304		ioadl[i].flags_and_data_len =
4305			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4306		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4307	}
4308
4309	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4310	return 0;
4311}
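
/*
 * IOADL sketch (illustrative): each scatter/gather element becomes one
 * big-endian descriptor pairing a flags/length word with a DMA address,
 * the direction flag repeated in every entry and LAST OR'ed into the
 * final one:
 *
 *	ioadl[i].flags_and_data_len = cpu_to_be32(flags | sg_dma_len(sg));
 *	ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
 *	...
 *	ioadl[n - 1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
 *
 * Lists short enough to fit in the IOARCB's add_data area are inlined
 * there, saving the adapter a separate DMA fetch of the list.
 */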
4312
4313/**
4314 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4315 * @scsi_cmd:	scsi command struct
4316 *
4317 * Return value:
4318 * 	task attributes
4319 **/
4320static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4321{
4322	u8 tag[2];
4323	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4324
4325	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4326		switch (tag[0]) {
4327		case MSG_SIMPLE_TAG:
4328			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4329			break;
4330		case MSG_HEAD_TAG:
4331			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4332			break;
4333		case MSG_ORDERED_TAG:
4334			rc = IPR_FLAGS_LO_ORDERED_TASK;
4335			break;
4336		}
4337	}
4338
4339	return rc;
4340}
4341
4342/**
4343 * ipr_erp_done - Process completion of ERP for a device
4344 * @ipr_cmd:		ipr command struct
4345 *
4346 * This function copies the sense buffer into the scsi_cmd
4347 * struct and calls the scsi_done function.
4348 *
4349 * Return value:
4350 * 	nothing
4351 **/
4352static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4353{
4354	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4355	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4356	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4357	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4358
4359	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4360		scsi_cmd->result |= (DID_ERROR << 16);
4361		scmd_printk(KERN_ERR, scsi_cmd,
4362			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4363	} else {
4364		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4365		       SCSI_SENSE_BUFFERSIZE);
4366	}
4367
4368	if (res) {
4369		if (!ipr_is_naca_model(res))
4370			res->needs_sync_complete = 1;
4371		res->in_erp = 0;
4372	}
4373	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4374	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4375	scsi_cmd->scsi_done(scsi_cmd);
4376}
4377
4378/**
4379 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4380 * @ipr_cmd:	ipr command struct
4381 *
4382 * Return value:
4383 * 	none
4384 **/
4385static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4386{
4387	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4388	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4389	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4390
4391	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4392	ioarcb->write_data_transfer_length = 0;
4393	ioarcb->read_data_transfer_length = 0;
4394	ioarcb->write_ioadl_len = 0;
4395	ioarcb->read_ioadl_len = 0;
4396	ioasa->ioasc = 0;
4397	ioasa->residual_data_len = 0;
4398	ioarcb->write_ioadl_addr =
4399		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4400	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4401}
4402
4403/**
4404 * ipr_erp_request_sense - Send request sense to a device
4405 * @ipr_cmd:	ipr command struct
4406 *
4407 * This function sends a request sense to a device as a result
4408 * of a check condition.
4409 *
4410 * Return value:
4411 * 	nothing
4412 **/
4413static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4414{
4415	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4416	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4417
4418	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4419		ipr_erp_done(ipr_cmd);
4420		return;
4421	}
4422
4423	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4424
4425	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4426	cmd_pkt->cdb[0] = REQUEST_SENSE;
4427	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4428	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4429	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4430	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4431
4432	ipr_cmd->ioadl[0].flags_and_data_len =
4433		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4434	ipr_cmd->ioadl[0].address =
4435		cpu_to_be32(ipr_cmd->sense_buffer_dma);
4436
4437	ipr_cmd->ioarcb.read_ioadl_len =
4438		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4439	ipr_cmd->ioarcb.read_data_transfer_length =
4440		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4441
4442	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4443		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4444}
4445
4446/**
4447 * ipr_erp_cancel_all - Send cancel all to a device
4448 * @ipr_cmd:	ipr command struct
4449 *
4450 * This function sends a cancel all to a device to clear the
4451 * queue. If we are running TCQ on the device, QERR is set to 1,
4452 * which means all outstanding ops have been dropped on the floor.
4453 * Cancel all will return them to us.
4454 *
4455 * Return value:
4456 * 	nothing
4457 **/
4458static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4459{
4460	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4461	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4462	struct ipr_cmd_pkt *cmd_pkt;
4463
4464	res->in_erp = 1;
4465
4466	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4467
4468	if (!scsi_get_tag_type(scsi_cmd->device)) {
4469		ipr_erp_request_sense(ipr_cmd);
4470		return;
4471	}
4472
4473	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4474	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4475	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4476
4477	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4478		   IPR_CANCEL_ALL_TIMEOUT);
4479}
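
/*
 * ERP flow sketch (illustrative): device error recovery reuses the same
 * ipr_cmnd for a short chain of steps, re-initializing it between hops
 * with ipr_reinit_ipr_cmnd_for_erp():
 *
 *	check condition
 *	    -> ipr_erp_cancel_all()	(only if the device is tagged)
 *	    -> ipr_erp_request_sense()	(fetch sense data)
 *	    -> ipr_erp_done()		(copy sense, complete the command)
 *
 * Untagged devices skip the cancel-all and go straight to the request
 * sense.
 */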
4480
4481/**
4482 * ipr_dump_ioasa - Dump contents of IOASA
4483 * @ioa_cfg:	ioa config struct
4484 * @ipr_cmd:	ipr command struct
4485 * @res:		resource entry struct
4486 *
4487 * This function is invoked by the interrupt handler when ops
4488 * fail. It will log the IOASA if appropriate. Only called
4489 * for GPDD ops.
4490 *
4491 * Return value:
4492 * 	none
4493 **/
4494static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4495			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4496{
4497	int i;
4498	u16 data_len;
4499	u32 ioasc, fd_ioasc;
4500	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4501	__be32 *ioasa_data = (__be32 *)ioasa;
4502	int error_index;
4503
4504	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4505	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4506
4507	if (0 == ioasc)
4508		return;
4509
4510	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4511		return;
4512
4513	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4514		error_index = ipr_get_error(fd_ioasc);
4515	else
4516		error_index = ipr_get_error(ioasc);
4517
4518	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4519		/* Don't log an error if the IOA already logged one */
4520		if (ioasa->ilid != 0)
4521			return;
4522
4523		if (!ipr_is_gscsi(res))
4524			return;
4525
4526		if (ipr_error_table[error_index].log_ioasa == 0)
4527			return;
4528	}
4529
4530	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4531
4532	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4533		data_len = sizeof(struct ipr_ioasa);
4534	else
4535		data_len = be16_to_cpu(ioasa->ret_stat_len);
4536
4537	ipr_err("IOASA Dump:\n");
4538
4539	for (i = 0; i < data_len / 4; i += 4) {
4540		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4541			be32_to_cpu(ioasa_data[i]),
4542			be32_to_cpu(ioasa_data[i+1]),
4543			be32_to_cpu(ioasa_data[i+2]),
4544			be32_to_cpu(ioasa_data[i+3]));
4545	}
4546}
4547
4548/**
4549 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4550 * @ipr_cmd:	ipr command struct
4552 *
4553 * Return value:
4554 * 	none
4555 **/
4556static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4557{
4558	u32 failing_lba;
4559	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4560	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4561	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4562	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4563
4564	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4565
4566	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4567		return;
4568
4569	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4570
4571	if (ipr_is_vset_device(res) &&
4572	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4573	    ioasa->u.vset.failing_lba_hi != 0) {
4574		sense_buf[0] = 0x72;
4575		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4576		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4577		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4578
4579		sense_buf[7] = 12;
4580		sense_buf[8] = 0;
4581		sense_buf[9] = 0x0A;
4582		sense_buf[10] = 0x80;
4583
4584		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4585
4586		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4587		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4588		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4589		sense_buf[15] = failing_lba & 0x000000ff;
4590
4591		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4592
4593		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4594		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4595		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4596		sense_buf[19] = failing_lba & 0x000000ff;
4597	} else {
4598		sense_buf[0] = 0x70;
4599		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4600		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4601		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4602
4603		/* Illegal request */
4604		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4605		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4606			sense_buf[7] = 10;	/* additional length */
4607
4608			/* IOARCB was in error */
4609			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4610				sense_buf[15] = 0xC0;
4611			else	/* Parameter data was invalid */
4612				sense_buf[15] = 0x80;
4613
4614			sense_buf[16] =
4615			    ((IPR_FIELD_POINTER_MASK &
4616			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4617			sense_buf[17] =
4618			    (IPR_FIELD_POINTER_MASK &
4619			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4620		} else {
4621			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4622				if (ipr_is_vset_device(res))
4623					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4624				else
4625					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4626
4627				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4628				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4629				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4630				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4631				sense_buf[6] = failing_lba & 0x000000ff;
4632			}
4633
4634			sense_buf[7] = 6;	/* additional length */
4635		}
4636	}
4637}
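
/*
 * Sense format sketch (illustrative): the buffer built above follows the
 * two SPC layouts.  Fixed format (response code 0x70) can carry only a
 * 32-bit information field, so a vset failing LBA wider than 32 bits is
 * reported in descriptor format (0x72) instead:
 *
 *	byte 0: 0x72 (response code)	byte 8:  0x00 (information desc)
 *	byte 1: sense key		byte 9:  0x0A (descriptor length)
 *	byte 2: ASC			byte 10: 0x80 (VALID)
 *	byte 3: ASCQ			bytes 12-19: 64-bit failing LBA
 */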
4638
4639/**
4640 * ipr_get_autosense - Copy autosense data to sense buffer
4641 * @ipr_cmd:	ipr command struct
4642 *
4643 * This function copies the autosense buffer to the buffer
4644 * in the scsi_cmd, if there is autosense available.
4645 *
4646 * Return value:
4647 *	1 if autosense was available / 0 if not
4648 **/
4649static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4650{
4651	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4652
4653	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4654		return 0;
4655
4656	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4657	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4658		   SCSI_SENSE_BUFFERSIZE));
4659	return 1;
4660}
4661
4662/**
4663 * ipr_erp_start - Process an error response for a SCSI op
4664 * @ioa_cfg:	ioa config struct
4665 * @ipr_cmd:	ipr command struct
4666 *
4667 * This function determines whether or not to initiate ERP
4668 * on the affected device.
4669 *
4670 * Return value:
4671 * 	nothing
4672 **/
4673static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4674			      struct ipr_cmnd *ipr_cmd)
4675{
4676	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4677	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4678	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4679	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4680
4681	if (!res) {
4682		ipr_scsi_eh_done(ipr_cmd);
4683		return;
4684	}
4685
4686	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4687		ipr_gen_sense(ipr_cmd);
4688
4689	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4690
4691	switch (masked_ioasc) {
4692	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4693		if (ipr_is_naca_model(res))
4694			scsi_cmd->result |= (DID_ABORT << 16);
4695		else
4696			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4697		break;
4698	case IPR_IOASC_IR_RESOURCE_HANDLE:
4699	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4700		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4701		break;
4702	case IPR_IOASC_HW_SEL_TIMEOUT:
4703		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4704		if (!ipr_is_naca_model(res))
4705			res->needs_sync_complete = 1;
4706		break;
4707	case IPR_IOASC_SYNC_REQUIRED:
4708		if (!res->in_erp)
4709			res->needs_sync_complete = 1;
4710		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4711		break;
4712	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4713	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4714		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4715		break;
4716	case IPR_IOASC_BUS_WAS_RESET:
4717	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4718		/*
4719		 * Report the bus reset and ask for a retry. The device
4720		 * will give CC/UA the next command.
4721		 */
4722		if (!res->resetting_device)
4723			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4724		scsi_cmd->result |= (DID_ERROR << 16);
4725		if (!ipr_is_naca_model(res))
4726			res->needs_sync_complete = 1;
4727		break;
4728	case IPR_IOASC_HW_DEV_BUS_STATUS:
4729		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4730		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4731			if (!ipr_get_autosense(ipr_cmd)) {
4732				if (!ipr_is_naca_model(res)) {
4733					ipr_erp_cancel_all(ipr_cmd);
4734					return;
4735				}
4736			}
4737		}
4738		if (!ipr_is_naca_model(res))
4739			res->needs_sync_complete = 1;
4740		break;
4741	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4742		break;
4743	default:
4744		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4745			scsi_cmd->result |= (DID_ERROR << 16);
4746		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4747			res->needs_sync_complete = 1;
4748		break;
4749	}
4750
4751	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4752	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4753	scsi_cmd->scsi_done(scsi_cmd);
4754}
4755
4756/**
4757 * ipr_scsi_done - mid-layer done function
4758 * @ipr_cmd:	ipr command struct
4759 *
4760 * This function is invoked by the interrupt handler for
4761 * ops generated by the SCSI mid-layer
4762 *
4763 * Return value:
4764 * 	none
4765 **/
4766static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4767{
4768	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4769	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4770	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4771
4772	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4773
4774	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4775		scsi_dma_unmap(ipr_cmd->scsi_cmd);
4776		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4777		scsi_cmd->scsi_done(scsi_cmd);
4778	} else
4779		ipr_erp_start(ioa_cfg, ipr_cmd);
4780}
4781
4782/**
4783 * ipr_queuecommand - Queue a mid-layer request
4784 * @scsi_cmd:	scsi command struct
4785 * @done:		done function
4786 *
4787 * This function queues a request generated by the mid-layer.
4788 *
4789 * Return value:
4790 *	0 on success
4791 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4792 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4793 **/
4794static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4795			    void (*done) (struct scsi_cmnd *))
4796{
4797	struct ipr_ioa_cfg *ioa_cfg;
4798	struct ipr_resource_entry *res;
4799	struct ipr_ioarcb *ioarcb;
4800	struct ipr_cmnd *ipr_cmd;
4801	int rc = 0;
4802
4803	scsi_cmd->scsi_done = done;
4804	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4805	res = scsi_cmd->device->hostdata;
4806	scsi_cmd->result = (DID_OK << 16);
4807
4808	/*
4809	 * We are currently blocking all devices due to a host reset.
4810	 * We have told the host to stop giving us new requests, but
4811	 * ERP ops don't count. FIXME
4812	 */
4813	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4814		return SCSI_MLQUEUE_HOST_BUSY;
4815
4816	/*
4817	 * FIXME - Create scsi_set_host_offline interface
4818	 *  and the ioa_is_dead check can be removed
4819	 */
4820	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4821		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4822		scsi_cmd->result = (DID_NO_CONNECT << 16);
4823		scsi_cmd->scsi_done(scsi_cmd);
4824		return 0;
4825	}
4826
4827	if (ipr_is_gata(res) && res->sata_port)
4828		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4829
4830	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4831	ioarcb = &ipr_cmd->ioarcb;
4832	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4833
4834	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4835	ipr_cmd->scsi_cmd = scsi_cmd;
4836	ioarcb->res_handle = res->cfgte.res_handle;
4837	ipr_cmd->done = ipr_scsi_done;
4838	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4839
4840	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4841		if (scsi_cmd->underflow == 0)
4842			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4843
4844		if (res->needs_sync_complete) {
4845			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4846			res->needs_sync_complete = 0;
4847		}
4848
4849		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4850		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4851		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4852		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4853	}
4854
4855	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4856	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4857		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4858
4859	if (likely(rc == 0))
4860		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4861
4862	if (likely(rc == 0)) {
4863		mb();
4864		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4865		       ioa_cfg->regs.ioarrin_reg);
4866	} else {
4867		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4868		 return SCSI_MLQUEUE_HOST_BUSY;
4869	}
4870
4871	return 0;
4872}
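
/*
 * Doorbell sketch (illustrative): the mb() above orders the CPU stores
 * that filled in the IOARCB and IOADL against the MMIO write that hands
 * the command to the adapter, so the IOA can never fetch a half-built
 * control block:
 *
 *	build IOARCB/IOADL in coherent DMA memory
 *	mb();						(ordering barrier)
 *	writel(ioarcb_dma, ioa_cfg->regs.ioarrin_reg);	(ring doorbell)
 */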
4873
4874/**
4875 * ipr_ioctl - IOCTL handler
4876 * @sdev:	scsi device struct
4877 * @cmd:	IOCTL cmd
4878 * @arg:	IOCTL arg
4879 *
4880 * Return value:
4881 * 	0 on success / other on failure
4882 **/
4883static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4884{
4885	struct ipr_resource_entry *res;
4886
4887	res = (struct ipr_resource_entry *)sdev->hostdata;
4888	if (res && ipr_is_gata(res))
4889		return ata_scsi_ioctl(sdev, cmd, arg);
4890
4891	return -EINVAL;
4892}
4893
4894/**
4895 * ipr_ioa_info - Get information about the card/driver
4896 * @host:	scsi host struct
4897 *
4898 * Return value:
4899 * 	pointer to buffer with description string
4900 **/
4901static const char * ipr_ioa_info(struct Scsi_Host *host)
4902{
4903	static char buffer[512];
4904	struct ipr_ioa_cfg *ioa_cfg;
4905	unsigned long lock_flags = 0;
4906
4907	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4908
4909	spin_lock_irqsave(host->host_lock, lock_flags);
4910	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4911	spin_unlock_irqrestore(host->host_lock, lock_flags);
4912
4913	return buffer;
4914}
4915
4916static struct scsi_host_template driver_template = {
4917	.module = THIS_MODULE,
4918	.name = "IPR",
4919	.info = ipr_ioa_info,
4920	.ioctl = ipr_ioctl,
4921	.queuecommand = ipr_queuecommand,
4922	.eh_abort_handler = ipr_eh_abort,
4923	.eh_device_reset_handler = ipr_eh_dev_reset,
4924	.eh_host_reset_handler = ipr_eh_host_reset,
4925	.slave_alloc = ipr_slave_alloc,
4926	.slave_configure = ipr_slave_configure,
4927	.slave_destroy = ipr_slave_destroy,
4928	.target_alloc = ipr_target_alloc,
4929	.target_destroy = ipr_target_destroy,
4930	.change_queue_depth = ipr_change_queue_depth,
4931	.change_queue_type = ipr_change_queue_type,
4932	.bios_param = ipr_biosparam,
4933	.can_queue = IPR_MAX_COMMANDS,
4934	.this_id = -1,
4935	.sg_tablesize = IPR_MAX_SGLIST,
4936	.max_sectors = IPR_IOA_MAX_SECTORS,
4937	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4938	.use_clustering = ENABLE_CLUSTERING,
4939	.shost_attrs = ipr_ioa_attrs,
4940	.sdev_attrs = ipr_dev_attrs,
4941	.proc_name = IPR_NAME
4942};
4943
4944/**
4945 * ipr_ata_phy_reset - libata phy_reset handler
4946 * @ap:		ata port to reset
4947 *
4948 **/
4949static void ipr_ata_phy_reset(struct ata_port *ap)
4950{
4951	unsigned long flags;
4952	struct ipr_sata_port *sata_port = ap->private_data;
4953	struct ipr_resource_entry *res = sata_port->res;
4954	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4955	int rc;
4956
4957	ENTER;
4958	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4959	while (ioa_cfg->in_reset_reload) {
4960		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4961		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4962		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4963	}
4964
4965	if (!ioa_cfg->allow_cmds)
4966		goto out_unlock;
4967
4968	rc = ipr_device_reset(ioa_cfg, res);
4969
4970	if (rc) {
4971		ap->ops->port_disable(ap);
4972		goto out_unlock;
4973	}
4974
4975	switch (res->cfgte.proto) {
4976	case IPR_PROTO_SATA:
4977	case IPR_PROTO_SAS_STP:
4978		ap->device[0].class = ATA_DEV_ATA;
4979		break;
4980	case IPR_PROTO_SATA_ATAPI:
4981	case IPR_PROTO_SAS_STP_ATAPI:
4982		ap->device[0].class = ATA_DEV_ATAPI;
4983		break;
4984	default:
4985		ap->device[0].class = ATA_DEV_UNKNOWN;
4986		ap->ops->port_disable(ap);
4987		break;
4988	}
4989
4990out_unlock:
4991	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4992	LEAVE;
4993}
4994
4995/**
4996 * ipr_ata_post_internal - Cleanup after an internal command
4997 * @qc:	ATA queued command
4998 *
4999 * Return value:
5000 * 	none
5001 **/
5002static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5003{
5004	struct ipr_sata_port *sata_port = qc->ap->private_data;
5005	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5006	struct ipr_cmnd *ipr_cmd;
5007	unsigned long flags;
5008
5009	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5010	while (ioa_cfg->in_reset_reload) {
5011		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5012		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5013		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5014	}
5015
5016	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5017		if (ipr_cmd->qc == qc) {
5018			ipr_device_reset(ioa_cfg, sata_port->res);
5019			break;
5020		}
5021	}
5022	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5023}
5024
5025/**
5026 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5027 * @ap:	ATA port
5028 * @tf:	destination ATA taskfile
5029 *
5030 * Return value:
5031 * 	none
5032 **/
5033static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5034{
5035	struct ipr_sata_port *sata_port = ap->private_data;
5036	struct ipr_ioasa_gata *g = &sata_port->ioasa;
5037
5038	tf->feature = g->error;
5039	tf->nsect = g->nsect;
5040	tf->lbal = g->lbal;
5041	tf->lbam = g->lbam;
5042	tf->lbah = g->lbah;
5043	tf->device = g->device;
5044	tf->command = g->status;
5045	tf->hob_nsect = g->hob_nsect;
5046	tf->hob_lbal = g->hob_lbal;
5047	tf->hob_lbam = g->hob_lbam;
5048	tf->hob_lbah = g->hob_lbah;
5049	tf->ctl = g->alt_status;
5050}
5051
5052/**
5053 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5054 * @regs:	destination
5055 * @tf:	source ATA taskfile
5056 *
5057 * Return value:
5058 * 	none
5059 **/
5060static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5061			     struct ata_taskfile *tf)
5062{
5063	regs->feature = tf->feature;
5064	regs->nsect = tf->nsect;
5065	regs->lbal = tf->lbal;
5066	regs->lbam = tf->lbam;
5067	regs->lbah = tf->lbah;
5068	regs->device = tf->device;
5069	regs->command = tf->command;
5070	regs->hob_feature = tf->hob_feature;
5071	regs->hob_nsect = tf->hob_nsect;
5072	regs->hob_lbal = tf->hob_lbal;
5073	regs->hob_lbam = tf->hob_lbam;
5074	regs->hob_lbah = tf->hob_lbah;
5075	regs->ctl = tf->ctl;
5076}
5077
5078/**
5079 * ipr_sata_done - done function for SATA commands
5080 * @ipr_cmd:	ipr command struct
5081 *
5082 * This function is invoked by the interrupt handler for
5083 * ops generated by the SCSI mid-layer to SATA devices
5084 *
5085 * Return value:
5086 * 	none
5087 **/
5088static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5089{
5090	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5091	struct ata_queued_cmd *qc = ipr_cmd->qc;
5092	struct ipr_sata_port *sata_port = qc->ap->private_data;
5093	struct ipr_resource_entry *res = sata_port->res;
5094	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5095
5096	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5097	       sizeof(struct ipr_ioasa_gata));
5098	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5099
5100	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5101		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5102					 res->cfgte.res_addr.target);
5103
5104	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5105		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5106	else
5107		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5108	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5109	ata_qc_complete(qc);
5110}
5111
5112/**
5113 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5114 * @ipr_cmd:	ipr command struct
5115 * @qc:		ATA queued command
5116 *
5117 **/
5118static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5119				struct ata_queued_cmd *qc)
5120{
5121	u32 ioadl_flags = 0;
5122	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5123	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5124	int len = qc->nbytes + qc->pad_len;
5125	struct scatterlist *sg;
5126
5127	if (len == 0)
5128		return;
5129
5130	if (qc->dma_dir == DMA_TO_DEVICE) {
5131		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5132		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5133		ioarcb->write_data_transfer_length = cpu_to_be32(len);
5134		ioarcb->write_ioadl_len =
5135			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5136	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5137		ioadl_flags = IPR_IOADL_FLAGS_READ;
5138		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5139		ioarcb->read_ioadl_len =
5140			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5141	}
5142
5143	ata_for_each_sg(sg, qc) {
5144		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5145		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5146		if (ata_sg_is_last(sg, qc))
5147			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5148		else
5149			ioadl++;
5150	}
5151}
5152
5153/**
5154 * ipr_qc_issue - Issue a SATA qc to a device
5155 * @qc:	queued command
5156 *
5157 * Return value:
5158 * 	0 if success
5159 **/
5160static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5161{
5162	struct ata_port *ap = qc->ap;
5163	struct ipr_sata_port *sata_port = ap->private_data;
5164	struct ipr_resource_entry *res = sata_port->res;
5165	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5166	struct ipr_cmnd *ipr_cmd;
5167	struct ipr_ioarcb *ioarcb;
5168	struct ipr_ioarcb_ata_regs *regs;
5169
5170	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5171		return AC_ERR_SYSTEM;
5172
5173	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5174	ioarcb = &ipr_cmd->ioarcb;
5175	regs = &ioarcb->add_data.u.regs;
5176
5177	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5178	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5179
5180	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5181	ipr_cmd->qc = qc;
5182	ipr_cmd->done = ipr_sata_done;
5183	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5184	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5185	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5186	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5187	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5188
5189	ipr_build_ata_ioadl(ipr_cmd, qc);
5190	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5191	ipr_copy_sata_tf(regs, &qc->tf);
5192	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5193	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5194
5195	switch (qc->tf.protocol) {
5196	case ATA_PROT_NODATA:
5197	case ATA_PROT_PIO:
5198		break;
5199
5200	case ATA_PROT_DMA:
5201		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5202		break;
5203
5204	case ATA_PROT_ATAPI:
5205	case ATA_PROT_ATAPI_NODATA:
5206		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5207		break;
5208
5209	case ATA_PROT_ATAPI_DMA:
5210		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5211		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5212		break;
5213
5214	default:
5215		WARN_ON(1);
5216		return AC_ERR_INVALID;
5217	}
5218
5219	mb();
5220	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5221	       ioa_cfg->regs.ioarrin_reg);
5222	return 0;
5223}
5224
5225/**
5226 * ipr_ata_check_status - Return last ATA status
5227 * @ap:	ATA port
5228 *
5229 * Return value:
5230 * 	ATA status
5231 **/
5232static u8 ipr_ata_check_status(struct ata_port *ap)
5233{
5234	struct ipr_sata_port *sata_port = ap->private_data;
5235	return sata_port->ioasa.status;
5236}
5237
5238/**
5239 * ipr_ata_check_altstatus - Return last ATA altstatus
5240 * @ap:	ATA port
5241 *
5242 * Return value:
5243 * 	Alt ATA status
5244 **/
5245static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5246{
5247	struct ipr_sata_port *sata_port = ap->private_data;
5248	return sata_port->ioasa.alt_status;
5249}
5250
5251static struct ata_port_operations ipr_sata_ops = {
5252	.port_disable = ata_port_disable,
5253	.check_status = ipr_ata_check_status,
5254	.check_altstatus = ipr_ata_check_altstatus,
5255	.dev_select = ata_noop_dev_select,
5256	.phy_reset = ipr_ata_phy_reset,
5257	.post_internal_cmd = ipr_ata_post_internal,
5258	.tf_read = ipr_tf_read,
5259	.qc_prep = ata_noop_qc_prep,
5260	.qc_issue = ipr_qc_issue,
5261	.port_start = ata_sas_port_start,
5262	.port_stop = ata_sas_port_stop
5263};
5264
5265static struct ata_port_info sata_port_info = {
5266	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5267	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5268	.pio_mask	= 0x10, /* pio4 */
5269	.mwdma_mask = 0x07,
5270	.udma_mask	= 0x7f, /* udma0-6 */
5271	.port_ops	= &ipr_sata_ops
5272};
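
/*
 * Mask sketch (illustrative): libata transfer-mode masks have one bit per
 * mode, so the initializers above decode as:
 *
 *	.pio_mask   = 0x10  ->  bit 4     ->  PIO4 only
 *	.mwdma_mask = 0x07  ->  bits 0-2  ->  MWDMA0-2
 *	.udma_mask  = 0x7f  ->  bits 0-6  ->  UDMA0-6
 */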
5273
5274#ifdef CONFIG_PPC_PSERIES
5275static const u16 ipr_blocked_processors[] = {
5276	PV_NORTHSTAR,
5277	PV_PULSAR,
5278	PV_POWER4,
5279	PV_ICESTAR,
5280	PV_SSTAR,
5281	PV_POWER4p,
5282	PV_630,
5283	PV_630p
5284};
5285
5286/**
5287 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5288 * @ioa_cfg:	ioa cfg struct
5289 *
5290 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5291 * certain pSeries hardware. This function determines if the given
5292 * adapter is in one of these configurations or not.
5293 *
5294 * Return value:
5295 * 	1 if adapter is not supported / 0 if adapter is supported
5296 **/
5297static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5298{
5299	u8 rev_id;
5300	int i;
5301
5302	if (ioa_cfg->type == 0x5702) {
5303		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5304					 &rev_id) == PCIBIOS_SUCCESSFUL) {
5305			if (rev_id < 4) {
5306				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
5307					if (__is_processor(ipr_blocked_processors[i]))
5308						return 1;
5309				}
5310			}
5311		}
5312	}
5313	return 0;
5314}
5315#else
5316#define ipr_invalid_adapter(ioa_cfg) 0
5317#endif
5318
5319/**
5320 * ipr_ioa_bringdown_done - IOA bring down completion.
5321 * @ipr_cmd:	ipr command struct
5322 *
5323 * This function processes the completion of an adapter bring down.
5324 * It wakes any reset sleepers.
5325 *
5326 * Return value:
5327 * 	IPR_RC_JOB_RETURN
5328 **/
5329static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5330{
5331	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5332
5333	ENTER;
5334	ioa_cfg->in_reset_reload = 0;
5335	ioa_cfg->reset_retries = 0;
5336	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5337	wake_up_all(&ioa_cfg->reset_wait_q);
5338
5339	spin_unlock_irq(ioa_cfg->host->host_lock);
5340	scsi_unblock_requests(ioa_cfg->host);
5341	spin_lock_irq(ioa_cfg->host->host_lock);
5342	LEAVE;
5343
5344	return IPR_RC_JOB_RETURN;
5345}
5346
5347/**
5348 * ipr_ioa_reset_done - IOA reset completion.
5349 * @ipr_cmd:	ipr command struct
5350 *
5351 * This function processes the completion of an adapter reset.
5352 * It schedules any necessary mid-layer add/removes and
5353 * wakes any reset sleepers.
5354 *
5355 * Return value:
5356 * 	IPR_RC_JOB_RETURN
5357 **/
5358static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5359{
5360	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5361	struct ipr_resource_entry *res;
5362	struct ipr_hostrcb *hostrcb, *temp;
5363	int i = 0;
5364
5365	ENTER;
5366	ioa_cfg->in_reset_reload = 0;
5367	ioa_cfg->allow_cmds = 1;
5368	ioa_cfg->reset_cmd = NULL;
5369	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5370
5371	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5372		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5373			ipr_trace;
5374			break;
5375		}
5376	}
5377	schedule_work(&ioa_cfg->work_q);
5378
5379	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5380		list_del(&hostrcb->queue);
5381		if (i++ < IPR_NUM_LOG_HCAMS)
5382			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5383		else
5384			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5385	}
5386
5387	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5388	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5389
5390	ioa_cfg->reset_retries = 0;
5391	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5392	wake_up_all(&ioa_cfg->reset_wait_q);
5393
5394	spin_unlock_irq(ioa_cfg->host->host_lock);
5395	scsi_unblock_requests(ioa_cfg->host);
5396	spin_lock_irq(ioa_cfg->host->host_lock);
5397
5398	if (!ioa_cfg->allow_cmds)
5399		scsi_block_requests(ioa_cfg->host);
5400
5401	LEAVE;
5402	return IPR_RC_JOB_RETURN;
5403}
5404
5405/**
5406 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5407 * @supported_dev:	supported device struct
5408 * @vpids:			vendor product id struct
5409 *
5410 * Return value:
5411 * 	none
5412 **/
5413static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5414				 struct ipr_std_inq_vpids *vpids)
5415{
5416	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5417	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5418	supported_dev->num_records = 1;
5419	supported_dev->data_length =
5420		cpu_to_be16(sizeof(struct ipr_supported_device));
5421	supported_dev->reserved = 0;
5422}
5423
5424/**
5425 * ipr_set_supported_devs - Send Set Supported Devices for a device
5426 * @ipr_cmd:	ipr command struct
5427 *
5428 * This function sends a Set Supported Devices command to the adapter
5429 *
5430 * Return value:
5431 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5432 **/
5433static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5434{
5435	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5436	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5437	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5438	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5439	struct ipr_resource_entry *res = ipr_cmd->u.res;
5440
5441	ipr_cmd->job_step = ipr_ioa_reset_done;
5442
5443	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5444		if (!ipr_is_scsi_disk(res))
5445			continue;
5446
5447		ipr_cmd->u.res = res;
5448		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5449
5450		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5451		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5452		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5453
5454		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5455		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5456		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5457
5458		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5459							sizeof(struct ipr_supported_device));
5460		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5461					     offsetof(struct ipr_misc_cbs, supp_dev));
5462		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5463		ioarcb->write_data_transfer_length =
5464			cpu_to_be32(sizeof(struct ipr_supported_device));
5465
5466		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5467			   IPR_SET_SUP_DEVICE_TIMEOUT);
5468
5469		ipr_cmd->job_step = ipr_set_supported_devs;
5470		return IPR_RC_JOB_RETURN;
5471	}
5472
5473	return IPR_RC_JOB_CONTINUE;
5474}
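/*
 * Worked example of the CDB length encoding used above: the 16-bit
 * allocation length is split big-endian across CDB bytes 7 and 8. For
 * a hypothetical sizeof(struct ipr_supported_device) of 0x0128:
 *
 *	cdb[7] = (0x0128 >> 8) & 0xff;	// 0x01 (MSB)
 *	cdb[8] = 0x0128 & 0xff;		// 0x28 (LSB)
 *
 * The 0x0128 value is illustrative only, not the actual structure size.
 */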
5475
5476/**
5477 * ipr_setup_write_cache - Disable write cache if needed
5478 * @ipr_cmd:	ipr command struct
5479 *
5480 * This function sets up the adapter's write cache to the desired setting
5481 *
5482 * Return value:
5483 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5484 **/
5485static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5486{
5487	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5488
5489	ipr_cmd->job_step = ipr_set_supported_devs;
5490	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5491				    struct ipr_resource_entry, queue);
5492
5493	if (ioa_cfg->cache_state != CACHE_DISABLED)
5494		return IPR_RC_JOB_CONTINUE;
5495
5496	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5497	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5498	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5499	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5500
5501	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5502
5503	return IPR_RC_JOB_RETURN;
5504}
5505
5506/**
5507 * ipr_get_mode_page - Locate specified mode page
5508 * @mode_pages:	mode page buffer
5509 * @page_code:	page code to find
5510 * @len:		minimum required length for mode page
5511 *
5512 * Return value:
5513 * 	pointer to mode page / NULL on failure
5514 **/
5515static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5516			       u32 page_code, u32 len)
5517{
5518	struct ipr_mode_page_hdr *mode_hdr;
5519	u32 page_length;
5520	u32 length;
5521
5522	if (!mode_pages || (mode_pages->hdr.length == 0))
5523		return NULL;
5524
5525	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5526	mode_hdr = (struct ipr_mode_page_hdr *)
5527		(mode_pages->data + mode_pages->hdr.block_desc_len);
5528
5529	while (length) {
5530		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5531			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5532				return mode_hdr;
5533			break;
5534		} else {
5535			page_length = (sizeof(struct ipr_mode_page_hdr) +
5536				       mode_hdr->page_length);
5537			length -= page_length;
5538			mode_hdr = (struct ipr_mode_page_hdr *)
5539				((unsigned long)mode_hdr + page_length);
5540		}
5541	}
5542	return NULL;
5543}
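/*
 * Worked example of the length math above, assuming a hypothetical
 * MODE SENSE(6) reply with hdr.length = 0x23 and block_desc_len = 8:
 *
 *	length = (0x23 + 1) - 4 - 8 = 0x18 bytes of mode page data
 *
 * The +1 accounts for the mode data length byte itself (hdr.length does
 * not include it), and the -4 strips the rest of the 4-byte mode
 * parameter header before the block descriptors.
 */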
5544
5545/**
5546 * ipr_check_term_power - Check for term power errors
5547 * @ioa_cfg:	ioa config struct
5548 * @mode_pages:	IOAFP mode pages buffer
5549 *
5550 * Check the IOAFP's mode page 28 for term power errors
5551 *
5552 * Return value:
5553 * 	nothing
5554 **/
5555static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5556				 struct ipr_mode_pages *mode_pages)
5557{
5558	int i;
5559	int entry_length;
5560	struct ipr_dev_bus_entry *bus;
5561	struct ipr_mode_page28 *mode_page;
5562
5563	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5564				      sizeof(struct ipr_mode_page28));
5565
5566	entry_length = mode_page->entry_length;
5567
5568	bus = mode_page->bus;
5569
5570	for (i = 0; i < mode_page->num_entries; i++) {
5571		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5572			dev_err(&ioa_cfg->pdev->dev,
5573				"Term power is absent on scsi bus %d\n",
5574				bus->res_addr.bus);
5575		}
5576
5577		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5578	}
5579}
5580
5581/**
5582 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5583 * @ioa_cfg:	ioa config struct
5584 *
5585 * Looks through the config table checking for SES devices. If
5586 * the SES device is in the SES table indicating a maximum SCSI
5587 * bus speed, the speed is limited for the bus.
5588 *
5589 * Return value:
5590 * 	none
5591 **/
5592static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5593{
5594	u32 max_xfer_rate;
5595	int i;
5596
5597	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5598		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5599						       ioa_cfg->bus_attr[i].bus_width);
5600
5601		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5602			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5603	}
5604}
5605
5606/**
5607 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5608 * @ioa_cfg:	ioa config struct
5609 * @mode_pages:	mode page 28 buffer
5610 *
5611 * Updates mode page 28 based on driver configuration
5612 *
5613 * Return value:
5614 * 	none
5615 **/
5616static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5617					  struct ipr_mode_pages *mode_pages)
5618{
5619	int i, entry_length;
5620	struct ipr_dev_bus_entry *bus;
5621	struct ipr_bus_attributes *bus_attr;
5622	struct ipr_mode_page28 *mode_page;
5623
5624	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5625				      sizeof(struct ipr_mode_page28));
5626
5627	entry_length = mode_page->entry_length;
5628
5629	/* Loop for each device bus entry */
5630	for (i = 0, bus = mode_page->bus;
5631	     i < mode_page->num_entries;
5632	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5633		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5634			dev_err(&ioa_cfg->pdev->dev,
5635				"Invalid resource address reported: 0x%08X\n",
5636				IPR_GET_PHYS_LOC(bus->res_addr));
5637			continue;
5638		}
5639
5640		bus_attr = &ioa_cfg->bus_attr[i];
5641		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5642		bus->bus_width = bus_attr->bus_width;
5643		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5644		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5645		if (bus_attr->qas_enabled)
5646			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5647		else
5648			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5649	}
5650}
5651
5652/**
5653 * ipr_build_mode_select - Build a mode select command
5654 * @ipr_cmd:	ipr command struct
5655 * @res_handle:	resource handle to send command to
5656 * @parm:		Byte 2 of Mode Select command
5657 * @dma_addr:	DMA buffer address
5658 * @xfer_len:	data transfer length
5659 *
5660 * Return value:
5661 * 	none
5662 **/
5663static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5664				  __be32 res_handle, u8 parm, u32 dma_addr,
5665				  u8 xfer_len)
5666{
5667	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5668	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5669
5670	ioarcb->res_handle = res_handle;
5671	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5672	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5673	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5674	ioarcb->cmd_pkt.cdb[1] = parm;
5675	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5676
5677	ioadl->flags_and_data_len =
5678		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5679	ioadl->address = cpu_to_be32(dma_addr);
5680	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5681	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5682}
5683
5684/**
5685 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5686 * @ipr_cmd:	ipr command struct
5687 *
5688 * This function sets up the SCSI bus attributes and sends
5689 * a Mode Select for Page 28 to activate them.
5690 *
5691 * Return value:
5692 * 	IPR_RC_JOB_RETURN
5693 **/
5694static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5695{
5696	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5697	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5698	int length;
5699
5700	ENTER;
5701	ipr_scsi_bus_speed_limit(ioa_cfg);
5702	ipr_check_term_power(ioa_cfg, mode_pages);
5703	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
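	/*
	 * The mode data length field counts the bytes that follow it, so
	 * the total transfer length is hdr.length + 1. It is then zeroed
	 * because SPC reserves the mode data length field in MODE SELECT
	 * parameter data.
	 */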
5704	length = mode_pages->hdr.length + 1;
5705	mode_pages->hdr.length = 0;
5706
5707	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5708			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5709			      length);
5710
5711	ipr_cmd->job_step = ipr_setup_write_cache;
5712	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5713
5714	LEAVE;
5715	return IPR_RC_JOB_RETURN;
5716}
5717
5718/**
5719 * ipr_build_mode_sense - Builds a mode sense command
5720 * @ipr_cmd:	ipr command struct
5721 * @res_handle:	resource handle to send command to
5722 * @parm:		Byte 2 of mode sense command
5723 * @dma_addr:	DMA address of mode sense buffer
5724 * @xfer_len:	Size of DMA buffer
5725 *
5726 * Return value:
5727 * 	none
5728 **/
5729static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5730				 __be32 res_handle,
5731				 u8 parm, u32 dma_addr, u8 xfer_len)
5732{
5733	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5734	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5735
5736	ioarcb->res_handle = res_handle;
5737	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5738	ioarcb->cmd_pkt.cdb[2] = parm;
5739	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5740	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5741
5742	ioadl->flags_and_data_len =
5743		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5744	ioadl->address = cpu_to_be32(dma_addr);
5745	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5746	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5747}
5748
5749/**
5750 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5751 * @ipr_cmd:	ipr command struct
5752 *
5753 * This function handles the failure of an IOA bringup command.
5754 *
5755 * Return value:
5756 * 	IPR_RC_JOB_RETURN
5757 **/
5758static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5759{
5760	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5761	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5762
5763	dev_err(&ioa_cfg->pdev->dev,
5764		"0x%02X failed with IOASC: 0x%08X\n",
5765		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5766
5767	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5768	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5769	return IPR_RC_JOB_RETURN;
5770}
5771
5772/**
5773 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5774 * @ipr_cmd:	ipr command struct
5775 *
5776 * This function handles the failure of a Mode Sense to the IOAFP.
5777 * Some adapters do not handle all mode pages.
5778 *
5779 * Return value:
5780 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5781 **/
5782static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5783{
5784	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5785
5786	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5787		ipr_cmd->job_step = ipr_setup_write_cache;
5788		return IPR_RC_JOB_CONTINUE;
5789	}
5790
5791	return ipr_reset_cmd_failed(ipr_cmd);
5792}
5793
5794/**
5795 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5796 * @ipr_cmd:	ipr command struct
5797 *
5798 * This function sends a Page 28 mode sense to the IOA to
5799 * retrieve SCSI bus attributes.
5800 *
5801 * Return value:
5802 * 	IPR_RC_JOB_RETURN
5803 **/
5804static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5805{
5806	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5807
5808	ENTER;
5809	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5810			     0x28, ioa_cfg->vpd_cbs_dma +
5811			     offsetof(struct ipr_misc_cbs, mode_pages),
5812			     sizeof(struct ipr_mode_pages));
5813
5814	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5815	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5816
5817	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5818
5819	LEAVE;
5820	return IPR_RC_JOB_RETURN;
5821}
5822
5823/**
5824 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
5825 * @ipr_cmd:	ipr command struct
5826 *
5827 * This function enables dual IOA RAID support if possible.
5828 *
5829 * Return value:
5830 * 	IPR_RC_JOB_RETURN
5831 **/
5832static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
5833{
5834	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5835	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5836	struct ipr_mode_page24 *mode_page;
5837	int length;
5838
5839	ENTER;
5840	mode_page = ipr_get_mode_page(mode_pages, 0x24,
5841				      sizeof(struct ipr_mode_page24));
5842
5843	if (mode_page)
5844		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
5845
5846	length = mode_pages->hdr.length + 1;
5847	mode_pages->hdr.length = 0;
5848
5849	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5850			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5851			      length);
5852
5853	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5854	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5855
5856	LEAVE;
5857	return IPR_RC_JOB_RETURN;
5858}
5859
5860/**
5861 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
5862 * @ipr_cmd:	ipr command struct
5863 *
5864 * This function handles the failure of a Mode Sense to the IOAFP.
5865 * Some adapters do not handle all mode pages.
5866 *
5867 * Return value:
5868 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5869 **/
5870static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
5871{
5872	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5873
5874	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5875		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5876		return IPR_RC_JOB_CONTINUE;
5877	}
5878
5879	return ipr_reset_cmd_failed(ipr_cmd);
5880}
5881
5882/**
5883 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
5884 * @ipr_cmd:	ipr command struct
5885 *
5886 * This function sends a mode sense to the IOA to retrieve
5887 * the IOA Advanced Function Control mode page.
5888 *
5889 * Return value:
5890 * 	IPR_RC_JOB_RETURN
5891 **/
5892static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
5893{
5894	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5895
5896	ENTER;
5897	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5898			     0x24, ioa_cfg->vpd_cbs_dma +
5899			     offsetof(struct ipr_misc_cbs, mode_pages),
5900			     sizeof(struct ipr_mode_pages));
5901
5902	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
5903	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
5904
5905	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5906
5907	LEAVE;
5908	return IPR_RC_JOB_RETURN;
5909}
5910
5911/**
5912 * ipr_init_res_table - Initialize the resource table
5913 * @ipr_cmd:	ipr command struct
5914 *
5915 * This function looks through the existing resource table, comparing
5916 * it with the config table. This function will take care of old/new
5917 * devices and schedule adding/removing them from the mid-layer
5918 * as appropriate.
5919 *
5920 * Return value:
5921 * 	IPR_RC_JOB_CONTINUE
5922 **/
5923static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5924{
5925	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5926	struct ipr_resource_entry *res, *temp;
5927	struct ipr_config_table_entry *cfgte;
5928	int found, i;
5929	LIST_HEAD(old_res);
5930
5931	ENTER;
5932	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5933		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5934
5935	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5936		list_move_tail(&res->queue, &old_res);
5937
5938	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5939		cfgte = &ioa_cfg->cfg_table->dev[i];
5940		found = 0;
5941
5942		list_for_each_entry_safe(res, temp, &old_res, queue) {
5943			if (!memcmp(&res->cfgte.res_addr,
5944				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5945				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5946				found = 1;
5947				break;
5948			}
5949		}
5950
5951		if (!found) {
5952			if (list_empty(&ioa_cfg->free_res_q)) {
5953				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5954				break;
5955			}
5956
5957			found = 1;
5958			res = list_entry(ioa_cfg->free_res_q.next,
5959					 struct ipr_resource_entry, queue);
5960			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5961			ipr_init_res_entry(res);
5962			res->add_to_ml = 1;
5963		}
5964
5965		if (found)
5966			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5967	}
5968
5969	list_for_each_entry_safe(res, temp, &old_res, queue) {
5970		if (res->sdev) {
5971			res->del_from_ml = 1;
5972			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5973			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5974		} else {
5975			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5976		}
5977	}
5978
5979	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
5980		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
5981	else
5982		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5983
5984	LEAVE;
5985	return IPR_RC_JOB_CONTINUE;
5986}
5987
5988/**
5989 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5990 * @ipr_cmd:	ipr command struct
5991 *
5992 * This function sends a Query IOA Configuration command
5993 * to the adapter to retrieve the IOA configuration table.
5994 *
5995 * Return value:
5996 * 	IPR_RC_JOB_RETURN
5997 **/
5998static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5999{
6000	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6001	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6002	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6003	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6004	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6005
6006	ENTER;
6007	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6008		ioa_cfg->dual_raid = 1;
6009	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6010		 ucode_vpd->major_release, ucode_vpd->card_type,
6011		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6012	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6013	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6014
6015	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6016	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6017	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6018
6019	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6020	ioarcb->read_data_transfer_length =
6021		cpu_to_be32(sizeof(struct ipr_config_table));
6022
6023	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6024	ioadl->flags_and_data_len =
6025		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6026
6027	ipr_cmd->job_step = ipr_init_res_table;
6028
6029	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6030
6031	LEAVE;
6032	return IPR_RC_JOB_RETURN;
6033}
6034
6035/**
6036 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6037 * @ipr_cmd:	ipr command struct
 * @flags:	CDB flags (byte 1 of the INQUIRY CDB)
 * @page:	page code to request
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	transfer length
6038 *
6039 * This utility function sends an inquiry to the adapter.
6040 *
6041 * Return value:
6042 * 	none
6043 **/
6044static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6045			      u32 dma_addr, u8 xfer_len)
6046{
6047	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6048	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6049
6050	ENTER;
6051	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6052	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6053
6054	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6055	ioarcb->cmd_pkt.cdb[1] = flags;
6056	ioarcb->cmd_pkt.cdb[2] = page;
6057	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6058
6059	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6060	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6061
6062	ioadl->address = cpu_to_be32(dma_addr);
6063	ioadl->flags_and_data_len =
6064		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6065
6066	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6067	LEAVE;
6068}
6069
6070/**
6071 * ipr_inquiry_page_supported - Is the given inquiry page supported
6072 * @page0:		inquiry page 0 buffer
6073 * @page:		page code.
6074 *
6075 * This function determines if the specified inquiry page is supported.
6076 *
6077 * Return value:
6078 *	1 if page is supported / 0 if not
6079 **/
6080static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6081{
6082	int i;
6083
6084	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6085		if (page0->page[i] == page)
6086			return 1;
6087
6088	return 0;
6089}
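/*
 * A sketch of the INQUIRY page 0 data being scanned above, following the
 * standard supported-VPD-pages layout: page0->len gives the number of
 * entries and page0->page[] lists the supported page codes, e.g.:
 *
 *	page0->len     = 3;
 *	page0->page[0] = 0x00;	// supported pages
 *	page0->page[1] = 0x03;	// software VPD
 *	page0->page[2] = 0xD0;	// capabilities
 *
 * The entries shown are illustrative values, not a guaranteed reply.
 */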
6090
6091/**
6092 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6093 * @ipr_cmd:	ipr command struct
6094 *
6095 * This function sends a Page 0xD0 inquiry to the adapter
6096 * to retrieve adapter capabilities.
6097 *
6098 * Return value:
6099 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6100 **/
6101static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6102{
6103	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6104	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6105	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6106
6107	ENTER;
6108	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6109	memset(cap, 0, sizeof(*cap));
6110
6111	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6112		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6113				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6114				  sizeof(struct ipr_inquiry_cap));
6115		return IPR_RC_JOB_RETURN;
6116	}
6117
6118	LEAVE;
6119	return IPR_RC_JOB_CONTINUE;
6120}
6121
6122/**
6123 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6124 * @ipr_cmd:	ipr command struct
6125 *
6126 * This function sends a Page 3 inquiry to the adapter
6127 * to retrieve software VPD information.
6128 *
6129 * Return value:
6130 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6131 **/
6132static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6133{
6134	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6135	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6136
6137	ENTER;
6138
6139	if (!ipr_inquiry_page_supported(page0, 1))
6140		ioa_cfg->cache_state = CACHE_NONE;
6141
6142	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6143
6144	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6145			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6146			  sizeof(struct ipr_inquiry_page3));
6147
6148	LEAVE;
6149	return IPR_RC_JOB_RETURN;
6150}
6151
6152/**
6153 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6154 * @ipr_cmd:	ipr command struct
6155 *
6156 * This function sends a Page 0 inquiry to the adapter
6157 * to retrieve supported inquiry pages.
6158 *
6159 * Return value:
6160 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6161 **/
6162static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6163{
6164	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6165	char type[5];
6166
6167	ENTER;
6168
6169	/* Grab the type out of the VPD and store it away */
6170	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6171	type[4] = '\0';
6172	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6173
6174	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6175
6176	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6177			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6178			  sizeof(struct ipr_inquiry_page0));
6179
6180	LEAVE;
6181	return IPR_RC_JOB_RETURN;
6182}
6183
6184/**
6185 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6186 * @ipr_cmd:	ipr command struct
6187 *
6188 * This function sends a standard inquiry to the adapter.
6189 *
6190 * Return value:
6191 * 	IPR_RC_JOB_RETURN
6192 **/
6193static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6194{
6195	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6196
6197	ENTER;
6198	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6199
6200	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6201			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6202			  sizeof(struct ipr_ioa_vpd));
6203
6204	LEAVE;
6205	return IPR_RC_JOB_RETURN;
6206}
6207
6208/**
6209 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6210 * @ipr_cmd:	ipr command struct
6211 *
6212 * This function sends an Identify Host Request Response Queue
6213 * command to establish the HRRQ with the adapter.
6214 *
6215 * Return value:
6216 * 	IPR_RC_JOB_RETURN
6217 **/
6218static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6219{
6220	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6221	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6222
6223	ENTER;
6224	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6225
6226	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6227	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6228
6229	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
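	/*
	 * CDB bytes 2-5 carry the 32-bit host RRQ DMA address, most
	 * significant byte first; bytes 7-8 carry the queue size in bytes.
	 */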
6230	ioarcb->cmd_pkt.cdb[2] =
6231		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6232	ioarcb->cmd_pkt.cdb[3] =
6233		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6234	ioarcb->cmd_pkt.cdb[4] =
6235		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6236	ioarcb->cmd_pkt.cdb[5] =
6237		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6238	ioarcb->cmd_pkt.cdb[7] =
6239		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6240	ioarcb->cmd_pkt.cdb[8] =
6241		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6242
6243	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6244
6245	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6246
6247	LEAVE;
6248	return IPR_RC_JOB_RETURN;
6249}
6250
6251/**
6252 * ipr_reset_timer_done - Adapter reset timer function
6253 * @ipr_cmd:	ipr command struct
6254 *
6255 * Description: This function is used in adapter reset processing
6256 * for timing events. If the reset_cmd pointer in the IOA
6257 * config struct does not point to this command, we are doing nested
6258 * resets and fail_all_ops will take care of freeing the
6259 * command block.
6260 *
6261 * Return value:
6262 * 	none
6263 **/
6264static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6265{
6266	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6267	unsigned long lock_flags = 0;
6268
6269	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6270
6271	if (ioa_cfg->reset_cmd == ipr_cmd) {
6272		list_del(&ipr_cmd->queue);
6273		ipr_cmd->done(ipr_cmd);
6274	}
6275
6276	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6277}
6278
6279/**
6280 * ipr_reset_start_timer - Start a timer for adapter reset job
6281 * @ipr_cmd:	ipr command struct
6282 * @timeout:	timeout value
6283 *
6284 * Description: This function is used in adapter reset processing
6285 * for timing events. If the reset_cmd pointer in the IOA
6286 * config struct does not point to this command, we are doing nested
6287 * resets and fail_all_ops will take care of freeing the
6288 * command block.
6289 *
6290 * Return value:
6291 * 	none
6292 **/
6293static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6294				  unsigned long timeout)
6295{
6296	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6297	ipr_cmd->done = ipr_reset_ioa_job;
6298
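	/*
	 * The command block itself is the timer payload; the cast below
	 * adapts ipr_reset_timer_done() to the timer callback signature.
	 */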
6299	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6300	ipr_cmd->timer.expires = jiffies + timeout;
6301	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6302	add_timer(&ipr_cmd->timer);
6303}
6304
6305/**
6306 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6307 * @ioa_cfg:	ioa cfg struct
6308 *
6309 * Return value:
6310 * 	nothing
6311 **/
6312static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6313{
6314	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6315
6316	/* Initialize Host RRQ pointers */
6317	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6318	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6319	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6320	ioa_cfg->toggle_bit = 1;
6321
6322	/* Zero out config table */
6323	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6324}
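/*
 * Note on the toggle bit: the adapter posts completions by writing
 * response handles into host_rrq. Each time the driver wraps from
 * hrrq_end back to hrrq_start, it expects the low-order toggle bit in
 * newly written entries to flip, which is how the ISR tells fresh
 * entries apart from stale ones left over from the previous pass.
 */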
6325
6326/**
6327 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6328 * @ipr_cmd:	ipr command struct
6329 *
6330 * This function reinitializes some control blocks and
6331 * enables destructive diagnostics on the adapter.
6332 *
6333 * Return value:
6334 * 	IPR_RC_JOB_RETURN
6335 **/
6336static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6337{
6338	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6339	volatile u32 int_reg;
6340
6341	ENTER;
6342	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6343	ipr_init_ioa_mem(ioa_cfg);
6344
6345	ioa_cfg->allow_interrupts = 1;
6346	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6347
6348	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6349		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6350		       ioa_cfg->regs.clr_interrupt_mask_reg);
6351		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6352		return IPR_RC_JOB_CONTINUE;
6353	}
6354
6355	/* Enable destructive diagnostics on IOA */
6356	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6357
6358	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6359	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6360
6361	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6362
6363	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6364	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6365	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6366	ipr_cmd->done = ipr_reset_ioa_job;
6367	add_timer(&ipr_cmd->timer);
6368	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6369
6370	LEAVE;
6371	return IPR_RC_JOB_RETURN;
6372}
6373
6374/**
6375 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6376 * @ipr_cmd:	ipr command struct
6377 *
6378 * This function is invoked when an adapter dump has run out
6379 * of processing time.
6380 *
6381 * Return value:
6382 * 	IPR_RC_JOB_CONTINUE
6383 **/
6384static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6385{
6386	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6387
6388	if (ioa_cfg->sdt_state == GET_DUMP)
6389		ioa_cfg->sdt_state = ABORT_DUMP;
6390
6391	ipr_cmd->job_step = ipr_reset_alert;
6392
6393	return IPR_RC_JOB_CONTINUE;
6394}
6395
6396/**
6397 * ipr_unit_check_no_data - Log a unit check/no data error
6398 * @ioa_cfg:		ioa config struct
6399 *
6400 * Logs an error indicating the adapter unit checked, but for some
6401 * reason, we were unable to fetch the unit check buffer.
6402 *
6403 * Return value:
6404 * 	nothing
6405 **/
6406static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6407{
6408	ioa_cfg->errors_logged++;
6409	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6410}
6411
6412/**
6413 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6414 * @ioa_cfg:		ioa config struct
6415 *
6416 * Fetches the unit check buffer from the adapter by clocking the data
6417 * through the mailbox register.
6418 *
6419 * Return value:
6420 * 	nothing
6421 **/
6422static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6423{
6424	unsigned long mailbox;
6425	struct ipr_hostrcb *hostrcb;
6426	struct ipr_uc_sdt sdt;
6427	int rc, length;
6428	u32 ioasc;
6429
6430	mailbox = readl(ioa_cfg->ioa_mailbox);
6431
6432	if (!ipr_sdt_is_fmt2(mailbox)) {
6433		ipr_unit_check_no_data(ioa_cfg);
6434		return;
6435	}
6436
6437	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6438	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6439					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6440
6441	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6442	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6443		ipr_unit_check_no_data(ioa_cfg);
6444		return;
6445	}
6446
6447	/* Find length of the first sdt entry (UC buffer) */
6448	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6449		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6450
6451	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6452			     struct ipr_hostrcb, queue);
6453	list_del(&hostrcb->queue);
6454	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6455
6456	rc = ipr_get_ldump_data_section(ioa_cfg,
6457					be32_to_cpu(sdt.entry[0].bar_str_offset),
6458					(__be32 *)&hostrcb->hcam,
6459					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6460
6461	if (!rc) {
6462		ipr_handle_log_data(ioa_cfg, hostrcb);
6463		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6464		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6465		    ioa_cfg->sdt_state == GET_DUMP)
6466			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6467	} else
6468		ipr_unit_check_no_data(ioa_cfg);
6469
6470	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6471}
6472
6473/**
6474 * ipr_reset_restore_cfg_space - Restore PCI config space.
6475 * @ipr_cmd:	ipr command struct
6476 *
6477 * Description: This function restores the saved PCI config space of
6478 * the adapter, fails all outstanding ops back to the callers, and
6479 * fetches the dump/unit check if applicable to this reset.
6480 *
6481 * Return value:
6482 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6483 **/
6484static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6485{
6486	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6487	int rc;
6488
6489	ENTER;
6490	rc = pci_restore_state(ioa_cfg->pdev);
6491
6492	if (rc != PCIBIOS_SUCCESSFUL) {
6493		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6494		return IPR_RC_JOB_CONTINUE;
6495	}
6496
6497	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6498		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6499		return IPR_RC_JOB_CONTINUE;
6500	}
6501
6502	ipr_fail_all_ops(ioa_cfg);
6503
6504	if (ioa_cfg->ioa_unit_checked) {
6505		ioa_cfg->ioa_unit_checked = 0;
6506		ipr_get_unit_check_buffer(ioa_cfg);
6507		ipr_cmd->job_step = ipr_reset_alert;
6508		ipr_reset_start_timer(ipr_cmd, 0);
6509		return IPR_RC_JOB_RETURN;
6510	}
6511
6512	if (ioa_cfg->in_ioa_bringdown) {
6513		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6514	} else {
6515		ipr_cmd->job_step = ipr_reset_enable_ioa;
6516
6517		if (GET_DUMP == ioa_cfg->sdt_state) {
6518			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6519			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6520			schedule_work(&ioa_cfg->work_q);
6521			return IPR_RC_JOB_RETURN;
6522		}
6523	}
6524
6525	LEAVE;
6526	return IPR_RC_JOB_CONTINUE;
6527}
6528
6529/**
6530 * ipr_reset_bist_done - BIST has completed on the adapter.
6531 * @ipr_cmd:	ipr command struct
6532 *
6533 * Description: Unblock config space and resume the reset process.
6534 *
6535 * Return value:
6536 * 	IPR_RC_JOB_CONTINUE
6537 **/
6538static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6539{
6540	ENTER;
6541	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6542	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6543	LEAVE;
6544	return IPR_RC_JOB_CONTINUE;
6545}
6546
6547/**
6548 * ipr_reset_start_bist - Run BIST on the adapter.
6549 * @ipr_cmd:	ipr command struct
6550 *
6551 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6552 *
6553 * Return value:
6554 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6555 **/
6556static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6557{
6558	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6559	int rc;
6560
6561	ENTER;
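	/*
	 * Block user config space access so nothing pokes the adapter's
	 * config space while BIST runs, then start the built-in self test
	 * by writing PCI_BIST_START to the standard PCI BIST register.
	 */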
6562	pci_block_user_cfg_access(ioa_cfg->pdev);
6563	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6564
6565	if (rc != PCIBIOS_SUCCESSFUL) {
6566		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6567		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6568		rc = IPR_RC_JOB_CONTINUE;
6569	} else {
6570		ipr_cmd->job_step = ipr_reset_bist_done;
6571		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6572		rc = IPR_RC_JOB_RETURN;
6573	}
6574
6575	LEAVE;
6576	return rc;
6577}
6578
6579/**
6580 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6581 * @ipr_cmd:	ipr command struct
6582 *
6583 * Description: This clears PCI reset to the adapter and delays two seconds.
6584 *
6585 * Return value:
6586 * 	IPR_RC_JOB_RETURN
6587 **/
6588static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6589{
6590	ENTER;
6591	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6592	ipr_cmd->job_step = ipr_reset_bist_done;
6593	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6594	LEAVE;
6595	return IPR_RC_JOB_RETURN;
6596}
6597
6598/**
6599 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6600 * @ipr_cmd:	ipr command struct
6601 *
6602 * Description: This asserts PCI reset to the adapter.
6603 *
6604 * Return value:
6605 * 	IPR_RC_JOB_RETURN
6606 **/
6607static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6608{
6609	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6610	struct pci_dev *pdev = ioa_cfg->pdev;
6611
6612	ENTER;
6613	pci_block_user_cfg_access(pdev);
6614	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6615	ipr_cmd->job_step = ipr_reset_slot_reset_done;
6616	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6617	LEAVE;
6618	return IPR_RC_JOB_RETURN;
6619}
6620
6621/**
6622 * ipr_reset_allowed - Query whether or not IOA can be reset
6623 * @ioa_cfg:	ioa config struct
6624 *
6625 * Return value:
6626 * 	0 if reset not allowed / non-zero if reset is allowed
6627 **/
6628static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6629{
6630	volatile u32 temp_reg;
6631
6632	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6633	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6634}
6635
6636/**
6637 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6638 * @ipr_cmd:	ipr command struct
6639 *
6640 * Description: This function waits for adapter permission to run BIST,
6641 * then runs BIST. If the adapter does not give permission after a
6642 * reasonable time, we will reset the adapter anyway. The impact of
6643 * resetting the adapter without warning the adapter is the risk of
6644 * losing the persistent error log on the adapter. If the adapter is
6645 * reset while it is writing to the flash on the adapter, the flash
6646 * segment will have bad ECC and be zeroed.
6647 *
6648 * Return value:
6649 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6650 **/
6651static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6652{
6653	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6654	int rc = IPR_RC_JOB_RETURN;
6655
6656	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6657		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6658		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6659	} else {
6660		ipr_cmd->job_step = ioa_cfg->reset;
6661		rc = IPR_RC_JOB_CONTINUE;
6662	}
6663
6664	return rc;
6665}
6666
6667/**
6668 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6669 * @ipr_cmd:	ipr command struct
6670 *
6671 * Description: This function alerts the adapter that it will be reset.
6672 * If memory space is not currently enabled, proceed directly
6673 * to running BIST on the adapter. The timer must always be started
6674 * so we guarantee we do not run BIST from ipr_isr.
6675 *
6676 * Return value:
6677 * 	IPR_RC_JOB_RETURN
6678 **/
6679static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6680{
6681	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6682	u16 cmd_reg;
6683	int rc;
6684
6685	ENTER;
6686	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6687
6688	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6689		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6690		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6691		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6692	} else {
6693		ipr_cmd->job_step = ioa_cfg->reset;
6694	}
6695
6696	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6697	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6698
6699	LEAVE;
6700	return IPR_RC_JOB_RETURN;
6701}
6702
6703/**
6704 * ipr_reset_ucode_download_done - Microcode download completion
6705 * @ipr_cmd:	ipr command struct
6706 *
6707 * Description: This function unmaps the microcode download buffer.
6708 *
6709 * Return value:
6710 * 	IPR_RC_JOB_CONTINUE
6711 **/
6712static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6713{
6714	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6715	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6716
6717	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6718		     sglist->num_sg, DMA_TO_DEVICE);
6719
6720	ipr_cmd->job_step = ipr_reset_alert;
6721	return IPR_RC_JOB_CONTINUE;
6722}
6723
6724/**
6725 * ipr_reset_ucode_download - Download microcode to the adapter
6726 * @ipr_cmd:	ipr command struct
6727 *
6728 * Description: This function checks to see if there is microcode
6729 * to download to the adapter. If there is, a download is performed.
6730 *
6731 * Return value:
6732 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6733 **/
6734static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6735{
6736	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6737	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6738
6739	ENTER;
6740	ipr_cmd->job_step = ipr_reset_alert;
6741
6742	if (!sglist)
6743		return IPR_RC_JOB_CONTINUE;
6744
6745	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6746	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6747	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6748	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6749	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6750	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6751	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6752
6753	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6754	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6755
6756	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6757		   IPR_WRITE_BUFFER_TIMEOUT);
6758
6759	LEAVE;
6760	return IPR_RC_JOB_RETURN;
6761}
6762
6763/**
6764 * ipr_reset_shutdown_ioa - Shutdown the adapter
6765 * @ipr_cmd:	ipr command struct
6766 *
6767 * Description: This function issues an adapter shutdown of the
6768 * specified type to the specified adapter as part of the
6769 * adapter reset job.
6770 *
6771 * Return value:
6772 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6773 **/
6774static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6775{
6776	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6777	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6778	unsigned long timeout;
6779	int rc = IPR_RC_JOB_CONTINUE;
6780
6781	ENTER;
6782	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6783		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6784		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6785		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6786		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6787
6788		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
6789			timeout = IPR_SHUTDOWN_TIMEOUT;
6790		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6791			timeout = IPR_INTERNAL_TIMEOUT;
6792		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6793			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
6794		else
6795			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6796
6797		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6798
6799		rc = IPR_RC_JOB_RETURN;
6800		ipr_cmd->job_step = ipr_reset_ucode_download;
6801	} else
6802		ipr_cmd->job_step = ipr_reset_alert;
6803
6804	LEAVE;
6805	return rc;
6806}
6807
6808/**
6809 * ipr_reset_ioa_job - Adapter reset job
6810 * @ipr_cmd:	ipr command struct
6811 *
6812 * Description: This function is the job router for the adapter reset job.
6813 *
6814 * Return value:
6815 * 	none
6816 **/
6817static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6818{
6819	u32 rc, ioasc;
6820	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6821
6822	do {
6823		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6824
6825		if (ioa_cfg->reset_cmd != ipr_cmd) {
6826			/*
6827			 * We are doing nested adapter resets and this is
6828			 * not the current reset job.
6829			 */
6830			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6831			return;
6832		}
6833
6834		if (IPR_IOASC_SENSE_KEY(ioasc)) {
6835			rc = ipr_cmd->job_step_failed(ipr_cmd);
6836			if (rc == IPR_RC_JOB_RETURN)
6837				return;
6838		}
6839
6840		ipr_reinit_ipr_cmnd(ipr_cmd);
6841		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6842		rc = ipr_cmd->job_step(ipr_cmd);
6843	} while (rc == IPR_RC_JOB_CONTINUE);
6844}
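/*
 * A sketch of the job-step convention routed above: each job_step either
 * finishes its work synchronously and returns IPR_RC_JOB_CONTINUE, in
 * which case the loop immediately runs the next step, or it issues an
 * asynchronous operation (via ipr_do_req() or a timer) and returns
 * IPR_RC_JOB_RETURN, in which case ipr_reset_ioa_job() is re-entered
 * later as the completion callback:
 *
 *	static int example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = next_step;	// runs on completion
 *		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 *			   IPR_INTERNAL_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;	// async; wait for callback
 *	}
 *
 * example_step() and next_step are illustrative names only, not
 * functions in this driver.
 */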
6845
6846/**
6847 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6848 * @ioa_cfg:		ioa config struct
6849 * @job_step:		first job step of reset job
6850 * @shutdown_type:	shutdown type
6851 *
6852 * Description: This function will initiate the reset of the given adapter
6853 * starting at the selected job step.
6854 * If the caller needs to wait on the completion of the reset,
6855 * the caller must sleep on the reset_wait_q.
6856 *
6857 * Return value:
6858 * 	none
6859 **/
6860static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6861				    int (*job_step) (struct ipr_cmnd *),
6862				    enum ipr_shutdown_type shutdown_type)
6863{
6864	struct ipr_cmnd *ipr_cmd;
6865
6866	ioa_cfg->in_reset_reload = 1;
6867	ioa_cfg->allow_cmds = 0;
6868	scsi_block_requests(ioa_cfg->host);
6869
6870	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6871	ioa_cfg->reset_cmd = ipr_cmd;
6872	ipr_cmd->job_step = job_step;
6873	ipr_cmd->u.shutdown_type = shutdown_type;
6874
6875	ipr_reset_ioa_job(ipr_cmd);
6876}
6877
6878/**
6879 * ipr_initiate_ioa_reset - Initiate an adapter reset
6880 * @ioa_cfg:		ioa config struct
6881 * @shutdown_type:	shutdown type
6882 *
6883 * Description: This function will initiate the reset of the given adapter.
6884 * If the caller needs to wait on the completion of the reset,
6885 * the caller must sleep on the reset_wait_q.
6886 *
6887 * Return value:
6888 * 	none
6889 **/
6890static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6891				   enum ipr_shutdown_type shutdown_type)
6892{
6893	if (ioa_cfg->ioa_is_dead)
6894		return;
6895
6896	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6897		ioa_cfg->sdt_state = ABORT_DUMP;
6898
6899	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6900		dev_err(&ioa_cfg->pdev->dev,
6901			"IOA taken offline - error recovery failed\n");
6902
6903		ioa_cfg->reset_retries = 0;
6904		ioa_cfg->ioa_is_dead = 1;
6905
6906		if (ioa_cfg->in_ioa_bringdown) {
6907			ioa_cfg->reset_cmd = NULL;
6908			ioa_cfg->in_reset_reload = 0;
6909			ipr_fail_all_ops(ioa_cfg);
6910			wake_up_all(&ioa_cfg->reset_wait_q);
6911
6912			spin_unlock_irq(ioa_cfg->host->host_lock);
6913			scsi_unblock_requests(ioa_cfg->host);
6914			spin_lock_irq(ioa_cfg->host->host_lock);
6915			return;
6916		} else {
6917			ioa_cfg->in_ioa_bringdown = 1;
6918			shutdown_type = IPR_SHUTDOWN_NONE;
6919		}
6920	}
6921
6922	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6923				shutdown_type);
6924}
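/*
 * A minimal sketch of how callers elsewhere in this driver use the
 * reset entry points when they must block until the reset finishes
 * (the host lock must be held across the initiate call):
 *
 *	spin_lock_irq(ioa_cfg->host->host_lock);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irq(ioa_cfg->host->host_lock);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */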
6925
6926/**
6927 * ipr_reset_freeze - Hold off all I/O activity
6928 * @ipr_cmd:	ipr command struct
6929 *
6930 * Description: If the PCI slot is frozen, hold off all I/O
6931 * activity; then, as soon as the slot is available again,
6932 * initiate an adapter reset.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
6934static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6935{
6936	/* Disallow new interrupts, avoid loop */
6937	ipr_cmd->ioa_cfg->allow_interrupts = 0;
6938	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6939	ipr_cmd->done = ipr_reset_ioa_job;
6940	return IPR_RC_JOB_RETURN;
6941}
6942
6943/**
6944 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6945 * @pdev:	PCI device struct
6946 *
6947 * Description: This routine is called to tell us that the PCI bus
6948 * is down. Can't do anything here, except put the device driver
6949 * into a holding pattern, waiting for the PCI bus to come back.
6950 */
6951static void ipr_pci_frozen(struct pci_dev *pdev)
6952{
6953	unsigned long flags = 0;
6954	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6955
6956	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6957	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6958	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6959}
6960
6961/**
6962 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6963 * @pdev:	PCI device struct
6964 *
6965 * Description: This routine is called by the pci error recovery
6966 * code after the PCI slot has been reset, just before we
6967 * should resume normal operations.
 *
 * Return value:
 * 	PCI_ERS_RESULT_RECOVERED
 **/
6969static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6970{
6971	unsigned long flags = 0;
6972	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6973
6974	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6975	if (ioa_cfg->needs_warm_reset)
6976		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6977	else
6978		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6979					IPR_SHUTDOWN_NONE);
6980	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6981	return PCI_ERS_RESULT_RECOVERED;
6982}
6983
6984/**
6985 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6986 * @pdev:	PCI device struct
6987 *
6988 * Description: This routine is called when the PCI bus has
6989 * permanently failed.
6990 */
6991static void ipr_pci_perm_failure(struct pci_dev *pdev)
6992{
6993	unsigned long flags = 0;
6994	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6995
6996	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6997	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6998		ioa_cfg->sdt_state = ABORT_DUMP;
6999	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7000	ioa_cfg->in_ioa_bringdown = 1;
7001	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7002	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7003}
7004
7005/**
7006 * ipr_pci_error_detected - Called when a PCI error is detected.
7007 * @pdev:	PCI device struct
7008 * @state:	PCI channel state
7009 *
7010 * Description: Called when a PCI error is detected.
7011 *
7012 * Return value:
7013 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7014 */
7015static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7016					       pci_channel_state_t state)
7017{
7018	switch (state) {
7019	case pci_channel_io_frozen:
7020		ipr_pci_frozen(pdev);
7021		return PCI_ERS_RESULT_NEED_RESET;
7022	case pci_channel_io_perm_failure:
7023		ipr_pci_perm_failure(pdev);
7024		return PCI_ERS_RESULT_DISCONNECT;
7026	default:
7027		break;
7028	}
7029	return PCI_ERS_RESULT_NEED_RESET;
7030}
7031
7032/**
7033 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7034 * @ioa_cfg:	ioa cfg struct
7035 *
7036 * Description: This is the second phase of adapter initialization.
7037 * This function takes care of initializing the adapter to the point
7038 * where it can accept new commands.
7039 *
7040 * Return value:
7041 * 	0 on success / -EIO on failure
7042 **/
7043static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7044{
7045	int rc = 0;
7046	unsigned long host_lock_flags = 0;
7047
7048	ENTER;
7049	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7050	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
7051	if (ioa_cfg->needs_hard_reset) {
7052		ioa_cfg->needs_hard_reset = 0;
7053		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7054	} else
7055		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7056					IPR_SHUTDOWN_NONE);
7057
7058	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7059	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7060	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7061
7062	if (ioa_cfg->ioa_is_dead) {
7063		rc = -EIO;
7064	} else if (ipr_invalid_adapter(ioa_cfg)) {
7065		if (!ipr_testmode)
7066			rc = -EIO;
7067
7068		dev_err(&ioa_cfg->pdev->dev,
7069			"Adapter not supported in this hardware configuration.\n");
7070	}
7071
7072	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7073
7074	LEAVE;
7075	return rc;
7076}
7077
7078/**
7079 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7080 * @ioa_cfg:	ioa config struct
7081 *
7082 * Return value:
7083 * 	none
7084 **/
7085static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7086{
7087	int i;
7088
7089	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7090		if (ioa_cfg->ipr_cmnd_list[i])
7091			pci_pool_free(ioa_cfg->ipr_cmd_pool,
7092				      ioa_cfg->ipr_cmnd_list[i],
7093				      ioa_cfg->ipr_cmnd_list_dma[i]);
7094
7095		ioa_cfg->ipr_cmnd_list[i] = NULL;
7096	}
7097
7098	if (ioa_cfg->ipr_cmd_pool)
7099		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
7100
7101	ioa_cfg->ipr_cmd_pool = NULL;
7102}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
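		/*
		 * The adapter echoes this handle back in host_rrq entries.
		 * The low two bits of each entry carry HRRQ flag bits (the
		 * toggle and response bits), so the command index is
		 * shifted above them.
		 */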
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
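	/*
	 * i indexes the first hostrcb that failed to allocate (or equals
	 * IPR_NUM_HCAMS if a later allocation failed), so counting it back
	 * down frees exactly the buffers that were obtained.
	 */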
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:		scsi host struct
 * @pdev:		PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
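	/*
	 * Printable "eye catcher" labels; these exist to make the key
	 * structures easy to locate when reading an adapter or system dump.
	 */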
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:		PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
		rc = -EIO;
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

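	/*
	 * Revision 0 Obsidian-E adapters, and any device flagged with
	 * IPR_USE_PCI_WARM_RESET, are reset via a PCI slot (warm) reset
	 * rather than the adapter's built-in self test.
	 */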
	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
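	/* Let any in-flight reset/reload finish before initiating bringdown. */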
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
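	/*
	 * Widen max_channel to cover the VSET bus now that its devices have
	 * been added by hand; presumably this kept the mid-layer scan above
	 * away from the sparse VSET LUN space while still making those
	 * devices addressable from this point on.
	 */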
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

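/*
 * PCI IDs claimed by this driver. The final field of each entry is
 * driver_data, which ipr_probe_ioa() consults for per-adapter quirks
 * such as IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */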
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
	.dynids.use_driver_data = 1
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);