/* ipr.c revision 5bc65793cbf8da0d35f19ef025dda22887e79e80 */
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
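
/*
 * Illustrative example (not from the original source): loading the driver
 * with the parameters defined above might look like:
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 */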
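/*
 * Each entry is { IOASC, log_ioasa, log_hcam, message }, with field names
 * per struct ipr_error_table_t in ipr.h. The log_hcam field is the minimum
 * adapter log level at which ipr_handle_log_data() dumps the error in
 * detail; a value of 0 suppresses logging of the entry entirely.
 */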
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
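	/*
	 * Assumption (from ipr.h): trace_index is a narrow bitfield sized to
	 * the trace array, so the post-increment wraps and the trace behaves
	 * as a circular buffer; no explicit bounds check is needed here.
	 */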
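	/* The IOARCB keeps its own bus address in big-endian form for the
	 * adapter; convert it back so the bus address of the command block's
	 * embedded ioadl can be computed below. */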
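	/* Read the sense register back so the mask/clear writes above are
	 * flushed to the adapter before we return (posted MMIO writes). */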
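	/* Order the IOARCB/IOADL memory updates ahead of the ioarrin write
	 * below that hands the command to the adapter. */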
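
/*
 * Note: the adapter completes an HCAM only when it has an asynchronous
 * event to report, so ops sent here may stay outstanding indefinitely;
 * ipr_process_ccn() and ipr_process_error() re-issue the HCAM when done.
 */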
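
/*
 * Worked example: with buf = "IBM     " and i = 7 (the last character),
 * the loop backs up to the 'M'; one pad space and a terminator are then
 * written, leaving "IBM " and a return value of 4 -- the offset at which
 * the caller appends the next field.
 */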
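	/* 'i' counts 32-bit words and advances four words per pass, so each
	 * line prints 16 bytes and the leading offset shown is the byte
	 * offset i * 4. */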
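	/* A cascade or phy value of 0xff in the descriptor means that
	 * component is not part of this path, so it is omitted below. */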
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
1603 * for the specified IOASC. If the IOASC is not in the table,
1604 * 0 will be returned, which points to the entry used for unknown errors.
1605 *
1606 * Return value:
1607 * 	index into the ipr_error_table
1608 **/
1609static u32 ipr_get_error(u32 ioasc)
1610{
1611	int i;
1612
1613	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1614		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1615			return i;
1616
1617	return 0;
1618}
1619
1620/**
1621 * ipr_handle_log_data - Log an adapter error.
1622 * @ioa_cfg:	ioa config struct
1623 * @hostrcb:	hostrcb struct
1624 *
1625 * This function logs an adapter error to the system.
1626 *
1627 * Return value:
1628 * 	none
1629 **/
1630static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1631				struct ipr_hostrcb *hostrcb)
1632{
1633	u32 ioasc;
1634	int error_index;
1635
1636	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1637		return;
1638
1639	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1640		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1641
1642	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1643
1644	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1645	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1646		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1647		scsi_report_bus_reset(ioa_cfg->host,
1648				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1649	}
1650
1651	error_index = ipr_get_error(ioasc);
1652
1653	if (!ipr_error_table[error_index].log_hcam)
1654		return;
1655
1656	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1657
1658	/* Set indication we have logged an error */
1659	ioa_cfg->errors_logged++;
1660
1661	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1662		return;
1663	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1664		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1665
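	/* Dispatch on the hostrcb overlay format to the matching decode
	 routine; unrecognized overlays are logged as raw hex data */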
1666	switch (hostrcb->hcam.overlay_id) {
1667	case IPR_HOST_RCB_OVERLAY_ID_2:
1668		ipr_log_cache_error(ioa_cfg, hostrcb);
1669		break;
1670	case IPR_HOST_RCB_OVERLAY_ID_3:
1671		ipr_log_config_error(ioa_cfg, hostrcb);
1672		break;
1673	case IPR_HOST_RCB_OVERLAY_ID_4:
1674	case IPR_HOST_RCB_OVERLAY_ID_6:
1675		ipr_log_array_error(ioa_cfg, hostrcb);
1676		break;
1677	case IPR_HOST_RCB_OVERLAY_ID_7:
1678		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1679		break;
1680	case IPR_HOST_RCB_OVERLAY_ID_12:
1681		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1682		break;
1683	case IPR_HOST_RCB_OVERLAY_ID_13:
1684		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1685		break;
1686	case IPR_HOST_RCB_OVERLAY_ID_14:
1687	case IPR_HOST_RCB_OVERLAY_ID_16:
1688		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1689		break;
1690	case IPR_HOST_RCB_OVERLAY_ID_17:
1691		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1692		break;
1693	case IPR_HOST_RCB_OVERLAY_ID_20:
1694		ipr_log_fabric_error(ioa_cfg, hostrcb);
1695		break;
1696	case IPR_HOST_RCB_OVERLAY_ID_1:
1697	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1698	default:
1699		ipr_log_generic_error(ioa_cfg, hostrcb);
1700		break;
1701	}
1702}
1703
1704/**
1705 * ipr_process_error - Op done function for an adapter error log.
1706 * @ipr_cmd:	ipr command struct
1707 *
1708 * This function is the op done function for an error log host
1709 * controlled async from the adapter. It will log the error and
1710 * send the HCAM back to the adapter.
1711 *
1712 * Return value:
1713 * 	none
1714 **/
1715static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1716{
1717	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1718	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1719	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1720	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1721
1722	list_del(&hostrcb->queue);
1723	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1724
1725	if (!ioasc) {
1726		ipr_handle_log_data(ioa_cfg, hostrcb);
1727		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1728			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1729	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1730		dev_err(&ioa_cfg->pdev->dev,
1731			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1732	}
1733
1734	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1735}
1736
1737/**
1738 * ipr_timeout -  An internally generated op has timed out.
1739 * @ipr_cmd:	ipr command struct
1740 *
1741 * This function blocks host requests and initiates an
1742 * adapter reset.
1743 *
1744 * Return value:
1745 * 	none
1746 **/
1747static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1748{
1749	unsigned long lock_flags = 0;
1750	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1751
1752	ENTER;
1753	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1754
1755	ioa_cfg->errors_logged++;
1756	dev_err(&ioa_cfg->pdev->dev,
1757		"Adapter being reset due to command timeout.\n");
1758
1759	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1760		ioa_cfg->sdt_state = GET_DUMP;
1761
1762	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1763		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1764
1765	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1766	LEAVE;
1767}
1768
1769/**
1770 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1771 * @ipr_cmd:	ipr command struct
1772 *
1773 * This function blocks host requests and initiates an
1774 * adapter reset.
1775 *
1776 * Return value:
1777 * 	none
1778 **/
1779static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1780{
1781	unsigned long lock_flags = 0;
1782	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1783
1784	ENTER;
1785	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1786
1787	ioa_cfg->errors_logged++;
1788	dev_err(&ioa_cfg->pdev->dev,
1789		"Adapter timed out transitioning to operational.\n");
1790
1791	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1792		ioa_cfg->sdt_state = GET_DUMP;
1793
1794	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1795		if (ipr_fastfail)
1796			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1797		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1798	}
1799
1800	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1801	LEAVE;
1802}
1803
1804/**
1805 * ipr_reset_reload - Reset/Reload the IOA
1806 * @ioa_cfg:		ioa config struct
1807 * @shutdown_type:	shutdown type
1808 *
1809 * This function resets the adapter and re-initializes it.
1810 * This function assumes that all new host commands have been stopped.
1811 * Return value:
1812 * 	SUCCESS / FAILED
1813 **/
1814static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1815			    enum ipr_shutdown_type shutdown_type)
1816{
1817	if (!ioa_cfg->in_reset_reload)
1818		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1819
1820	spin_unlock_irq(ioa_cfg->host->host_lock);
1821	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1822	spin_lock_irq(ioa_cfg->host->host_lock);
1823
1824	/* If we got hit with a host reset while we were already resetting the
1825	 adapter for some reason, that reset may have failed and left the adapter dead */
1826	if (ioa_cfg->ioa_is_dead) {
1827		ipr_trace;
1828		return FAILED;
1829	}
1830
1831	return SUCCESS;
1832}
1833
1834/**
1835 * ipr_find_ses_entry - Find matching SES in SES table
1836 * @res:	resource entry struct of SES
1837 *
1838 * Return value:
1839 * 	pointer to SES table entry / NULL on failure
1840 **/
1841static const struct ipr_ses_table_entry *
1842ipr_find_ses_entry(struct ipr_resource_entry *res)
1843{
1844	int i, j, matches;
1845	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1846
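	/* In compare_product_id_byte, an 'X' marks a position that must
	 match the table's product_id; any other character is a wildcard */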
1847	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1848		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1849			if (ste->compare_product_id_byte[j] == 'X') {
1850				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1851					matches++;
1852				else
1853					break;
1854			} else
1855				matches++;
1856		}
1857
1858		if (matches == IPR_PROD_ID_LEN)
1859			return ste;
1860	}
1861
1862	return NULL;
1863}
1864
1865/**
1866 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1867 * @ioa_cfg:	ioa config struct
1868 * @bus:		SCSI bus
1869 * @bus_width:	bus width
1870 *
1871 * Return value:
1872 *	SCSI bus speed in units of 100KHz, e.g. 1600 means 160 MHz.
1873 *	For a 2-byte (16-bit) wide SCSI bus, the maximum transfer speed
1874 *	in MB/sec is twice the transfer rate in MHz (e.g. a wide-enabled
1875 *	bus at a max of 160 MHz transfers at most 320 MB/sec).
1876 **/
1877static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1878{
1879	struct ipr_resource_entry *res;
1880	const struct ipr_ses_table_entry *ste;
1881	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1882
1883	/* Loop through each config table entry in the config table buffer */
1884	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1885		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1886			continue;
1887
1888		if (bus != res->cfgte.res_addr.bus)
1889			continue;
1890
1891		if (!(ste = ipr_find_ses_entry(res)))
1892			continue;
1893
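		/* The table limit appears to be in MB/sec: dividing by the
		 bus width in bytes yields MHz, and multiplying by 10 gives
		 the 100KHz units this function returns */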
1894		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1895	}
1896
1897	return max_xfer_rate;
1898}
1899
1900/**
1901 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1902 * @ioa_cfg:		ioa config struct
1903 * @max_delay:		max delay in microseconds to wait
1904 *
1905 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1906 *
1907 * Return value:
1908 * 	0 on success / other on failure
1909 **/
1910static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1911{
1912	volatile u32 pcii_reg;
1913	int delay = 1;
1914
1915	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1916	while (delay < max_delay) {
1917		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1918
1919		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1920			return 0;
1921
1922		/* udelay cannot be used if delay is more than a few milliseconds */
1923		if ((delay / 1000) > MAX_UDELAY_MS)
1924			mdelay(delay / 1000);
1925		else
1926			udelay(delay);
1927
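		/* Exponential backoff: double the polling interval each pass */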
1928		delay += delay;
1929	}
1930	return -EIO;
1931}
1932
1933/**
1934 * ipr_get_ldump_data_section - Dump IOA memory
1935 * @ioa_cfg:			ioa config struct
1936 * @start_addr:			adapter address to dump
1937 * @dest:				destination kernel buffer
1938 * @length_in_words:	length to dump in 4 byte words
1939 *
1940 * Return value:
1941 * 	0 on success / -EIO on failure
1942 **/
1943static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1944				      u32 start_addr,
1945				      __be32 *dest, u32 length_in_words)
1946{
1947	volatile u32 temp_pcii_reg;
1948	int i, delay = 0;
1949
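	/* The LDUMP handshake moves one 32-bit word per IO debug
	 acknowledge: raise the debug alert, write the start address to
	 the mailbox, then read a word from the mailbox for each ack */
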
1950	/* Write IOA interrupt reg starting LDUMP state  */
1951	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1952	       ioa_cfg->regs.set_uproc_interrupt_reg);
1953
1954	/* Wait for IO debug acknowledge */
1955	if (ipr_wait_iodbg_ack(ioa_cfg,
1956			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1957		dev_err(&ioa_cfg->pdev->dev,
1958			"IOA dump long data transfer timeout\n");
1959		return -EIO;
1960	}
1961
1962	/* Signal LDUMP interlocked - clear IO debug ack */
1963	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1964	       ioa_cfg->regs.clr_interrupt_reg);
1965
1966	/* Write Mailbox with starting address */
1967	writel(start_addr, ioa_cfg->ioa_mailbox);
1968
1969	/* Signal address valid - clear IOA Reset alert */
1970	writel(IPR_UPROCI_RESET_ALERT,
1971	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1972
1973	for (i = 0; i < length_in_words; i++) {
1974		/* Wait for IO debug acknowledge */
1975		if (ipr_wait_iodbg_ack(ioa_cfg,
1976				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1977			dev_err(&ioa_cfg->pdev->dev,
1978				"IOA dump short data transfer timeout\n");
1979			return -EIO;
1980		}
1981
1982		/* Read data from mailbox and increment destination pointer */
1983		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1984		dest++;
1985
1986		/* For all but the last word of data, signal data received */
1987		if (i < (length_in_words - 1)) {
1988			/* Signal dump data received - Clear IO debug Ack */
1989			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1990			       ioa_cfg->regs.clr_interrupt_reg);
1991		}
1992	}
1993
1994	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1995	writel(IPR_UPROCI_RESET_ALERT,
1996	       ioa_cfg->regs.set_uproc_interrupt_reg);
1997
1998	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1999	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2000
2001	/* Signal dump data received - Clear IO debug Ack */
2002	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2003	       ioa_cfg->regs.clr_interrupt_reg);
2004
2005	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2006	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2007		temp_pcii_reg =
2008		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2009
2010		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2011			return 0;
2012
2013		udelay(10);
2014		delay += 10;
2015	}
2016
2017	return 0;
2018}
2019
2020#ifdef CONFIG_SCSI_IPR_DUMP
2021/**
2022 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2023 * @ioa_cfg:		ioa config struct
2024 * @pci_address:	adapter address
2025 * @length:			length of data to copy
2026 *
2027 * Copy data from PCI adapter to kernel buffer.
2028 * Note: length MUST be a 4 byte multiple
2029 * Return value:
2030 * 	0 on success / other on failure
2031 **/
2032static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2033			unsigned long pci_address, u32 length)
2034{
2035	int bytes_copied = 0;
2036	int cur_len, rc, rem_len, rem_page_len;
2037	__be32 *page;
2038	unsigned long lock_flags = 0;
2039	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2040
2041	while (bytes_copied < length &&
2042	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
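		/* Start a fresh page on the first pass and whenever the
		 current page is full */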
2043		if (ioa_dump->page_offset >= PAGE_SIZE ||
2044		    ioa_dump->page_offset == 0) {
2045			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2046
2047			if (!page) {
2048				ipr_trace;
2049				return bytes_copied;
2050			}
2051
2052			ioa_dump->page_offset = 0;
2053			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2054			ioa_dump->next_page_index++;
2055		} else
2056			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2057
2058		rem_len = length - bytes_copied;
2059		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2060		cur_len = min(rem_len, rem_page_len);
2061
2062		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2063		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2064			rc = -EIO;
2065		} else {
2066			rc = ipr_get_ldump_data_section(ioa_cfg,
2067							pci_address + bytes_copied,
2068							&page[ioa_dump->page_offset / 4],
2069							(cur_len / sizeof(u32)));
2070		}
2071		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2072
2073		if (!rc) {
2074			ioa_dump->page_offset += cur_len;
2075			bytes_copied += cur_len;
2076		} else {
2077			ipr_trace;
2078			break;
2079		}
2080		schedule();
2081	}
2082
2083	return bytes_copied;
2084}
2085
2086/**
2087 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2088 * @hdr:	dump entry header struct
2089 *
2090 * Return value:
2091 * 	nothing
2092 **/
2093static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2094{
2095	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2096	hdr->num_elems = 1;
2097	hdr->offset = sizeof(*hdr);
2098	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2099}
2100
2101/**
2102 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2103 * @ioa_cfg:	ioa config struct
2104 * @driver_dump:	driver dump struct
2105 *
2106 * Return value:
2107 * 	nothing
2108 **/
2109static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2110				   struct ipr_driver_dump *driver_dump)
2111{
2112	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2113
2114	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2115	driver_dump->ioa_type_entry.hdr.len =
2116		sizeof(struct ipr_dump_ioa_type_entry) -
2117		sizeof(struct ipr_dump_entry_header);
2118	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2119	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2120	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2121	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2122		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2123		ucode_vpd->minor_release[1];
2124	driver_dump->hdr.num_entries++;
2125}
2126
2127/**
2128 * ipr_dump_version_data - Fill in the driver version in the dump.
2129 * @ioa_cfg:	ioa config struct
2130 * @driver_dump:	driver dump struct
2131 *
2132 * Return value:
2133 * 	nothing
2134 **/
2135static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2136				  struct ipr_driver_dump *driver_dump)
2137{
2138	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2139	driver_dump->version_entry.hdr.len =
2140		sizeof(struct ipr_dump_version_entry) -
2141		sizeof(struct ipr_dump_entry_header);
2142	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2143	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2144	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2145	driver_dump->hdr.num_entries++;
2146}
2147
2148/**
2149 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2150 * @ioa_cfg:	ioa config struct
2151 * @driver_dump:	driver dump struct
2152 *
2153 * Return value:
2154 * 	nothing
2155 **/
2156static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2157				   struct ipr_driver_dump *driver_dump)
2158{
2159	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2160	driver_dump->trace_entry.hdr.len =
2161		sizeof(struct ipr_dump_trace_entry) -
2162		sizeof(struct ipr_dump_entry_header);
2163	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2164	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2165	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2166	driver_dump->hdr.num_entries++;
2167}
2168
2169/**
2170 * ipr_dump_location_data - Fill in the IOA location in the dump.
2171 * @ioa_cfg:	ioa config struct
2172 * @driver_dump:	driver dump struct
2173 *
2174 * Return value:
2175 * 	nothing
2176 **/
2177static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2178				   struct ipr_driver_dump *driver_dump)
2179{
2180	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2181	driver_dump->location_entry.hdr.len =
2182		sizeof(struct ipr_dump_location_entry) -
2183		sizeof(struct ipr_dump_entry_header);
2184	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2185	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2186	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2187	driver_dump->hdr.num_entries++;
2188}
2189
2190/**
2191 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2192 * @ioa_cfg:	ioa config struct
2193 * @dump:		dump struct
2194 *
2195 * Return value:
2196 * 	nothing
2197 **/
2198static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2199{
2200	unsigned long start_addr, sdt_word;
2201	unsigned long lock_flags = 0;
2202	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2203	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2204	u32 num_entries, start_off, end_off;
2205	u32 bytes_to_copy, bytes_copied, rc;
2206	struct ipr_sdt *sdt;
2207	int i;
2208
2209	ENTER;
2210
2211	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2212
2213	if (ioa_cfg->sdt_state != GET_DUMP) {
2214		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2215		return;
2216	}
2217
2218	start_addr = readl(ioa_cfg->ioa_mailbox);
2219
2220	if (!ipr_sdt_is_fmt2(start_addr)) {
2221		dev_err(&ioa_cfg->pdev->dev,
2222			"Invalid dump table format: %lx\n", start_addr);
2223		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2224		return;
2225	}
2226
2227	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2228
2229	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2230
2231	/* Initialize the overall dump header */
2232	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2233	driver_dump->hdr.num_entries = 1;
2234	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2235	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2236	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2237	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2238
2239	ipr_dump_version_data(ioa_cfg, driver_dump);
2240	ipr_dump_location_data(ioa_cfg, driver_dump);
2241	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2242	ipr_dump_trace_data(ioa_cfg, driver_dump);
2243
2244	/* Update dump_header */
2245	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2246
2247	/* IOA Dump entry */
2248	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2249	ioa_dump->format = IPR_SDT_FMT2;
2250	ioa_dump->hdr.len = 0;
2251	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2252	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2253
2254	/* First entries in sdt are actually a list of dump addresses and
2255	 lengths to gather the real dump data.  sdt represents the pointer
2256	 to the ioa generated dump table.  Dump data will be extracted based
2257	 on entries in this table */
2258	sdt = &ioa_dump->sdt;
2259
2260	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2261					sizeof(struct ipr_sdt) / sizeof(__be32));
2262
2263	/* Smart Dump table is ready to use and the first entry is valid */
2264	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2265		dev_err(&ioa_cfg->pdev->dev,
2266			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2267			rc, be32_to_cpu(sdt->hdr.state));
2268		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2269		ioa_cfg->sdt_state = DUMP_OBTAINED;
2270		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2271		return;
2272	}
2273
2274	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2275
2276	if (num_entries > IPR_NUM_SDT_ENTRIES)
2277		num_entries = IPR_NUM_SDT_ENTRIES;
2278
2279	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2280
2281	for (i = 0; i < num_entries; i++) {
2282		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2283			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2284			break;
2285		}
2286
2287		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2288			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2289			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2290			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2291
2292			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2293				bytes_to_copy = end_off - start_off;
2294				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2295					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2296					continue;
2297				}
2298
2299				/* Copy data from adapter to driver buffers */
2300				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2301							    bytes_to_copy);
2302
2303				ioa_dump->hdr.len += bytes_copied;
2304
2305				if (bytes_copied != bytes_to_copy) {
2306					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2307					break;
2308				}
2309			}
2310		}
2311	}
2312
2313	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2314
2315	/* Update dump_header */
2316	driver_dump->hdr.len += ioa_dump->hdr.len;
2317	wmb();
2318	ioa_cfg->sdt_state = DUMP_OBTAINED;
2319	LEAVE;
2320}
2321
2322#else
2323#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2324#endif
2325
2326/**
2327 * ipr_release_dump - Free adapter dump memory
2328 * @kref:	kref struct
2329 *
2330 * Return value:
2331 *	nothing
2332 **/
2333static void ipr_release_dump(struct kref *kref)
2334{
2335	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2336	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2337	unsigned long lock_flags = 0;
2338	int i;
2339
2340	ENTER;
2341	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2342	ioa_cfg->dump = NULL;
2343	ioa_cfg->sdt_state = INACTIVE;
2344	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2345
2346	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2347		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2348
2349	kfree(dump);
2350	LEAVE;
2351}
2352
2353/**
2354 * ipr_worker_thread - Worker thread
2355 * @work:		ioa config struct
2356 *
2357 * Called at task level from a work thread. This function takes care
2358 * of adding and removing devices from the mid-layer as configuration
2359 * changes are detected by the adapter.
2360 *
2361 * Return value:
2362 * 	nothing
2363 **/
2364static void ipr_worker_thread(struct work_struct *work)
2365{
2366	unsigned long lock_flags;
2367	struct ipr_resource_entry *res;
2368	struct scsi_device *sdev;
2369	struct ipr_dump *dump;
2370	struct ipr_ioa_cfg *ioa_cfg =
2371		container_of(work, struct ipr_ioa_cfg, work_q);
2372	u8 bus, target, lun;
2373	int did_work;
2374
2375	ENTER;
2376	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2377
2378	if (ioa_cfg->sdt_state == GET_DUMP) {
2379		dump = ioa_cfg->dump;
2380		if (!dump) {
2381			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2382			return;
2383		}
2384		kref_get(&dump->kref);
2385		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2386		ipr_get_ioa_dump(ioa_cfg, dump);
2387		kref_put(&dump->kref, ipr_release_dump);
2388
2389		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2390		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2391			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2392		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2393		return;
2394	}
2395
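	/* Mid-layer add/remove must run without the host lock held, so the
	 lock is dropped around each call and the scan restarts afterwards */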
2396restart:
2397	do {
2398		did_work = 0;
2399		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2400			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2401			return;
2402		}
2403
2404		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2405			if (res->del_from_ml && res->sdev) {
2406				did_work = 1;
2407				sdev = res->sdev;
2408				if (!scsi_device_get(sdev)) {
2409					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2410					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2411					scsi_remove_device(sdev);
2412					scsi_device_put(sdev);
2413					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2414				}
2415				break;
2416			}
2417		}
2418	} while (did_work);
2419
2420	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2421		if (res->add_to_ml) {
2422			bus = res->cfgte.res_addr.bus;
2423			target = res->cfgte.res_addr.target;
2424			lun = res->cfgte.res_addr.lun;
2425			res->add_to_ml = 0;
2426			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2427			scsi_add_device(ioa_cfg->host, bus, target, lun);
2428			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2429			goto restart;
2430		}
2431	}
2432
2433	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2434	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2435	LEAVE;
2436}
2437
2438#ifdef CONFIG_SCSI_IPR_TRACE
2439/**
2440 * ipr_read_trace - Dump the adapter trace
2441 * @kobj:		kobject struct
2442 * @buf:		buffer
2443 * @off:		offset
2444 * @count:		buffer size
2445 *
2446 * Return value:
2447 *	number of bytes copied to buffer
2448 **/
2449static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2450			      loff_t off, size_t count)
2451{
2452	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2453	struct Scsi_Host *shost = class_to_shost(cdev);
2454	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2455	unsigned long lock_flags = 0;
2456	int size = IPR_TRACE_SIZE;
2457	char *src = (char *)ioa_cfg->trace;
2458
2459	if (off > size)
2460		return 0;
2461	if (off + count > size) {
2462		size -= off;
2463		count = size;
2464	}
2465
2466	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2467	memcpy(buf, &src[off], count);
2468	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2469	return count;
2470}
2471
2472static struct bin_attribute ipr_trace_attr = {
2473	.attr =	{
2474		.name = "trace",
2475		.mode = S_IRUGO,
2476	},
2477	.size = 0,
2478	.read = ipr_read_trace,
2479};
2480#endif
2481
2482static const struct {
2483	enum ipr_cache_state state;
2484	char *name;
2485} cache_state[] = {
2486	{ CACHE_NONE, "none" },
2487	{ CACHE_DISABLED, "disabled" },
2488	{ CACHE_ENABLED, "enabled" }
2489};
2490
2491/**
2492 * ipr_show_write_caching - Show the write caching attribute
2493 * @class_dev:	class device struct
2494 * @buf:		buffer
2495 *
2496 * Return value:
2497 *	number of bytes printed to buffer
2498 **/
2499static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2500{
2501	struct Scsi_Host *shost = class_to_shost(class_dev);
2502	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2503	unsigned long lock_flags = 0;
2504	int i, len = 0;
2505
2506	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2507	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2508		if (cache_state[i].state == ioa_cfg->cache_state) {
2509			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2510			break;
2511		}
2512	}
2513	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514	return len;
2515}
2516
2517
2518/**
2519 * ipr_store_write_caching - Enable/disable adapter write cache
2520 * @class_dev:	class_device struct
2521 * @buf:		buffer
2522 * @count:		buffer size
2523 *
2524 * This function will enable/disable adapter write cache.
2525 *
2526 * Return value:
2527 * 	count on success / other on failure
2528 **/
2529static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2530					const char *buf, size_t count)
2531{
2532	struct Scsi_Host *shost = class_to_shost(class_dev);
2533	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2534	unsigned long lock_flags = 0;
2535	enum ipr_cache_state new_state = CACHE_INVALID;
2536	int i;
2537
2538	if (!capable(CAP_SYS_ADMIN))
2539		return -EACCES;
2540	if (ioa_cfg->cache_state == CACHE_NONE)
2541		return -EINVAL;
2542
2543	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2544		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2545			new_state = cache_state[i].state;
2546			break;
2547		}
2548	}
2549
2550	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2551		return -EINVAL;
2552
2553	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2554	if (ioa_cfg->cache_state == new_state) {
2555		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556		return count;
2557	}
2558
2559	ioa_cfg->cache_state = new_state;
2560	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2561		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2562	if (!ioa_cfg->in_reset_reload)
2563		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2564	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2565	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2566
2567	return count;
2568}
2569
2570static struct class_device_attribute ipr_ioa_cache_attr = {
2571	.attr = {
2572		.name =		"write_cache",
2573		.mode =		S_IRUGO | S_IWUSR,
2574	},
2575	.show = ipr_show_write_caching,
2576	.store = ipr_store_write_caching
2577};
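
/* Example usage (sysfs path assumed; N is the SCSI host number):
 *   echo disabled > /sys/class/scsi_host/hostN/write_cache
 * A successful write triggers a NORMAL shutdown reset to apply the
 * new cache setting.
 */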
2578
2579/**
2580 * ipr_show_fw_version - Show the firmware version
2581 * @class_dev:	class device struct
2582 * @buf:		buffer
2583 *
2584 * Return value:
2585 *	number of bytes printed to buffer
2586 **/
2587static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2588{
2589	struct Scsi_Host *shost = class_to_shost(class_dev);
2590	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2591	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2592	unsigned long lock_flags = 0;
2593	int len;
2594
2595	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2596	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2597		       ucode_vpd->major_release, ucode_vpd->card_type,
2598		       ucode_vpd->minor_release[0],
2599		       ucode_vpd->minor_release[1]);
2600	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2601	return len;
2602}
2603
2604static struct class_device_attribute ipr_fw_version_attr = {
2605	.attr = {
2606		.name =		"fw_version",
2607		.mode =		S_IRUGO,
2608	},
2609	.show = ipr_show_fw_version,
2610};
2611
2612/**
2613 * ipr_show_log_level - Show the adapter's error logging level
2614 * @class_dev:	class device struct
2615 * @buf:		buffer
2616 *
2617 * Return value:
2618 * 	number of bytes printed to buffer
2619 **/
2620static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2621{
2622	struct Scsi_Host *shost = class_to_shost(class_dev);
2623	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2624	unsigned long lock_flags = 0;
2625	int len;
2626
2627	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2628	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2629	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2630	return len;
2631}
2632
2633/**
2634 * ipr_store_log_level - Change the adapter's error logging level
2635 * @class_dev:	class device struct
2636 * @buf:		buffer
 * @count:		buffer size
2637 *
2638 * Return value:
2639 * 	number of bytes consumed from the buffer
2640 **/
2641static ssize_t ipr_store_log_level(struct class_device *class_dev,
2642				   const char *buf, size_t count)
2643{
2644	struct Scsi_Host *shost = class_to_shost(class_dev);
2645	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2646	unsigned long lock_flags = 0;
2647
2648	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2649	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2650	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2651	return strlen(buf);
2652}
2653
2654static struct class_device_attribute ipr_log_level_attr = {
2655	.attr = {
2656		.name =		"log_level",
2657		.mode =		S_IRUGO | S_IWUSR,
2658	},
2659	.show = ipr_show_log_level,
2660	.store = ipr_store_log_level
2661};
2662
2663/**
2664 * ipr_store_diagnostics - IOA Diagnostics interface
2665 * @class_dev:	class_device struct
2666 * @buf:		buffer
2667 * @count:		buffer size
2668 *
2669 * This function will reset the adapter and wait a reasonable
2670 * amount of time for any errors that the adapter might log.
2671 *
2672 * Return value:
2673 * 	count on success / other on failure
2674 **/
2675static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2676				     const char *buf, size_t count)
2677{
2678	struct Scsi_Host *shost = class_to_shost(class_dev);
2679	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2680	unsigned long lock_flags = 0;
2681	int rc = count;
2682
2683	if (!capable(CAP_SYS_ADMIN))
2684		return -EACCES;
2685
2686	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687	while (ioa_cfg->in_reset_reload) {
2688		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2689		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2690		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2691	}
2692
2693	ioa_cfg->errors_logged = 0;
2694	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2695
2696	if (ioa_cfg->in_reset_reload) {
2697		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2698		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2699
2700		/* Wait for a second for any errors to be logged */
2701		msleep(1000);
2702	} else {
2703		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2704		return -EIO;
2705	}
2706
2707	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2709		rc = -EIO;
2710	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2711
2712	return rc;
2713}
2714
2715static struct class_device_attribute ipr_diagnostics_attr = {
2716	.attr = {
2717		.name =		"run_diagnostics",
2718		.mode =		S_IWUSR,
2719	},
2720	.store = ipr_store_diagnostics
2721};
2722
2723/**
2724 * ipr_show_adapter_state - Show the adapter's state
2725 * @class_dev:	class device struct
2726 * @buf:		buffer
2727 *
2728 * Return value:
2729 * 	number of bytes printed to buffer
2730 **/
2731static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2732{
2733	struct Scsi_Host *shost = class_to_shost(class_dev);
2734	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2735	unsigned long lock_flags = 0;
2736	int len;
2737
2738	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2739	if (ioa_cfg->ioa_is_dead)
2740		len = snprintf(buf, PAGE_SIZE, "offline\n");
2741	else
2742		len = snprintf(buf, PAGE_SIZE, "online\n");
2743	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2744	return len;
2745}
2746
2747/**
2748 * ipr_store_adapter_state - Change adapter state
2749 * @class_dev:	class_device struct
2750 * @buf:		buffer
2751 * @count:		buffer size
2752 *
2753 * This function will change the adapter's state.
2754 *
2755 * Return value:
2756 * 	count on success / other on failure
2757 **/
2758static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2759				       const char *buf, size_t count)
2760{
2761	struct Scsi_Host *shost = class_to_shost(class_dev);
2762	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2763	unsigned long lock_flags;
2764	int result = count;
2765
2766	if (!capable(CAP_SYS_ADMIN))
2767		return -EACCES;
2768
2769	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2770	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2771		ioa_cfg->ioa_is_dead = 0;
2772		ioa_cfg->reset_retries = 0;
2773		ioa_cfg->in_ioa_bringdown = 0;
2774		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2775	}
2776	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2777	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2778
2779	return result;
2780}
2781
2782static struct class_device_attribute ipr_ioa_state_attr = {
2783	.attr = {
2784		.name =		"state",
2785		.mode =		S_IRUGO | S_IWUSR,
2786	},
2787	.show = ipr_show_adapter_state,
2788	.store = ipr_store_adapter_state
2789};
2790
2791/**
2792 * ipr_store_reset_adapter - Reset the adapter
2793 * @class_dev:	class_device struct
2794 * @buf:		buffer
2795 * @count:		buffer size
2796 *
2797 * This function will reset the adapter.
2798 *
2799 * Return value:
2800 * 	count on success / other on failure
2801 **/
2802static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2803				       const char *buf, size_t count)
2804{
2805	struct Scsi_Host *shost = class_to_shost(class_dev);
2806	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2807	unsigned long lock_flags;
2808	int result = count;
2809
2810	if (!capable(CAP_SYS_ADMIN))
2811		return -EACCES;
2812
2813	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2814	if (!ioa_cfg->in_reset_reload)
2815		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2816	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2817	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2818
2819	return result;
2820}
2821
2822static struct class_device_attribute ipr_ioa_reset_attr = {
2823	.attr = {
2824		.name =		"reset_host",
2825		.mode =		S_IWUSR,
2826	},
2827	.store = ipr_store_reset_adapter
2828};
2829
2830/**
2831 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2832 * @buf_len:		buffer length
2833 *
2834 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2835 * list to use for microcode download
2836 *
2837 * Return value:
2838 * 	pointer to sglist / NULL on failure
2839 **/
2840static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2841{
2842	int sg_size, order, bsize_elem, num_elem, i, j;
2843	struct ipr_sglist *sglist;
2844	struct scatterlist *scatterlist;
2845	struct page *page;
2846
2847	/* Get the minimum size per scatter/gather element */
2848	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2849
2850	/* Get the actual size per element */
2851	order = get_order(sg_size);
2852
2853	/* Determine the actual number of bytes per element */
2854	bsize_elem = PAGE_SIZE * (1 << order);
2855
2856	/* Determine the actual number of sg entries needed */
2857	if (buf_len % bsize_elem)
2858		num_elem = (buf_len / bsize_elem) + 1;
2859	else
2860		num_elem = buf_len / bsize_elem;
2861
2862	/* Allocate a scatter/gather list for the DMA */
2863	sglist = kzalloc(sizeof(struct ipr_sglist) +
2864			 (sizeof(struct scatterlist) * (num_elem - 1)),
2865			 GFP_KERNEL);
2866
2867	if (sglist == NULL) {
2868		ipr_trace;
2869		return NULL;
2870	}
2871
2872	scatterlist = sglist->scatterlist;
2873
2874	sglist->order = order;
2875	sglist->num_sg = num_elem;
2876
2877	/* Allocate a bunch of sg elements */
2878	for (i = 0; i < num_elem; i++) {
2879		page = alloc_pages(GFP_KERNEL, order);
2880		if (!page) {
2881			ipr_trace;
2882
2883			/* Free up what we already allocated */
2884			for (j = i - 1; j >= 0; j--)
2885				__free_pages(scatterlist[j].page, order);
2886			kfree(sglist);
2887			return NULL;
2888		}
2889
2890		scatterlist[i].page = page;
2891	}
2892
2893	return sglist;
2894}
2895
2896/**
2897 * ipr_free_ucode_buffer - Frees a microcode download buffer
2898 * @sglist:		scatter/gather list pointer
2899 *
2900 * Free a DMA'able ucode download buffer previously allocated with
2901 * ipr_alloc_ucode_buffer
2902 *
2903 * Return value:
2904 * 	nothing
2905 **/
2906static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2907{
2908	int i;
2909
2910	for (i = 0; i < sglist->num_sg; i++)
2911		__free_pages(sglist->scatterlist[i].page, sglist->order);
2912
2913	kfree(sglist);
2914}
2915
2916/**
2917 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2918 * @sglist:		scatter/gather list pointer
2919 * @buffer:		buffer pointer
2920 * @len:		buffer length
2921 *
2922 * Copy a microcode image from a user buffer into a buffer allocated by
2923 * ipr_alloc_ucode_buffer
2924 *
2925 * Return value:
2926 * 	0 on success / other on failure
2927 **/
2928static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2929				 u8 *buffer, u32 len)
2930{
2931	int bsize_elem, i, result = 0;
2932	struct scatterlist *scatterlist;
2933	void *kaddr;
2934
2935	/* Determine the actual number of bytes per element */
2936	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2937
2938	scatterlist = sglist->scatterlist;
2939
2940	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2941		kaddr = kmap(scatterlist[i].page);
2942		memcpy(kaddr, buffer, bsize_elem);
2943		kunmap(scatterlist[i].page);
2944
2945		scatterlist[i].length = bsize_elem;
2946
2947		if (result != 0) {
2948			ipr_trace;
2949			return result;
2950		}
2951	}
2952
2953	if (len % bsize_elem) {
2954		kaddr = kmap(scatterlist[i].page);
2955		memcpy(kaddr, buffer, len % bsize_elem);
2956		kunmap(scatterlist[i].page);
2957
2958		scatterlist[i].length = len % bsize_elem;
2959	}
2960
2961	sglist->buffer_len = len;
2962	return result;
2963}
2964
2965/**
2966 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2967 * @ipr_cmd:	ipr command struct
2968 * @sglist:		scatter/gather list
2969 *
2970 * Builds a microcode download IOA data list (IOADL).
2971 *
2972 * Return value:
 * 	nothing
 **/
2973static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2974				  struct ipr_sglist *sglist)
2975{
2976	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2977	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2978	struct scatterlist *scatterlist = sglist->scatterlist;
2979	int i;
2980
2981	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2982	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2983	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2984	ioarcb->write_ioadl_len =
2985		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2986
2987	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2988		ioadl[i].flags_and_data_len =
2989			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2990		ioadl[i].address =
2991			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2992	}
2993
2994	ioadl[i-1].flags_and_data_len |=
2995		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2996}
2997
2998/**
2999 * ipr_update_ioa_ucode - Update IOA's microcode
3000 * @ioa_cfg:	ioa config struct
3001 * @sglist:		scatter/gather list
3002 *
3003 * Initiate an adapter reset to update the IOA's microcode
3004 *
3005 * Return value:
3006 * 	0 on success / -EIO on failure
3007 **/
3008static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3009				struct ipr_sglist *sglist)
3010{
3011	unsigned long lock_flags;
3012
3013	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3014	while (ioa_cfg->in_reset_reload) {
3015		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3016		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3017		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3018	}
3019
3020	if (ioa_cfg->ucode_sglist) {
3021		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3022		dev_err(&ioa_cfg->pdev->dev,
3023			"Microcode download already in progress\n");
3024		return -EIO;
3025	}
3026
3027	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3028					sglist->num_sg, DMA_TO_DEVICE);
3029
3030	if (!sglist->num_dma_sg) {
3031		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3032		dev_err(&ioa_cfg->pdev->dev,
3033			"Failed to map microcode download buffer!\n");
3034		return -EIO;
3035	}
3036
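	/* The download itself is carried out by the adapter reset job,
	 which picks up ucode_sglist during the reset sequence */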
3037	ioa_cfg->ucode_sglist = sglist;
3038	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3039	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3040	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3041
3042	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3043	ioa_cfg->ucode_sglist = NULL;
3044	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3045	return 0;
3046}
3047
3048/**
3049 * ipr_store_update_fw - Update the firmware on the adapter
3050 * @class_dev:	class_device struct
3051 * @buf:		buffer
3052 * @count:		buffer size
3053 *
3054 * This function will update the firmware on the adapter.
3055 *
3056 * Return value:
3057 * 	count on success / other on failure
3058 **/
3059static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3060				       const char *buf, size_t count)
3061{
3062	struct Scsi_Host *shost = class_to_shost(class_dev);
3063	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3064	struct ipr_ucode_image_header *image_hdr;
3065	const struct firmware *fw_entry;
3066	struct ipr_sglist *sglist;
3067	char fname[100];
3068	u8 *src;
3069	int len, result, dnld_size;
3070
3071	if (!capable(CAP_SYS_ADMIN))
3072		return -EACCES;
3073
3074	snprintf(fname, sizeof(fname), "%s", buf);
3075	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
3076
3077	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3078		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3079		return -EIO;
3080	}
3081
3082	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3083
3084	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3085	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3086	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3087		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3088		release_firmware(fw_entry);
3089		return -EINVAL;
3090	}
3091
3092	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3093	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3094	sglist = ipr_alloc_ucode_buffer(dnld_size);
3095
3096	if (!sglist) {
3097		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3098		release_firmware(fw_entry);
3099		return -ENOMEM;
3100	}
3101
3102	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3103
3104	if (result) {
3105		dev_err(&ioa_cfg->pdev->dev,
3106			"Microcode buffer copy to DMA buffer failed\n");
3107		goto out;
3108	}
3109
3110	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3111
3112	if (!result)
3113		result = count;
3114out:
3115	ipr_free_ucode_buffer(sglist);
3116	release_firmware(fw_entry);
3117	return result;
3118}
3119
3120static struct class_device_attribute ipr_update_fw_attr = {
3121	.attr = {
3122		.name =		"update_fw",
3123		.mode =		S_IWUSR,
3124	},
3125	.store = ipr_store_update_fw
3126};
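
/* Example usage (sysfs path assumed): write the name of a microcode
 * image that request_firmware() can locate, typically a file under
 * /lib/firmware:
 *   echo <image file name> > /sys/class/scsi_host/hostN/update_fw
 */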
3127
3128static struct class_device_attribute *ipr_ioa_attrs[] = {
3129	&ipr_fw_version_attr,
3130	&ipr_log_level_attr,
3131	&ipr_diagnostics_attr,
3132	&ipr_ioa_state_attr,
3133	&ipr_ioa_reset_attr,
3134	&ipr_update_fw_attr,
3135	&ipr_ioa_cache_attr,
3136	NULL,
3137};
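
/* The attributes above are registered on the SCSI host class device
 * and show up as (path assumed) /sys/class/scsi_host/hostN/<name>,
 * e.g.:
 *   cat fw_version
 *   echo 2 > log_level
 *   echo online > state
 *   echo 1 > reset_host
 */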
3138
3139#ifdef CONFIG_SCSI_IPR_DUMP
3140/**
3141 * ipr_read_dump - Dump the adapter
3142 * @kobj:		kobject struct
3143 * @buf:		buffer
3144 * @off:		offset
3145 * @count:		buffer size
3146 *
3147 * Return value:
3148 *	number of bytes copied to buffer
3149 **/
3150static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3151			      loff_t off, size_t count)
3152{
3153	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
3154	struct Scsi_Host *shost = class_to_shost(cdev);
3155	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3156	struct ipr_dump *dump;
3157	unsigned long lock_flags = 0;
3158	u8 *src;
3159	int len;
3160	size_t rc = count;
3161
3162	if (!capable(CAP_SYS_ADMIN))
3163		return -EACCES;
3164
3165	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3166	dump = ioa_cfg->dump;
3167
3168	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3169		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3170		return 0;
3171	}
3172	kref_get(&dump->kref);
3173	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174
3175	if (off > dump->driver_dump.hdr.len) {
3176		kref_put(&dump->kref, ipr_release_dump);
3177		return 0;
3178	}
3179
3180	if (off + count > dump->driver_dump.hdr.len) {
3181		count = dump->driver_dump.hdr.len - off;
3182		rc = count;
3183	}
3184
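	/* The dump is presented to the reader as three consecutive
	 regions: the driver dump, the IOA dump header, then the paged
	 IOA dump data */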
3185	if (count && off < sizeof(dump->driver_dump)) {
3186		if (off + count > sizeof(dump->driver_dump))
3187			len = sizeof(dump->driver_dump) - off;
3188		else
3189			len = count;
3190		src = (u8 *)&dump->driver_dump + off;
3191		memcpy(buf, src, len);
3192		buf += len;
3193		off += len;
3194		count -= len;
3195	}
3196
3197	off -= sizeof(dump->driver_dump);
3198
3199	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3200		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3201			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3202		else
3203			len = count;
3204		src = (u8 *)&dump->ioa_dump + off;
3205		memcpy(buf, src, len);
3206		buf += len;
3207		off += len;
3208		count -= len;
3209	}
3210
3211	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3212
3213	while (count) {
3214		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3215			len = PAGE_ALIGN(off) - off;
3216		else
3217			len = count;
3218		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3219		src += off & ~PAGE_MASK;
3220		memcpy(buf, src, len);
3221		buf += len;
3222		off += len;
3223		count -= len;
3224	}
3225
3226	kref_put(&dump->kref, ipr_release_dump);
3227	return rc;
3228}
3229
3230/**
3231 * ipr_alloc_dump - Prepare for adapter dump
3232 * @ioa_cfg:	ioa config struct
3233 *
3234 * Return value:
3235 *	0 on success / other on failure
3236 **/
3237static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3238{
3239	struct ipr_dump *dump;
3240	unsigned long lock_flags = 0;
3241
3242	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3243
3244	if (!dump) {
3245		ipr_err("Dump memory allocation failed\n");
3246		return -ENOMEM;
3247	}
3248
3249	kref_init(&dump->kref);
3250	dump->ioa_cfg = ioa_cfg;
3251
3252	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3253
3254	if (INACTIVE != ioa_cfg->sdt_state) {
3255		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3256		kfree(dump);
3257		return 0;
3258	}
3259
3260	ioa_cfg->dump = dump;
3261	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3262	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3263		ioa_cfg->dump_taken = 1;
3264		schedule_work(&ioa_cfg->work_q);
3265	}
3266	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267
3268	return 0;
3269}
3270
3271/**
3272 * ipr_free_dump - Free adapter dump memory
3273 * @ioa_cfg:	ioa config struct
3274 *
3275 * Return value:
3276 *	0 on success / other on failure
3277 **/
3278static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3279{
3280	struct ipr_dump *dump;
3281	unsigned long lock_flags = 0;
3282
3283	ENTER;
3284
3285	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3286	dump = ioa_cfg->dump;
3287	if (!dump) {
3288		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289		return 0;
3290	}
3291
3292	ioa_cfg->dump = NULL;
3293	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294
3295	kref_put(&dump->kref, ipr_release_dump);
3296
3297	LEAVE;
3298	return 0;
3299}
3300
3301/**
3302 * ipr_write_dump - Setup dump state of adapter
3303 * @kobj:		kobject struct
3304 * @buf:		buffer
3305 * @off:		offset
3306 * @count:		buffer size
3307 *
3308 * Return value:
3309 *	count on success / other on failure
3310 **/
3311static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3312			      loff_t off, size_t count)
3313{
3314	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
3315	struct Scsi_Host *shost = class_to_shost(cdev);
3316	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3317	int rc;
3318
3319	if (!capable(CAP_SYS_ADMIN))
3320		return -EACCES;
3321
3322	if (buf[0] == '1')
3323		rc = ipr_alloc_dump(ioa_cfg);
3324	else if (buf[0] == '0')
3325		rc = ipr_free_dump(ioa_cfg);
3326	else
3327		return -EINVAL;
3328
3329	if (rc)
3330		return rc;
3331	else
3332		return count;
3333}
3334
3335static struct bin_attribute ipr_dump_attr = {
3336	.attr =	{
3337		.name = "dump",
3338		.mode = S_IRUSR | S_IWUSR,
3339	},
3340	.size = 0,
3341	.read = ipr_read_dump,
3342	.write = ipr_write_dump
3343};
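
/* Example usage (sysfs path assumed):
 *   echo 1 > /sys/class/scsi_host/hostN/dump    (set up dump memory)
 *   cat /sys/class/scsi_host/hostN/dump > file  (read an obtained dump)
 *   echo 0 > /sys/class/scsi_host/hostN/dump    (free the dump memory)
 */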
3344#else
3345static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3346#endif
3347
3348/**
3349 * ipr_change_queue_depth - Change the device's queue depth
3350 * @sdev:	scsi device struct
3351 * @qdepth:	depth to set
3352 *
3353 * Return value:
3354 * 	actual depth set
3355 **/
3356static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3357{
3358	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3359	struct ipr_resource_entry *res;
3360	unsigned long lock_flags = 0;
3361
3362	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3363	res = (struct ipr_resource_entry *)sdev->hostdata;
3364
3365	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3366		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3367	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3368
3369	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3370	return sdev->queue_depth;
3371}
3372
3373/**
3374 * ipr_change_queue_type - Change the device's queue type
3375 * @sdev:		scsi device struct
3376 * @tag_type:	type of tags to use
3377 *
3378 * Return value:
3379 * 	actual queue type set
3380 **/
3381static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3382{
3383	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3384	struct ipr_resource_entry *res;
3385	unsigned long lock_flags = 0;
3386
3387	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3388	res = (struct ipr_resource_entry *)sdev->hostdata;
3389
3390	if (res) {
3391		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3392			/*
3393			 * We don't bother quiescing the device here since the
3394			 * adapter firmware does it for us.
3395			 */
3396			scsi_set_tag_type(sdev, tag_type);
3397
3398			if (tag_type)
3399				scsi_activate_tcq(sdev, sdev->queue_depth);
3400			else
3401				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3402		} else
3403			tag_type = 0;
3404	} else
3405		tag_type = 0;
3406
3407	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408	return tag_type;
3409}
3410
3411/**
3412 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3413 * @dev:	device struct
3414 * @buf:	buffer
3415 *
3416 * Return value:
3417 * 	number of bytes printed to buffer
3418 **/
3419static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3420{
3421	struct scsi_device *sdev = to_scsi_device(dev);
3422	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3423	struct ipr_resource_entry *res;
3424	unsigned long lock_flags = 0;
3425	ssize_t len = -ENXIO;
3426
3427	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428	res = (struct ipr_resource_entry *)sdev->hostdata;
3429	if (res)
3430		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3431	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432	return len;
3433}
3434
3435static struct device_attribute ipr_adapter_handle_attr = {
3436	.attr = {
3437		.name = 	"adapter_handle",
3438		.mode =		S_IRUSR,
3439	},
3440	.show = ipr_show_adapter_handle
3441};
3442
3443static struct device_attribute *ipr_dev_attrs[] = {
3444	&ipr_adapter_handle_attr,
3445	NULL,
3446};
3447
3448/**
3449 * ipr_biosparam - Return the HSC mapping
3450 * @sdev:			scsi device struct
3451 * @block_device:	block device pointer
3452 * @capacity:		capacity of the device
3453 * @parm:			Array containing returned HSC values.
3454 *
3455 * This function generates the HSC parms that fdisk uses.
3456 * We want to make sure we return something that places partitions
3457 * on 4k boundaries for best performance with the IOA.
3458 *
3459 * Return value:
3460 * 	0 on success
3461 **/
3462static int ipr_biosparam(struct scsi_device *sdev,
3463			 struct block_device *block_device,
3464			 sector_t capacity, int *parm)
3465{
3466	int heads, sectors;
3467	sector_t cylinders;
3468
3469	heads = 128;
3470	sectors = 32;
3471
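	/* 128 heads * 32 sectors = 4096 sectors (2MB) per cylinder, so
	 cylinder-aligned partitions land on 4k boundaries */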
3472	cylinders = capacity;
3473	sector_div(cylinders, (128 * 32));
3474
3475	/* return result */
3476	parm[0] = heads;
3477	parm[1] = sectors;
3478	parm[2] = cylinders;
3479
3480	return 0;
3481}
3482
3483/**
3484 * ipr_find_starget - Find target based on bus/target.
3485 * @starget:	scsi target struct
3486 *
3487 * Return value:
3488 * 	resource entry pointer if found / NULL if not found
3489 **/
3490static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3491{
3492	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3493	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3494	struct ipr_resource_entry *res;
3495
3496	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3497		if ((res->cfgte.res_addr.bus == starget->channel) &&
3498		    (res->cfgte.res_addr.target == starget->id) &&
3499		    (res->cfgte.res_addr.lun == 0)) {
3500			return res;
3501		}
3502	}
3503
3504	return NULL;
3505}
3506
3507static struct ata_port_info sata_port_info;
3508
3509/**
3510 * ipr_target_alloc - Prepare for commands to a SCSI target
3511 * @starget:	scsi target struct
3512 *
3513 * If the device is a SATA device, this function allocates an
3514 * ATA port with libata, else it does nothing.
3515 *
3516 * Return value:
3517 * 	0 on success / non-0 on failure
3518 **/
3519static int ipr_target_alloc(struct scsi_target *starget)
3520{
3521	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3522	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3523	struct ipr_sata_port *sata_port;
3524	struct ata_port *ap;
3525	struct ipr_resource_entry *res;
3526	unsigned long lock_flags;
3527
3528	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3529	res = ipr_find_starget(starget);
3530	starget->hostdata = NULL;
3531
3532	if (res && ipr_is_gata(res)) {
3533		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
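		/*
		 * Drop the host lock across the allocations below, since
		 * kzalloc(GFP_KERNEL) and ata_sas_port_alloc() may sleep;
		 * it is re-taken before the port is published.
		 */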
3534		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3535		if (!sata_port)
3536			return -ENOMEM;
3537
3538		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3539		if (ap) {
3540			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3541			sata_port->ioa_cfg = ioa_cfg;
3542			sata_port->ap = ap;
3543			sata_port->res = res;
3544
3545			res->sata_port = sata_port;
3546			ap->private_data = sata_port;
3547			starget->hostdata = sata_port;
3548		} else {
3549			kfree(sata_port);
3550			return -ENOMEM;
3551		}
3552	}
3553	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3554
3555	return 0;
3556}
3557
3558/**
3559 * ipr_target_destroy - Destroy a SCSI target
3560 * @starget:	scsi target struct
3561 *
3562 * If the device was a SATA device, this function frees the libata
3563 * ATA port, else it does nothing.
3564 *
3565 **/
3566static void ipr_target_destroy(struct scsi_target *starget)
3567{
3568	struct ipr_sata_port *sata_port = starget->hostdata;
3569
3570	if (sata_port) {
3571		starget->hostdata = NULL;
3572		ata_sas_port_destroy(sata_port->ap);
3573		kfree(sata_port);
3574	}
3575}
3576
3577/**
3578 * ipr_find_sdev - Find device based on bus/target/lun.
3579 * @sdev:	scsi device struct
3580 *
3581 * Return value:
3582 * 	resource entry pointer if found / NULL if not found
3583 **/
3584static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3585{
3586	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3587	struct ipr_resource_entry *res;
3588
3589	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3590		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3591		    (res->cfgte.res_addr.target == sdev->id) &&
3592		    (res->cfgte.res_addr.lun == sdev->lun))
3593			return res;
3594	}
3595
3596	return NULL;
3597}
3598
3599/**
3600 * ipr_slave_destroy - Unconfigure a SCSI device
3601 * @sdev:	scsi device struct
3602 *
3603 * Return value:
3604 * 	nothing
3605 **/
3606static void ipr_slave_destroy(struct scsi_device *sdev)
3607{
3608	struct ipr_resource_entry *res;
3609	struct ipr_ioa_cfg *ioa_cfg;
3610	unsigned long lock_flags = 0;
3611
3612	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3613
3614	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3615	res = (struct ipr_resource_entry *) sdev->hostdata;
3616	if (res) {
3617		if (res->sata_port)
3618			ata_port_disable(res->sata_port->ap);
3619		sdev->hostdata = NULL;
3620		res->sdev = NULL;
3621		res->sata_port = NULL;
3622	}
3623	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624}
3625
3626/**
3627 * ipr_slave_configure - Configure a SCSI device
3628 * @sdev:	scsi device struct
3629 *
3630 * This function configures the specified scsi device.
3631 *
3632 * Return value:
3633 * 	0 on success
3634 **/
3635static int ipr_slave_configure(struct scsi_device *sdev)
3636{
3637	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3638	struct ipr_resource_entry *res;
3639	unsigned long lock_flags = 0;
3640
3641	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3642	res = sdev->hostdata;
3643	if (res) {
3644		if (ipr_is_af_dasd_device(res))
3645			sdev->type = TYPE_RAID;
3646		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3647			sdev->scsi_level = 4;
3648			sdev->no_uld_attach = 1;
3649		}
3650		if (ipr_is_vset_device(res)) {
3651			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3652			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3653		}
3654		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3655			sdev->allow_restart = 1;
3656		if (ipr_is_gata(res) && res->sata_port) {
3657			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3658			ata_sas_slave_configure(sdev, res->sata_port->ap);
3659		} else {
3660			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3661		}
3662	}
3663	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3664	return 0;
3665}
3666
3667/**
3668 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3669 * @sdev:	scsi device struct
3670 *
3671 * This function initializes an ATA port so that future commands
3672 * sent through queuecommand will work.
3673 *
3674 * Return value:
3675 * 	0 on success
3676 **/
3677static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3678{
3679	struct ipr_sata_port *sata_port = NULL;
3680	int rc = -ENXIO;
3681
3682	ENTER;
3683	if (sdev->sdev_target)
3684		sata_port = sdev->sdev_target->hostdata;
3685	if (sata_port)
3686		rc = ata_sas_port_init(sata_port->ap);
3687	if (rc)
3688		ipr_slave_destroy(sdev);
3689
3690	LEAVE;
3691	return rc;
3692}
3693
3694/**
3695 * ipr_slave_alloc - Prepare for commands to a device.
3696 * @sdev:	scsi device struct
3697 *
3698 * This function saves a pointer to the resource entry
3699 * in the scsi device struct if the device exists. We
3700 * can then use this pointer in ipr_queuecommand when
3701 * handling new commands.
3702 *
3703 * Return value:
3704 * 	0 on success / -ENXIO if device does not exist
3705 **/
3706static int ipr_slave_alloc(struct scsi_device *sdev)
3707{
3708	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3709	struct ipr_resource_entry *res;
3710	unsigned long lock_flags;
3711	int rc = -ENXIO;
3712
3713	sdev->hostdata = NULL;
3714
3715	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3716
3717	res = ipr_find_sdev(sdev);
3718	if (res) {
3719		res->sdev = sdev;
3720		res->add_to_ml = 0;
3721		res->in_erp = 0;
3722		sdev->hostdata = res;
3723		if (!ipr_is_naca_model(res))
3724			res->needs_sync_complete = 1;
3725		rc = 0;
3726		if (ipr_is_gata(res)) {
3727			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3728			return ipr_ata_slave_alloc(sdev);
3729		}
3730	}
3731
3732	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3733
3734	return rc;
3735}
3736
3737/**
3738	 * __ipr_eh_host_reset - Reset the host adapter
3739 * @scsi_cmd:	scsi command struct
3740 *
3741 * Return value:
3742 * 	SUCCESS / FAILED
3743 **/
3744static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3745{
3746	struct ipr_ioa_cfg *ioa_cfg;
3747	int rc;
3748
3749	ENTER;
3750	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3751
3752	dev_err(&ioa_cfg->pdev->dev,
3753		"Adapter being reset as a result of error recovery.\n");
3754
3755	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3756		ioa_cfg->sdt_state = GET_DUMP;
3757
3758	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3759
3760	LEAVE;
3761	return rc;
3762}
3763
3764static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3765{
3766	int rc;
3767
3768	spin_lock_irq(cmd->device->host->host_lock);
3769	rc = __ipr_eh_host_reset(cmd);
3770	spin_unlock_irq(cmd->device->host->host_lock);
3771
3772	return rc;
3773}
3774
3775/**
3776 * ipr_device_reset - Reset the device
3777 * @ioa_cfg:	ioa config struct
3778 * @res:		resource entry struct
3779 *
3780 * This function issues a device reset to the affected device.
3781 * If the device is a SCSI device, a LUN reset will be sent
3782 * to the device first. If that does not work, a target reset
3783 * will be sent. If the device is a SATA device, a PHY reset will
3784 * be sent.
3785 *
3786 * Return value:
3787 *	0 on success / non-zero on failure
3788 **/
3789static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3790			    struct ipr_resource_entry *res)
3791{
3792	struct ipr_cmnd *ipr_cmd;
3793	struct ipr_ioarcb *ioarcb;
3794	struct ipr_cmd_pkt *cmd_pkt;
3795	struct ipr_ioarcb_ata_regs *regs;
3796	u32 ioasc;
3797
3798	ENTER;
3799	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3800	ioarcb = &ipr_cmd->ioarcb;
3801	cmd_pkt = &ioarcb->cmd_pkt;
3802	regs = &ioarcb->add_data.u.regs;
3803
3804	ioarcb->res_handle = res->cfgte.res_handle;
3805	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3806	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3807	if (ipr_is_gata(res)) {
3808		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3809		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3810		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3811	}
3812
3813	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3814	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3815	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3816	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3817		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3818		       sizeof(struct ipr_ioasa_gata));
3819
3820	LEAVE;
3821	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3822}
3823
3824/**
3825 * ipr_sata_reset - Reset the SATA port
3826 * @ap:		SATA port to reset
3827 * @classes:	class of the attached device
3828 *
3829 * This function issues a SATA phy reset to the affected ATA port.
3830 *
3831 * Return value:
3832 *	0 on success / non-zero on failure
3833 **/
3834static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3835				unsigned long deadline)
3836{
3837	struct ipr_sata_port *sata_port = ap->private_data;
3838	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3839	struct ipr_resource_entry *res;
3840	unsigned long lock_flags = 0;
3841	int rc = -ENXIO;
3842
3843	ENTER;
3844	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
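	/*
	 * If an adapter reset/reload is already in progress, drop the
	 * lock and wait for it to finish before resetting the device.
	 */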
3845	while (ioa_cfg->in_reset_reload) {
3846		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3847		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3848		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3849	}
3850
3851	res = sata_port->res;
3852	if (res) {
3853		rc = ipr_device_reset(ioa_cfg, res);
3854		switch (res->cfgte.proto) {
3855		case IPR_PROTO_SATA:
3856		case IPR_PROTO_SAS_STP:
3857			*classes = ATA_DEV_ATA;
3858			break;
3859		case IPR_PROTO_SATA_ATAPI:
3860		case IPR_PROTO_SAS_STP_ATAPI:
3861			*classes = ATA_DEV_ATAPI;
3862			break;
3863		default:
3864			*classes = ATA_DEV_UNKNOWN;
3865			break;
3866		}
3867	}
3868
3869	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3870	LEAVE;
3871	return rc;
3872}
3873
3874/**
3875	 * __ipr_eh_dev_reset - Reset the device
3876 * @scsi_cmd:	scsi command struct
3877 *
3878 * This function issues a device reset to the affected device.
3879 * A LUN reset will be sent to the device first. If that does
3880 * not work, a target reset will be sent.
3881 *
3882 * Return value:
3883 *	SUCCESS / FAILED
3884 **/
3885static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3886{
3887	struct ipr_cmnd *ipr_cmd;
3888	struct ipr_ioa_cfg *ioa_cfg;
3889	struct ipr_resource_entry *res;
3890	struct ata_port *ap;
3891	int rc = 0;
3892
3893	ENTER;
3894	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3895	res = scsi_cmd->device->hostdata;
3896
3897	if (!res)
3898		return FAILED;
3899
3900	/*
3901	 * If we are currently going through reset/reload, return FAILED. This
3902	 * will force the mid-layer to call ipr_eh_host_reset, which will then
3903	 * go to sleep and wait for the reset to complete.
3904	 */
3905	if (ioa_cfg->in_reset_reload)
3906		return FAILED;
3907	if (ioa_cfg->ioa_is_dead)
3908		return FAILED;
3909
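	/*
	 * Retarget the completion of any op still outstanding against this
	 * device so that it finishes through the eh done routines, and mark
	 * in-flight ATA commands as timed out/failed so libata's error
	 * handler will take ownership of them.
	 */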
3910	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3911		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3912			if (ipr_cmd->scsi_cmd)
3913				ipr_cmd->done = ipr_scsi_eh_done;
3914			if (ipr_cmd->qc)
3915				ipr_cmd->done = ipr_sata_eh_done;
3916			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3917				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3918				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3919			}
3920		}
3921	}
3922
3923	res->resetting_device = 1;
3924	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3925
3926	if (ipr_is_gata(res) && res->sata_port) {
3927		ap = res->sata_port->ap;
3928		spin_unlock_irq(scsi_cmd->device->host->host_lock);
3929		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3930		spin_lock_irq(scsi_cmd->device->host->host_lock);
3931
3932		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3933			if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3934				rc = -EIO;
3935				break;
3936			}
3937		}
3938	} else
3939		rc = ipr_device_reset(ioa_cfg, res);
3940	res->resetting_device = 0;
3941
3942	LEAVE;
3943	return (rc ? FAILED : SUCCESS);
3944}
3945
3946static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3947{
3948	int rc;
3949
3950	spin_lock_irq(cmd->device->host->host_lock);
3951	rc = __ipr_eh_dev_reset(cmd);
3952	spin_unlock_irq(cmd->device->host->host_lock);
3953
3954	return rc;
3955}
3956
3957/**
3958 * ipr_bus_reset_done - Op done function for bus reset.
3959 * @ipr_cmd:	ipr command struct
3960 *
3961 * This function is the op done function for a bus reset
3962 *
3963 * Return value:
3964 * 	none
3965 **/
3966static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3967{
3968	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3969	struct ipr_resource_entry *res;
3970
3971	ENTER;
3972	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3973		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3974			    sizeof(res->cfgte.res_handle))) {
3975			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3976			break;
3977		}
3978	}
3979
3980	/*
3981	 * If the abort has not completed, indicate the reset has by clearing
3982	 * the ->sibling link set up in ipr_abort_timeout; else call the
	 * abort's done function to wake the sleeping eh thread
3983	 */
3984	if (ipr_cmd->sibling->sibling)
3985		ipr_cmd->sibling->sibling = NULL;
3986	else
3987		ipr_cmd->sibling->done(ipr_cmd->sibling);
3988
3989	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3990	LEAVE;
3991}
3992
3993/**
3994 * ipr_abort_timeout - An abort task has timed out
3995 * @ipr_cmd:	ipr command struct
3996 *
3997 * This function handles when an abort task times out. If this
3998 * happens we issue a bus reset since we have resources tied
3999 * up that must be freed before returning to the midlayer.
4000 *
4001 * Return value:
4002 *	none
4003 **/
4004static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4005{
4006	struct ipr_cmnd *reset_cmd;
4007	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4008	struct ipr_cmd_pkt *cmd_pkt;
4009	unsigned long lock_flags = 0;
4010
4011	ENTER;
4012	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4013	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4014		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4015		return;
4016	}
4017
4018	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4019	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4020	ipr_cmd->sibling = reset_cmd;
4021	reset_cmd->sibling = ipr_cmd;
4022	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4023	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4024	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4025	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4026	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4027
4028	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4029	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4030	LEAVE;
4031}
4032
4033/**
4034 * ipr_cancel_op - Cancel specified op
4035 * @scsi_cmd:	scsi command struct
4036 *
4037 * This function cancels specified op.
4038 *
4039 * Return value:
4040 *	SUCCESS / FAILED
4041 **/
4042static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4043{
4044	struct ipr_cmnd *ipr_cmd;
4045	struct ipr_ioa_cfg *ioa_cfg;
4046	struct ipr_resource_entry *res;
4047	struct ipr_cmd_pkt *cmd_pkt;
4048	u32 ioasc;
4049	int op_found = 0;
4050
4051	ENTER;
4052	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4053	res = scsi_cmd->device->hostdata;
4054
4055	/* If we are currently going through reset/reload, return FAILED.
4056	 * This will force the mid-layer to call ipr_eh_host_reset,
4057	 * which will then go to sleep and wait for the reset to complete
4058	 */
4059	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4060		return FAILED;
4061	if (!res || !ipr_is_gscsi(res))
4062		return FAILED;
4063
4064	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4065		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4066			ipr_cmd->done = ipr_scsi_eh_done;
4067			op_found = 1;
4068			break;
4069		}
4070	}
4071
4072	if (!op_found)
4073		return SUCCESS;
4074
4075	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4076	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4077	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4078	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4079	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4080	ipr_cmd->u.sdev = scsi_cmd->device;
4081
4082	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4083		    scsi_cmd->cmnd[0]);
4084	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4085	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4086
4087	/*
4088	 * If the abort task timed out and we sent a bus reset, we will get
4089	 * one of the following responses to the abort
4090	 */
4091	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4092		ioasc = 0;
4093		ipr_trace;
4094	}
4095
4096	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4097	if (!ipr_is_naca_model(res))
4098		res->needs_sync_complete = 1;
4099
4100	LEAVE;
4101	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4102}
4103
4104/**
4105 * ipr_eh_abort - Abort a single op
4106 * @scsi_cmd:	scsi command struct
4107 *
4108 * Return value:
4109 * 	SUCCESS / FAILED
4110 **/
4111static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4112{
4113	unsigned long flags;
4114	int rc;
4115
4116	ENTER;
4117
4118	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4119	rc = ipr_cancel_op(scsi_cmd);
4120	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4121
4122	LEAVE;
4123	return rc;
4124}
4125
4126/**
4127 * ipr_handle_other_interrupt - Handle "other" interrupts
4128 * @ioa_cfg:	ioa config struct
4129 * @int_reg:	interrupt register
4130 *
4131 * Return value:
4132 * 	IRQ_NONE / IRQ_HANDLED
4133 **/
4134static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4135					      volatile u32 int_reg)
4136{
4137	irqreturn_t rc = IRQ_HANDLED;
4138
4139	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4140		/* Mask the interrupt */
4141		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4142
4143		/* Clear the interrupt */
4144		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4145		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4146
4147		list_del(&ioa_cfg->reset_cmd->queue);
4148		del_timer(&ioa_cfg->reset_cmd->timer);
4149		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4150	} else {
4151		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4152			ioa_cfg->ioa_unit_checked = 1;
4153		else
4154			dev_err(&ioa_cfg->pdev->dev,
4155				"Permanent IOA failure. 0x%08X\n", int_reg);
4156
4157		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4158			ioa_cfg->sdt_state = GET_DUMP;
4159
4160		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4161		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4162	}
4163
4164	return rc;
4165}
4166
4167/**
4168 * ipr_isr - Interrupt service routine
4169 * @irq:	irq number
4170 * @devp:	pointer to ioa config struct
4171 *
4172 * Return value:
4173 * 	IRQ_NONE / IRQ_HANDLED
4174 **/
4175static irqreturn_t ipr_isr(int irq, void *devp)
4176{
4177	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4178	unsigned long lock_flags = 0;
4179	volatile u32 int_reg, int_mask_reg;
4180	u32 ioasc;
4181	u16 cmd_index;
4182	struct ipr_cmnd *ipr_cmd;
4183	irqreturn_t rc = IRQ_NONE;
4184
4185	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4186
4187	/* If interrupts are disabled, ignore the interrupt */
4188	if (!ioa_cfg->allow_interrupts) {
4189		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4190		return IRQ_NONE;
4191	}
4192
4193	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4194	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4195
4196	/* If an interrupt on the adapter did not occur, ignore it */
4197	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4198		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4199		return IRQ_NONE;
4200	}
4201
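	/*
	 * Drain the host request/response queue (HRRQ). The adapter flips
	 * the toggle bit in each entry every time the circular queue wraps,
	 * so an entry is valid only while its toggle bit matches
	 * ioa_cfg->toggle_bit; the host flips its copy when it wraps below.
	 */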
4202	while (1) {
4203		ipr_cmd = NULL;
4204
4205		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4206		       ioa_cfg->toggle_bit) {
4207
4208			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4209				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4210
4211			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4212				ioa_cfg->errors_logged++;
4213				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4214
4215				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4216					ioa_cfg->sdt_state = GET_DUMP;
4217
4218				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4219				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4220				return IRQ_HANDLED;
4221			}
4222
4223			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4224
4225			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4226
4227			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4228
4229			list_del(&ipr_cmd->queue);
4230			del_timer(&ipr_cmd->timer);
4231			ipr_cmd->done(ipr_cmd);
4232
4233			rc = IRQ_HANDLED;
4234
4235			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4236				ioa_cfg->hrrq_curr++;
4237			} else {
4238				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4239				ioa_cfg->toggle_bit ^= 1u;
4240			}
4241		}
4242
4243		if (ipr_cmd != NULL) {
4244			/* Clear the PCI interrupt */
4245			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4246			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4247		} else
4248			break;
4249	}
4250
4251	if (unlikely(rc == IRQ_NONE))
4252		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4253
4254	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4255	return rc;
4256}
4257
4258/**
4259 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4260 * @ioa_cfg:	ioa config struct
4261 * @ipr_cmd:	ipr command struct
4262 *
4263 * Return value:
4264 * 	0 on success / -1 on failure
4265 **/
4266static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4267			   struct ipr_cmnd *ipr_cmd)
4268{
4269	int i, nseg;
4270	struct scatterlist *sg;
4271	u32 length;
4272	u32 ioadl_flags = 0;
4273	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4274	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4275	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4276
4277	length = scsi_bufflen(scsi_cmd);
4278	if (!length)
4279		return 0;
4280
4281	nseg = scsi_dma_map(scsi_cmd);
4282	if (nseg < 0) {
4283		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4284		return -1;
4285	}
4286
4287	ipr_cmd->dma_use_sg = nseg;
4288
4289	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4290		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4291		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4292		ioarcb->write_data_transfer_length = cpu_to_be32(length);
4293		ioarcb->write_ioadl_len =
4294			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4295	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4296		ioadl_flags = IPR_IOADL_FLAGS_READ;
4297		ioarcb->read_data_transfer_length = cpu_to_be32(length);
4298		ioarcb->read_ioadl_len =
4299			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4300	}
4301
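	/*
	 * If the S/G list is short enough to fit in the IOARCB's additional
	 * data area, embed it there so the adapter can pull the descriptors
	 * as part of the IOARCB fetch instead of a separate DMA.
	 */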
4302	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4303		ioadl = ioarcb->add_data.u.ioadl;
4304		ioarcb->write_ioadl_addr =
4305			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4306				    offsetof(struct ipr_ioarcb, add_data));
4307		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4308	}
4309
4310	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4311		ioadl[i].flags_and_data_len =
4312			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4313		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4314	}
4315
4316	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4317	return 0;
4318}
4319
4320/**
4321 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4322 * @scsi_cmd:	scsi command struct
4323 *
4324 * Return value:
4325 * 	task attributes
4326 **/
4327static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4328{
4329	u8 tag[2];
4330	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4331
4332	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4333		switch (tag[0]) {
4334		case MSG_SIMPLE_TAG:
4335			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4336			break;
4337		case MSG_HEAD_TAG:
4338			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4339			break;
4340		case MSG_ORDERED_TAG:
4341			rc = IPR_FLAGS_LO_ORDERED_TASK;
4342			break;
4343		}
4344	}
4345
4346	return rc;
4347}
4348
4349/**
4350 * ipr_erp_done - Process completion of ERP for a device
4351 * @ipr_cmd:		ipr command struct
4352 *
4353 * This function copies the sense buffer into the scsi_cmd
4354 * struct and pushes the scsi_done function.
4355 *
4356 * Return value:
4357 * 	nothing
4358 **/
4359static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4360{
4361	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4362	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4363	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4364	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4365
4366	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4367		scsi_cmd->result |= (DID_ERROR << 16);
4368		scmd_printk(KERN_ERR, scsi_cmd,
4369			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4370	} else {
4371		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4372		       SCSI_SENSE_BUFFERSIZE);
4373	}
4374
4375	if (res) {
4376		if (!ipr_is_naca_model(res))
4377			res->needs_sync_complete = 1;
4378		res->in_erp = 0;
4379	}
4380	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4381	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4382	scsi_cmd->scsi_done(scsi_cmd);
4383}
4384
4385/**
4386 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4387 * @ipr_cmd:	ipr command struct
4388 *
4389 * Return value:
4390 * 	none
4391 **/
4392static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4393{
4394	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4395	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4396	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4397
4398	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4399	ioarcb->write_data_transfer_length = 0;
4400	ioarcb->read_data_transfer_length = 0;
4401	ioarcb->write_ioadl_len = 0;
4402	ioarcb->read_ioadl_len = 0;
4403	ioasa->ioasc = 0;
4404	ioasa->residual_data_len = 0;
4405	ioarcb->write_ioadl_addr =
4406		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4407	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4408}
4409
4410/**
4411 * ipr_erp_request_sense - Send request sense to a device
4412 * @ipr_cmd:	ipr command struct
4413 *
4414 * This function sends a request sense to a device as a result
4415 * of a check condition.
4416 *
4417 * Return value:
4418 * 	nothing
4419 **/
4420static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4421{
4422	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4423	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4424
4425	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4426		ipr_erp_done(ipr_cmd);
4427		return;
4428	}
4429
4430	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4431
4432	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4433	cmd_pkt->cdb[0] = REQUEST_SENSE;
4434	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4435	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4436	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4437	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4438
4439	ipr_cmd->ioadl[0].flags_and_data_len =
4440		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4441	ipr_cmd->ioadl[0].address =
4442		cpu_to_be32(ipr_cmd->sense_buffer_dma);
4443
4444	ipr_cmd->ioarcb.read_ioadl_len =
4445		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4446	ipr_cmd->ioarcb.read_data_transfer_length =
4447		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4448
4449	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4450		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4451}
4452
4453/**
4454 * ipr_erp_cancel_all - Send cancel all to a device
4455 * @ipr_cmd:	ipr command struct
4456 *
4457 * This function sends a cancel all to a device to clear the
4458 * queue. If we are running TCQ on the device, QERR is set to 1,
4459 * which means all outstanding ops have been dropped on the floor.
4460 * Cancel all will return them to us.
4461 *
4462 * Return value:
4463 * 	nothing
4464 **/
4465static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4466{
4467	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4468	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4469	struct ipr_cmd_pkt *cmd_pkt;
4470
4471	res->in_erp = 1;
4472
4473	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4474
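	/*
	 * An untagged device does not drop its outstanding ops on a check
	 * condition (QERR does not apply), so there is nothing to cancel;
	 * go straight to the request sense.
	 */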
4475	if (!scsi_get_tag_type(scsi_cmd->device)) {
4476		ipr_erp_request_sense(ipr_cmd);
4477		return;
4478	}
4479
4480	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4481	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4482	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4483
4484	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4485		   IPR_CANCEL_ALL_TIMEOUT);
4486}
4487
4488/**
4489 * ipr_dump_ioasa - Dump contents of IOASA
4490 * @ioa_cfg:	ioa config struct
4491 * @ipr_cmd:	ipr command struct
4492 * @res:		resource entry struct
4493 *
4494 * This function is invoked by the interrupt handler when ops
4495 * fail. It will log the IOASA if appropriate. Only called
4496 * for GPDD ops.
4497 *
4498 * Return value:
4499 * 	none
4500 **/
4501static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4502			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4503{
4504	int i;
4505	u16 data_len;
4506	u32 ioasc, fd_ioasc;
4507	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4508	__be32 *ioasa_data = (__be32 *)ioasa;
4509	int error_index;
4510
4511	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4512	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4513
4514	if (ioasc == 0)
4515		return;
4516
4517	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4518		return;
4519
4520	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4521		error_index = ipr_get_error(fd_ioasc);
4522	else
4523		error_index = ipr_get_error(ioasc);
4524
4525	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4526		/* Don't log an error if the IOA already logged one */
4527		if (ioasa->ilid != 0)
4528			return;
4529
4530		if (!ipr_is_gscsi(res))
4531			return;
4532
4533		if (ipr_error_table[error_index].log_ioasa == 0)
4534			return;
4535	}
4536
4537	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4538
4539	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4540		data_len = sizeof(struct ipr_ioasa);
4541	else
4542		data_len = be16_to_cpu(ioasa->ret_stat_len);
4543
4544	ipr_err("IOASA Dump:\n");
4545
4546	for (i = 0; i < data_len / 4; i += 4) {
4547		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4548			be32_to_cpu(ioasa_data[i]),
4549			be32_to_cpu(ioasa_data[i+1]),
4550			be32_to_cpu(ioasa_data[i+2]),
4551			be32_to_cpu(ioasa_data[i+3]));
4552	}
4553}
4554
4555/**
4556 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4557	 * @ipr_cmd:	ipr command struct (supplies both the IOASA and the
4558	 *		scsi_cmd sense buffer to fill)
4559 *
4560 * Return value:
4561 * 	none
4562 **/
4563static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4564{
4565	u32 failing_lba;
4566	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4567	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4568	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4569	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4570
4571	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4572
4573	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4574		return;
4575
4576	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4577
4578	if (ipr_is_vset_device(res) &&
4579	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4580	    ioasa->u.vset.failing_lba_hi != 0) {
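		/*
		 * Use descriptor format sense data (response code 0x72),
		 * since the 64-bit failing LBA of a vset cannot fit in the
		 * 32-bit information field of fixed format sense data.
		 */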
4581		sense_buf[0] = 0x72;
4582		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4583		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4584		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4585
4586		sense_buf[7] = 12;
4587		sense_buf[8] = 0;
4588		sense_buf[9] = 0x0A;
4589		sense_buf[10] = 0x80;
4590
4591		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4592
4593		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4594		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4595		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4596		sense_buf[15] = failing_lba & 0x000000ff;
4597
4598		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4599
4600		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4601		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4602		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4603		sense_buf[19] = failing_lba & 0x000000ff;
4604	} else {
4605		sense_buf[0] = 0x70;
4606		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4607		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4608		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4609
4610		/* Illegal request */
4611		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4612		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4613			sense_buf[7] = 10;	/* additional length */
4614
4615			/* IOARCB was in error */
4616			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4617				sense_buf[15] = 0xC0;
4618			else	/* Parameter data was invalid */
4619				sense_buf[15] = 0x80;
4620
4621			sense_buf[16] =
4622			    ((IPR_FIELD_POINTER_MASK &
4623			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4624			sense_buf[17] =
4625			    (IPR_FIELD_POINTER_MASK &
4626			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4627		} else {
4628			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4629				if (ipr_is_vset_device(res))
4630					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4631				else
4632					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4633
4634				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4635				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4636				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4637				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4638				sense_buf[6] = failing_lba & 0x000000ff;
4639			}
4640
4641			sense_buf[7] = 6;	/* additional length */
4642		}
4643	}
4644}
4645
4646/**
4647 * ipr_get_autosense - Copy autosense data to sense buffer
4648 * @ipr_cmd:	ipr command struct
4649 *
4650 * This function copies the autosense buffer to the buffer
4651 * in the scsi_cmd, if there is autosense available.
4652 *
4653 * Return value:
4654 *	1 if autosense was available / 0 if not
4655 **/
4656static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4657{
4658	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4659
4660	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4661		return 0;
4662
4663	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4664	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4665		   SCSI_SENSE_BUFFERSIZE));
4666	return 1;
4667}
4668
4669/**
4670 * ipr_erp_start - Process an error response for a SCSI op
4671 * @ioa_cfg:	ioa config struct
4672 * @ipr_cmd:	ipr command struct
4673 *
4674 * This function determines whether or not to initiate ERP
4675 * on the affected device.
4676 *
4677 * Return value:
4678 * 	nothing
4679 **/
4680static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4681			      struct ipr_cmnd *ipr_cmd)
4682{
4683	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4684	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4685	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4686	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4687
4688	if (!res) {
4689		ipr_scsi_eh_done(ipr_cmd);
4690		return;
4691	}
4692
4693	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4694		ipr_gen_sense(ipr_cmd);
4695
4696	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4697
4698	switch (masked_ioasc) {
4699	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4700		if (ipr_is_naca_model(res))
4701			scsi_cmd->result |= (DID_ABORT << 16);
4702		else
4703			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4704		break;
4705	case IPR_IOASC_IR_RESOURCE_HANDLE:
4706	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4707		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4708		break;
4709	case IPR_IOASC_HW_SEL_TIMEOUT:
4710		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4711		if (!ipr_is_naca_model(res))
4712			res->needs_sync_complete = 1;
4713		break;
4714	case IPR_IOASC_SYNC_REQUIRED:
4715		if (!res->in_erp)
4716			res->needs_sync_complete = 1;
4717		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4718		break;
4719	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4720	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4721		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4722		break;
4723	case IPR_IOASC_BUS_WAS_RESET:
4724	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4725		/*
4726		 * Report the bus reset and ask for a retry. The device
4727		 * will give CC/UA the next command.
4728		 */
4729		if (!res->resetting_device)
4730			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4731		scsi_cmd->result |= (DID_ERROR << 16);
4732		if (!ipr_is_naca_model(res))
4733			res->needs_sync_complete = 1;
4734		break;
4735	case IPR_IOASC_HW_DEV_BUS_STATUS:
4736		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4737		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4738			if (!ipr_get_autosense(ipr_cmd)) {
4739				if (!ipr_is_naca_model(res)) {
4740					ipr_erp_cancel_all(ipr_cmd);
4741					return;
4742				}
4743			}
4744		}
4745		if (!ipr_is_naca_model(res))
4746			res->needs_sync_complete = 1;
4747		break;
4748	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4749		break;
4750	default:
4751		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4752			scsi_cmd->result |= (DID_ERROR << 16);
4753		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4754			res->needs_sync_complete = 1;
4755		break;
4756	}
4757
4758	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4759	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4760	scsi_cmd->scsi_done(scsi_cmd);
4761}
4762
4763/**
4764 * ipr_scsi_done - mid-layer done function
4765 * @ipr_cmd:	ipr command struct
4766 *
4767 * This function is invoked by the interrupt handler for
4768 * ops generated by the SCSI mid-layer
4769 *
4770 * Return value:
4771 * 	none
4772 **/
4773static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4774{
4775	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4776	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4777	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4778
4779	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4780
4781	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4782		scsi_dma_unmap(ipr_cmd->scsi_cmd);
4783		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4784		scsi_cmd->scsi_done(scsi_cmd);
4785	} else
4786		ipr_erp_start(ioa_cfg, ipr_cmd);
4787}
4788
4789/**
4790 * ipr_queuecommand - Queue a mid-layer request
4791 * @scsi_cmd:	scsi command struct
4792 * @done:		done function
4793 *
4794 * This function queues a request generated by the mid-layer.
4795 *
4796 * Return value:
4797 *	0 on success
4798 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4799 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4800 **/
4801static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4802			    void (*done) (struct scsi_cmnd *))
4803{
4804	struct ipr_ioa_cfg *ioa_cfg;
4805	struct ipr_resource_entry *res;
4806	struct ipr_ioarcb *ioarcb;
4807	struct ipr_cmnd *ipr_cmd;
4808	int rc = 0;
4809
4810	scsi_cmd->scsi_done = done;
4811	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4812	res = scsi_cmd->device->hostdata;
4813	scsi_cmd->result = (DID_OK << 16);
4814
4815	/*
4816	 * We are currently blocking all devices due to a host reset.
4817	 * We have told the host to stop giving us new requests, but
4818	 * ERP ops don't count. FIXME
4819	 */
4820	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4821		return SCSI_MLQUEUE_HOST_BUSY;
4822
4823	/*
4824	 * FIXME - Create scsi_set_host_offline interface
4825	 *  and the ioa_is_dead check can be removed
4826	 */
4827	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4828		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4829		scsi_cmd->result = (DID_NO_CONNECT << 16);
4830		scsi_cmd->scsi_done(scsi_cmd);
4831		return 0;
4832	}
4833
4834	if (ipr_is_gata(res) && res->sata_port)
4835		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4836
4837	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4838	ioarcb = &ipr_cmd->ioarcb;
4839	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4840
4841	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4842	ipr_cmd->scsi_cmd = scsi_cmd;
4843	ioarcb->res_handle = res->cfgte.res_handle;
4844	ipr_cmd->done = ipr_scsi_done;
4845	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4846
4847	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4848		if (scsi_cmd->underflow == 0)
4849			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4850
4851		if (res->needs_sync_complete) {
4852			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4853			res->needs_sync_complete = 0;
4854		}
4855
4856		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4857		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4858		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4859		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4860	}
4861
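	/*
	 * CDB opcodes 0xC0 and above are vendor specific. Route them to the
	 * IOA itself unless the target is a generic SCSI device, in which
	 * case only IPR_QUERY_RSRC_STATE is treated as an IOA command.
	 */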
4862	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4863	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4864		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4865
4866	if (likely(rc == 0))
4867		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4868
4869	if (likely(rc == 0)) {
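		/*
		 * Order the IOARCB/IOADL stores ahead of the MMIO doorbell
		 * write below so the adapter never fetches a stale command
		 * block.
		 */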
4870		mb();
4871		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4872		       ioa_cfg->regs.ioarrin_reg);
4873	} else {
4874		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4875		return SCSI_MLQUEUE_HOST_BUSY;
4876	}
4877
4878	return 0;
4879}
4880
4881/**
4882 * ipr_ioctl - IOCTL handler
4883 * @sdev:	scsi device struct
4884 * @cmd:	IOCTL cmd
4885 * @arg:	IOCTL arg
4886 *
4887 * Return value:
4888 * 	0 on success / other on failure
4889 **/
4890static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4891{
4892	struct ipr_resource_entry *res;
4893
4894	res = (struct ipr_resource_entry *)sdev->hostdata;
4895	if (res && ipr_is_gata(res))
4896		return ata_scsi_ioctl(sdev, cmd, arg);
4897
4898	return -EINVAL;
4899}
4900
4901/**
4902	 * ipr_ioa_info - Get information about the card/driver
4903	 * @host:	scsi host struct
4904 *
4905 * Return value:
4906 * 	pointer to buffer with description string
4907 **/
4908static const char * ipr_ioa_info(struct Scsi_Host *host)
4909{
4910	static char buffer[512];
4911	struct ipr_ioa_cfg *ioa_cfg;
4912	unsigned long lock_flags = 0;
4913
4914	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4915
4916	spin_lock_irqsave(host->host_lock, lock_flags);
4917	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4918	spin_unlock_irqrestore(host->host_lock, lock_flags);
4919
4920	return buffer;
4921}
4922
4923static struct scsi_host_template driver_template = {
4924	.module = THIS_MODULE,
4925	.name = "IPR",
4926	.info = ipr_ioa_info,
4927	.ioctl = ipr_ioctl,
4928	.queuecommand = ipr_queuecommand,
4929	.eh_abort_handler = ipr_eh_abort,
4930	.eh_device_reset_handler = ipr_eh_dev_reset,
4931	.eh_host_reset_handler = ipr_eh_host_reset,
4932	.slave_alloc = ipr_slave_alloc,
4933	.slave_configure = ipr_slave_configure,
4934	.slave_destroy = ipr_slave_destroy,
4935	.target_alloc = ipr_target_alloc,
4936	.target_destroy = ipr_target_destroy,
4937	.change_queue_depth = ipr_change_queue_depth,
4938	.change_queue_type = ipr_change_queue_type,
4939	.bios_param = ipr_biosparam,
4940	.can_queue = IPR_MAX_COMMANDS,
4941	.this_id = -1,
4942	.sg_tablesize = IPR_MAX_SGLIST,
4943	.max_sectors = IPR_IOA_MAX_SECTORS,
4944	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4945	.use_clustering = ENABLE_CLUSTERING,
4946	.shost_attrs = ipr_ioa_attrs,
4947	.sdev_attrs = ipr_dev_attrs,
4948	.proc_name = IPR_NAME
4949};
4950
4951/**
4952 * ipr_ata_phy_reset - libata phy_reset handler
4953 * @ap:		ata port to reset
4954 *
4955 **/
4956static void ipr_ata_phy_reset(struct ata_port *ap)
4957{
4958	unsigned long flags;
4959	struct ipr_sata_port *sata_port = ap->private_data;
4960	struct ipr_resource_entry *res = sata_port->res;
4961	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4962	int rc;
4963
4964	ENTER;
4965	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4966	while (ioa_cfg->in_reset_reload) {
4967		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4968		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4969		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4970	}
4971
4972	if (!ioa_cfg->allow_cmds)
4973		goto out_unlock;
4974
4975	rc = ipr_device_reset(ioa_cfg, res);
4976
4977	if (rc) {
4978		ap->ops->port_disable(ap);
4979		goto out_unlock;
4980	}
4981
4982	switch (res->cfgte.proto) {
4983	case IPR_PROTO_SATA:
4984	case IPR_PROTO_SAS_STP:
4985		ap->device[0].class = ATA_DEV_ATA;
4986		break;
4987	case IPR_PROTO_SATA_ATAPI:
4988	case IPR_PROTO_SAS_STP_ATAPI:
4989		ap->device[0].class = ATA_DEV_ATAPI;
4990		break;
4991	default:
4992		ap->device[0].class = ATA_DEV_UNKNOWN;
4993		ap->ops->port_disable(ap);
4994		break;
4995	}
4996
4997out_unlock:
4998	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4999	LEAVE;
5000}
5001
5002/**
5003 * ipr_ata_post_internal - Cleanup after an internal command
5004 * @qc:	ATA queued command
5005 *
5006 * Return value:
5007 * 	none
5008 **/
5009static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5010{
5011	struct ipr_sata_port *sata_port = qc->ap->private_data;
5012	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5013	struct ipr_cmnd *ipr_cmd;
5014	unsigned long flags;
5015
5016	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5017	while (ioa_cfg->in_reset_reload) {
5018		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5019		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5020		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5021	}
5022
5023	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5024		if (ipr_cmd->qc == qc) {
5025			ipr_device_reset(ioa_cfg, sata_port->res);
5026			break;
5027		}
5028	}
5029	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5030}
5031
5032/**
5033 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5034 * @ap:	ATA port
5035 * @tf:	destination ATA taskfile
5036 *
5037 * Return value:
5038 * 	none
5039 **/
5040static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5041{
5042	struct ipr_sata_port *sata_port = ap->private_data;
5043	struct ipr_ioasa_gata *g = &sata_port->ioasa;
5044
5045	tf->feature = g->error;
5046	tf->nsect = g->nsect;
5047	tf->lbal = g->lbal;
5048	tf->lbam = g->lbam;
5049	tf->lbah = g->lbah;
5050	tf->device = g->device;
5051	tf->command = g->status;
5052	tf->hob_nsect = g->hob_nsect;
5053	tf->hob_lbal = g->hob_lbal;
5054	tf->hob_lbam = g->hob_lbam;
5055	tf->hob_lbah = g->hob_lbah;
5056	tf->ctl = g->alt_status;
5057}
5058
5059/**
5060 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5061 * @regs:	destination
5062 * @tf:	source ATA taskfile
5063 *
5064 * Return value:
5065 * 	none
5066 **/
5067static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5068			     struct ata_taskfile *tf)
5069{
5070	regs->feature = tf->feature;
5071	regs->nsect = tf->nsect;
5072	regs->lbal = tf->lbal;
5073	regs->lbam = tf->lbam;
5074	regs->lbah = tf->lbah;
5075	regs->device = tf->device;
5076	regs->command = tf->command;
5077	regs->hob_feature = tf->hob_feature;
5078	regs->hob_nsect = tf->hob_nsect;
5079	regs->hob_lbal = tf->hob_lbal;
5080	regs->hob_lbam = tf->hob_lbam;
5081	regs->hob_lbah = tf->hob_lbah;
5082	regs->ctl = tf->ctl;
5083}
5084
5085/**
5086 * ipr_sata_done - done function for SATA commands
5087 * @ipr_cmd:	ipr command struct
5088 *
5089 * This function is invoked by the interrupt handler for
5090 * ops generated by the SCSI mid-layer to SATA devices
5091 *
5092 * Return value:
5093 * 	none
5094 **/
5095static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5096{
5097	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5098	struct ata_queued_cmd *qc = ipr_cmd->qc;
5099	struct ipr_sata_port *sata_port = qc->ap->private_data;
5100	struct ipr_resource_entry *res = sata_port->res;
5101	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5102
5103	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5104	       sizeof(struct ipr_ioasa_gata));
5105	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5106
5107	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5108		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5109					 res->cfgte.res_addr.target);
5110
5111	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5112		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5113	else
5114		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5115	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5116	ata_qc_complete(qc);
5117}
5118
5119/**
5120 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5121 * @ipr_cmd:	ipr command struct
5122 * @qc:		ATA queued command
5123 *
5124 **/
5125static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5126				struct ata_queued_cmd *qc)
5127{
5128	u32 ioadl_flags = 0;
5129	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5130	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5131	int len = qc->nbytes + qc->pad_len;
5132	struct scatterlist *sg;
5133
5134	if (len == 0)
5135		return;
5136
5137	if (qc->dma_dir == DMA_TO_DEVICE) {
5138		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5139		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5140		ioarcb->write_data_transfer_length = cpu_to_be32(len);
5141		ioarcb->write_ioadl_len =
5142			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5143	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5144		ioadl_flags = IPR_IOADL_FLAGS_READ;
5145		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5146		ioarcb->read_ioadl_len =
5147			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5148	}
5149
5150	ata_for_each_sg(sg, qc) {
5151		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5152		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5153		if (ata_sg_is_last(sg, qc))
5154			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5155		else
5156			ioadl++;
5157	}
5158}
5159
5160/**
5161 * ipr_qc_issue - Issue a SATA qc to a device
5162 * @qc:	queued command
5163 *
5164 * Return value:
5165 * 	0 if success
5166 **/
5167static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5168{
5169	struct ata_port *ap = qc->ap;
5170	struct ipr_sata_port *sata_port = ap->private_data;
5171	struct ipr_resource_entry *res = sata_port->res;
5172	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5173	struct ipr_cmnd *ipr_cmd;
5174	struct ipr_ioarcb *ioarcb;
5175	struct ipr_ioarcb_ata_regs *regs;
5176
5177	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5178		return AC_ERR_SYSTEM;
5179
5180	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5181	ioarcb = &ipr_cmd->ioarcb;
5182	regs = &ioarcb->add_data.u.regs;
5183
5184	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5185	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5186
5187	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5188	ipr_cmd->qc = qc;
5189	ipr_cmd->done = ipr_sata_done;
5190	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5191	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5192	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5193	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
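	/* libata's pad buffer, when used, occupies one extra S/G element */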
5194	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5195
5196	ipr_build_ata_ioadl(ipr_cmd, qc);
5197	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5198	ipr_copy_sata_tf(regs, &qc->tf);
5199	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5200	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5201
5202	switch (qc->tf.protocol) {
5203	case ATA_PROT_NODATA:
5204	case ATA_PROT_PIO:
5205		break;
5206
5207	case ATA_PROT_DMA:
5208		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5209		break;
5210
5211	case ATA_PROT_ATAPI:
5212	case ATA_PROT_ATAPI_NODATA:
5213		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5214		break;
5215
5216	case ATA_PROT_ATAPI_DMA:
5217		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5218		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5219		break;
5220
5221	default:
5222		WARN_ON(1);
5223		return AC_ERR_INVALID;
5224	}
5225
5226	mb();
5227	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5228	       ioa_cfg->regs.ioarrin_reg);
5229	return 0;
5230}
5231
5232/**
5233 * ipr_ata_check_status - Return last ATA status
5234 * @ap:	ATA port
5235 *
5236 * Return value:
5237 * 	ATA status
5238 **/
5239static u8 ipr_ata_check_status(struct ata_port *ap)
5240{
5241	struct ipr_sata_port *sata_port = ap->private_data;
5242	return sata_port->ioasa.status;
5243}
5244
5245/**
5246 * ipr_ata_check_altstatus - Return last ATA altstatus
5247 * @ap:	ATA port
5248 *
5249 * Return value:
5250 * 	Alt ATA status
5251 **/
5252static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5253{
5254	struct ipr_sata_port *sata_port = ap->private_data;
5255	return sata_port->ioasa.alt_status;
5256}
5257
5258static struct ata_port_operations ipr_sata_ops = {
5259	.port_disable = ata_port_disable,
5260	.check_status = ipr_ata_check_status,
5261	.check_altstatus = ipr_ata_check_altstatus,
5262	.dev_select = ata_noop_dev_select,
5263	.phy_reset = ipr_ata_phy_reset,
5264	.post_internal_cmd = ipr_ata_post_internal,
5265	.tf_read = ipr_tf_read,
5266	.qc_prep = ata_noop_qc_prep,
5267	.qc_issue = ipr_qc_issue,
5268	.port_start = ata_sas_port_start,
5269	.port_stop = ata_sas_port_stop
5270};
5271
5272static struct ata_port_info sata_port_info = {
5273	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5274	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5275	.pio_mask	= 0x10, /* pio4 */
5276	.mwdma_mask = 0x07,
5277	.udma_mask	= 0x7f, /* udma0-6 */
5278	.port_ops	= &ipr_sata_ops
5279};
5280
5281#ifdef CONFIG_PPC_PSERIES
5282static const u16 ipr_blocked_processors[] = {
5283	PV_NORTHSTAR,
5284	PV_PULSAR,
5285	PV_POWER4,
5286	PV_ICESTAR,
5287	PV_SSTAR,
5288	PV_POWER4p,
5289	PV_630,
5290	PV_630p
5291};
5292
5293/**
5294 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5295 * @ioa_cfg:	ioa cfg struct
5296 *
5297 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5298 * certain pSeries hardware. This function determines if the given
5299	 * adapter is in one of these configurations or not.
5300 *
5301 * Return value:
5302 * 	1 if adapter is not supported / 0 if adapter is supported
5303 **/
5304static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5305{
5306	u8 rev_id;
5307	int i;
5308
5309	if (ioa_cfg->type == 0x5702) {
5310		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5311					 &rev_id) == PCIBIOS_SUCCESSFUL) {
5312			if (rev_id < 4) {
5313				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
5314					if (__is_processor(ipr_blocked_processors[i]))
5315						return 1;
5316				}
5317			}
5318		}
5319	}
5320	return 0;
5321}
5322#else
5323#define ipr_invalid_adapter(ioa_cfg) 0
5324#endif
5325
5326/**
5327 * ipr_ioa_bringdown_done - IOA bring down completion.
5328 * @ipr_cmd:	ipr command struct
5329 *
5330 * This function processes the completion of an adapter bring down.
5331 * It wakes any reset sleepers.
5332 *
5333 * Return value:
5334 * 	IPR_RC_JOB_RETURN
5335 **/
5336static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5337{
5338	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5339
5340	ENTER;
5341	ioa_cfg->in_reset_reload = 0;
5342	ioa_cfg->reset_retries = 0;
5343	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5344	wake_up_all(&ioa_cfg->reset_wait_q);
5345
5346	spin_unlock_irq(ioa_cfg->host->host_lock);
5347	scsi_unblock_requests(ioa_cfg->host);
5348	spin_lock_irq(ioa_cfg->host->host_lock);
5349	LEAVE;
5350
5351	return IPR_RC_JOB_RETURN;
5352}
5353
5354/**
5355 * ipr_ioa_reset_done - IOA reset completion.
5356 * @ipr_cmd:	ipr command struct
5357 *
5358 * This function processes the completion of an adapter reset.
5359 * It schedules any necessary mid-layer add/removes and
5360 * wakes any reset sleepers.
5361 *
5362 * Return value:
5363 * 	IPR_RC_JOB_RETURN
5364 **/
5365static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5366{
5367	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5368	struct ipr_resource_entry *res;
5369	struct ipr_hostrcb *hostrcb, *temp;
5370	int i = 0;
5371
5372	ENTER;
5373	ioa_cfg->in_reset_reload = 0;
5374	ioa_cfg->allow_cmds = 1;
5375	ioa_cfg->reset_cmd = NULL;
5376	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5377
5378	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5379		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5380			ipr_trace;
5381			break;
5382		}
5383	}
5384	schedule_work(&ioa_cfg->work_q);
5385
5386	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5387		list_del(&hostrcb->queue);
5388		if (i++ < IPR_NUM_LOG_HCAMS)
5389			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5390		else
5391			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5392	}
5393
5394	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5395	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5396
5397	ioa_cfg->reset_retries = 0;
5398	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5399	wake_up_all(&ioa_cfg->reset_wait_q);
5400
5401	spin_unlock_irq(ioa_cfg->host->host_lock);
5402	scsi_unblock_requests(ioa_cfg->host);
5403	spin_lock_irq(ioa_cfg->host->host_lock);
5404
5405	if (!ioa_cfg->allow_cmds)
5406		scsi_block_requests(ioa_cfg->host);
5407
5408	LEAVE;
5409	return IPR_RC_JOB_RETURN;
5410}
5411
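/*
 * Note the fixed split when re-arming the free hostrcb list above: the
 * first IPR_NUM_LOG_HCAMS buffers are posted to the adapter as Log Data
 * HCAMs and the remainder as Configuration Change HCAMs, so both HCAM
 * types are outstanding again once the reset completes.
 */
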
5412/**
5413 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5414 * @supported_dev:	supported device struct
5415 * @vpids:			vendor product id struct
5416 *
5417 * Return value:
5418 * 	none
5419 **/
5420static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5421				 struct ipr_std_inq_vpids *vpids)
5422{
5423	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5424	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5425	supported_dev->num_records = 1;
5426	supported_dev->data_length =
5427		cpu_to_be16(sizeof(struct ipr_supported_device));
5428	supported_dev->reserved = 0;
5429}
5430
5431/**
5432 * ipr_set_supported_devs - Send Set Supported Devices for a device
5433 * @ipr_cmd:	ipr command struct
5434 *
5435 * This function sends a Set Supported Devices command to the adapter.
5436 *
5437 * Return value:
5438 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5439 **/
5440static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5441{
5442	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5443	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5444	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5445	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5446	struct ipr_resource_entry *res = ipr_cmd->u.res;
5447
5448	ipr_cmd->job_step = ipr_ioa_reset_done;
5449
5450	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5451		if (!ipr_is_scsi_disk(res))
5452			continue;
5453
5454		ipr_cmd->u.res = res;
5455		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5456
5457		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5458		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5459		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5460
5461		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5462		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5463		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5464
5465		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5466							sizeof(struct ipr_supported_device));
5467		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5468					     offsetof(struct ipr_misc_cbs, supp_dev));
5469		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5470		ioarcb->write_data_transfer_length =
5471			cpu_to_be32(sizeof(struct ipr_supported_device));
5472
5473		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5474			   IPR_SET_SUP_DEVICE_TIMEOUT);
5475
5476		ipr_cmd->job_step = ipr_set_supported_devs;
5477		return IPR_RC_JOB_RETURN;
5478	}
5479
5480	return IPR_RC_JOB_CONTINUE;
5481}
5482
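/*
 * The re-entry idiom above is worth spelling out: ipr_cmd->u.res acts as
 * a cursor into used_res_q. Each pass issues Set Supported Devices for a
 * single disk, re-arms itself as the job step, and returns; when that
 * request completes, ipr_reset_ioa_job() re-enters this function and
 * list_for_each_entry_continue() resumes from the saved cursor. Roughly:
 *
 *	ipr_set_supported_devs()	issue for disk A, JOB_RETURN
 *	  ...command completes...
 *	ipr_set_supported_devs()	resume after A, issue for disk B
 *	  ...command completes...
 *	ipr_set_supported_devs()	no disks left, fall through to
 *					ipr_ioa_reset_done (JOB_CONTINUE)
 */
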
5483/**
5484 * ipr_setup_write_cache - Disable write cache if needed
5485 * @ipr_cmd:	ipr command struct
5486 *
5487 * This function sets up the adapter's write cache to the desired setting
5488 *
5489 * Return value:
5490 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5491 **/
5492static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5493{
5494	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5495
5496	ipr_cmd->job_step = ipr_set_supported_devs;
5497	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5498				    struct ipr_resource_entry, queue);
5499
5500	if (ioa_cfg->cache_state != CACHE_DISABLED)
5501		return IPR_RC_JOB_CONTINUE;
5502
5503	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5504	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5505	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5506	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5507
5508	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5509
5510	return IPR_RC_JOB_RETURN;
5511}
5512
5513/**
5514 * ipr_get_mode_page - Locate specified mode page
5515 * @mode_pages:	mode page buffer
5516 * @page_code:	page code to find
5517 * @len:		minimum required length for mode page
5518 *
5519 * Return value:
5520 * 	pointer to mode page / NULL on failure
5521 **/
5522static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5523			       u32 page_code, u32 len)
5524{
5525	struct ipr_mode_page_hdr *mode_hdr;
5526	u32 page_length;
5527	u32 length;
5528
5529	if (!mode_pages || (mode_pages->hdr.length == 0))
5530		return NULL;
5531
5532	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5533	mode_hdr = (struct ipr_mode_page_hdr *)
5534		(mode_pages->data + mode_pages->hdr.block_desc_len);
5535
5536	while (length) {
5537		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5538			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5539				return mode_hdr;
5540			break;
5541		} else {
5542			page_length = (sizeof(struct ipr_mode_page_hdr) +
5543				       mode_hdr->page_length);
5544			length -= page_length;
5545			mode_hdr = (struct ipr_mode_page_hdr *)
5546				((unsigned long)mode_hdr + page_length);
5547		}
5548	}
5549	return NULL;
5550}
5551
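/*
 * For reference, the buffer parsed above follows the SCSI mode parameter
 * layout: a 4-byte parameter header (whose length field excludes itself,
 * hence the "+ 1"), then hdr.block_desc_len bytes of block descriptors,
 * then the mode pages, each a page header whose page_length excludes the
 * header itself:
 *
 *	+----------------------+  offset 0
 *	| mode param header    |  4 bytes; hdr.length = total - 1
 *	+----------------------+  offset 4
 *	| block descriptors    |  hdr.block_desc_len bytes
 *	+----------------------+
 *	| page hdr | page data |  repeated until the buffer is exhausted
 *	+----------------------+
 */
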
5552/**
5553 * ipr_check_term_power - Check for term power errors
5554 * @ioa_cfg:	ioa config struct
5555 * @mode_pages:	IOAFP mode pages buffer
5556 *
5557 * Check the IOAFP's mode page 28 for term power errors
5558 *
5559 * Return value:
5560 * 	nothing
5561 **/
5562static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5563				 struct ipr_mode_pages *mode_pages)
5564{
5565	int i;
5566	int entry_length;
5567	struct ipr_dev_bus_entry *bus;
5568	struct ipr_mode_page28 *mode_page;
5569
5570	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5571				      sizeof(struct ipr_mode_page28));
5572
5573	entry_length = mode_page->entry_length;
5574
5575	bus = mode_page->bus;
5576
5577	for (i = 0; i < mode_page->num_entries; i++) {
5578		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5579			dev_err(&ioa_cfg->pdev->dev,
5580				"Term power is absent on scsi bus %d\n",
5581				bus->res_addr.bus);
5582		}
5583
5584		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5585	}
5586}
5587
5588/**
5589 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5590 * @ioa_cfg:	ioa config struct
5591 *
5592 * Looks through the config table checking for SES devices. If
5593 * the SES device is in the SES table indicating a maximum SCSI
5594 * bus speed, the speed is limited for the bus.
5595 *
5596 * Return value:
5597 * 	none
5598 **/
5599static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5600{
5601	u32 max_xfer_rate;
5602	int i;
5603
5604	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5605		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5606						       ioa_cfg->bus_attr[i].bus_width);
5607
5608		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5609			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5610	}
5611}
5612
5613/**
5614 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5615 * @ioa_cfg:	ioa config struct
5616 * @mode_pages:	mode page 28 buffer
5617 *
5618 * Updates mode page 28 based on driver configuration
5619 *
5620 * Return value:
5621 * 	none
5622 **/
5623static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5624					  struct ipr_mode_pages *mode_pages)
5625{
5626	int i, entry_length;
5627	struct ipr_dev_bus_entry *bus;
5628	struct ipr_bus_attributes *bus_attr;
5629	struct ipr_mode_page28 *mode_page;
5630
5631	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5632				      sizeof(struct ipr_mode_page28));
5633
5634	entry_length = mode_page->entry_length;
5635
5636	/* Loop for each device bus entry */
5637	for (i = 0, bus = mode_page->bus;
5638	     i < mode_page->num_entries;
5639	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5640		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5641			dev_err(&ioa_cfg->pdev->dev,
5642				"Invalid resource address reported: 0x%08X\n",
5643				IPR_GET_PHYS_LOC(bus->res_addr));
5644			continue;
5645		}
5646
5647		bus_attr = &ioa_cfg->bus_attr[i];
5648		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5649		bus->bus_width = bus_attr->bus_width;
5650		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5651		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5652		if (bus_attr->qas_enabled)
5653			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5654		else
5655			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5656	}
5657}
5658
5659/**
5660 * ipr_build_mode_select - Build a mode select command
5661 * @ipr_cmd:	ipr command struct
5662 * @res_handle:	resource handle to send command to
5663 * @parm:		Byte 1 of the Mode Select CDB
5664 * @dma_addr:	DMA buffer address
5665 * @xfer_len:	data transfer length
5666 *
5667 * Return value:
5668 * 	none
5669 **/
5670static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5671				  __be32 res_handle, u8 parm, u32 dma_addr,
5672				  u8 xfer_len)
5673{
5674	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5675	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5676
5677	ioarcb->res_handle = res_handle;
5678	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5679	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5680	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5681	ioarcb->cmd_pkt.cdb[1] = parm;
5682	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5683
5684	ioadl->flags_and_data_len =
5685		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5686	ioadl->address = cpu_to_be32(dma_addr);
5687	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5688	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5689}
5690
5691/**
5692 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5693 * @ipr_cmd:	ipr command struct
5694 *
5695 * This function sets up the SCSI bus attributes and sends
5696 * a Mode Select for Page 28 to activate them.
5697 *
5698 * Return value:
5699 * 	IPR_RC_JOB_RETURN
5700 **/
5701static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5702{
5703	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5704	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5705	int length;
5706
5707	ENTER;
5708	ipr_scsi_bus_speed_limit(ioa_cfg);
5709	ipr_check_term_power(ioa_cfg, mode_pages);
5710	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5711	length = mode_pages->hdr.length + 1;
5712	mode_pages->hdr.length = 0;	/* mode data length is reserved for Mode Select */
5713
5714	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5715			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5716			      length);
5717
5718	ipr_cmd->job_step = ipr_setup_write_cache;
5719	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5720
5721	LEAVE;
5722	return IPR_RC_JOB_RETURN;
5723}
5724
5725/**
5726 * ipr_build_mode_sense - Builds a mode sense command
5727 * @ipr_cmd:	ipr command struct
5728 * @res_handle:	resource handle to send command to
5729 * @parm:		Byte 2 of mode sense command
5730 * @dma_addr:	DMA address of mode sense buffer
5731 * @xfer_len:	Size of DMA buffer
5732 *
5733 * Return value:
5734 * 	none
5735 **/
5736static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5737				 __be32 res_handle,
5738				 u8 parm, u32 dma_addr, u8 xfer_len)
5739{
5740	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5741	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5742
5743	ioarcb->res_handle = res_handle;
5744	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5745	ioarcb->cmd_pkt.cdb[2] = parm;
5746	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5747	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5748
5749	ioadl->flags_and_data_len =
5750		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5751	ioadl->address = cpu_to_be32(dma_addr);
5752	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5753	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5754}
5755
5756/**
5757 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5758 * @ipr_cmd:	ipr command struct
5759 *
5760 * This function handles the failure of an IOA bringup command.
5761 *
5762 * Return value:
5763 * 	IPR_RC_JOB_RETURN
5764 **/
5765static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5766{
5767	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5768	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5769
5770	dev_err(&ioa_cfg->pdev->dev,
5771		"0x%02X failed with IOASC: 0x%08X\n",
5772		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5773
5774	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5775	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5776	return IPR_RC_JOB_RETURN;
5777}
5778
5779/**
5780 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5781 * @ipr_cmd:	ipr command struct
5782 *
5783 * This function handles the failure of a Mode Sense to the IOAFP.
5784 * Some adapters do not handle all mode pages.
5785 *
5786 * Return value:
5787 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5788 **/
5789static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5790{
5791	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5792
5793	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5794		ipr_cmd->job_step = ipr_setup_write_cache;
5795		return IPR_RC_JOB_CONTINUE;
5796	}
5797
5798	return ipr_reset_cmd_failed(ipr_cmd);
5799}
5800
5801/**
5802 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5803 * @ipr_cmd:	ipr command struct
5804 *
5805 * This function sends a Page 28 mode sense to the IOA to
5806 * retrieve SCSI bus attributes.
5807 *
5808 * Return value:
5809 * 	IPR_RC_JOB_RETURN
5810 **/
5811static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5812{
5813	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5814
5815	ENTER;
5816	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5817			     0x28, ioa_cfg->vpd_cbs_dma +
5818			     offsetof(struct ipr_misc_cbs, mode_pages),
5819			     sizeof(struct ipr_mode_pages));
5820
5821	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5822	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5823
5824	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5825
5826	LEAVE;
5827	return IPR_RC_JOB_RETURN;
5828}
5829
5830/**
5831 * ipr_ioafp_mode_select_page24 - Issue Mode Select Page 24 to IOA
5832 * @ipr_cmd:	ipr command struct
5833 *
5834 * This function enables dual IOA RAID support if possible.
5835 *
5836 * Return value:
5837 * 	IPR_RC_JOB_RETURN
5838 **/
5839static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
5840{
5841	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5842	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5843	struct ipr_mode_page24 *mode_page;
5844	int length;
5845
5846	ENTER;
5847	mode_page = ipr_get_mode_page(mode_pages, 0x24,
5848				      sizeof(struct ipr_mode_page24));
5849
5850	if (mode_page)
5851		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
5852
5853	length = mode_pages->hdr.length + 1;
5854	mode_pages->hdr.length = 0;	/* mode data length is reserved for Mode Select */
5855
5856	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5857			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5858			      length);
5859
5860	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5861	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5862
5863	LEAVE;
5864	return IPR_RC_JOB_RETURN;
5865}
5866
5867/**
5868 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
5869 * @ipr_cmd:	ipr command struct
5870 *
5871 * This function handles the failure of a Mode Sense to the IOAFP.
5872 * Some adapters do not handle all mode pages.
5873 *
5874 * Return value:
5875 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5876 **/
5877static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
5878{
5879	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5880
5881	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5882		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5883		return IPR_RC_JOB_CONTINUE;
5884	}
5885
5886	return ipr_reset_cmd_failed(ipr_cmd);
5887}
5888
5889/**
5890 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
5891 * @ipr_cmd:	ipr command struct
5892 *
5893 * This function sends a mode sense to the IOA to retrieve
5894 * the IOA Advanced Function Control mode page.
5895 *
5896 * Return value:
5897 * 	IPR_RC_JOB_RETURN
5898 **/
5899static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
5900{
5901	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5902
5903	ENTER;
5904	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5905			     0x24, ioa_cfg->vpd_cbs_dma +
5906			     offsetof(struct ipr_misc_cbs, mode_pages),
5907			     sizeof(struct ipr_mode_pages));
5908
5909	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
5910	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
5911
5912	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5913
5914	LEAVE;
5915	return IPR_RC_JOB_RETURN;
5916}
5917
5918/**
5919 * ipr_init_res_table - Initialize the resource table
5920 * @ipr_cmd:	ipr command struct
5921 *
5922 * This function looks through the existing resource table, comparing
5923 * it with the config table. This function will take care of old/new
5924 * devices and schedule adding/removing them from the mid-layer
5925 * as appropriate.
5926 *
5927 * Return value:
5928 * 	IPR_RC_JOB_CONTINUE
5929 **/
5930static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5931{
5932	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5933	struct ipr_resource_entry *res, *temp;
5934	struct ipr_config_table_entry *cfgte;
5935	int found, i;
5936	LIST_HEAD(old_res);
5937
5938	ENTER;
5939	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5940		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5941
5942	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5943		list_move_tail(&res->queue, &old_res);
5944
5945	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5946		cfgte = &ioa_cfg->cfg_table->dev[i];
5947		found = 0;
5948
5949		list_for_each_entry_safe(res, temp, &old_res, queue) {
5950			if (!memcmp(&res->cfgte.res_addr,
5951				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5952				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5953				found = 1;
5954				break;
5955			}
5956		}
5957
5958		if (!found) {
5959			if (list_empty(&ioa_cfg->free_res_q)) {
5960				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5961				break;
5962			}
5963
5964			found = 1;
5965			res = list_entry(ioa_cfg->free_res_q.next,
5966					 struct ipr_resource_entry, queue);
5967			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5968			ipr_init_res_entry(res);
5969			res->add_to_ml = 1;
5970		}
5971
5972		if (found)
5973			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5974	}
5975
5976	list_for_each_entry_safe(res, temp, &old_res, queue) {
5977		if (res->sdev) {
5978			res->del_from_ml = 1;
5979			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5980			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5981		} else {
5982			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5983		}
5984	}
5985
5986	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
5987		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
5988	else
5989		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5990
5991	LEAVE;
5992	return IPR_RC_JOB_CONTINUE;
5993}
5994
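/*
 * In outline, the reconciliation above is three passes:
 *
 *	1. Park every known resource on a local old_res list.
 *	2. For each config table entry, match on res_addr: matches move
 *	   back to used_res_q (with the entry data refreshed); new devices
 *	   claim a free resource and are flagged add_to_ml.
 *	3. Whatever remains on old_res has vanished from the adapter: if a
 *	   scsi_device is attached it is flagged del_from_ml so the worker
 *	   can remove it, otherwise it goes straight back to free_res_q.
 */
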
5995/**
5996 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5997 * @ipr_cmd:	ipr command struct
5998 *
5999 * This function sends a Query IOA Configuration command
6000 * to the adapter to retrieve the IOA configuration table.
6001 *
6002 * Return value:
6003 * 	IPR_RC_JOB_RETURN
6004 **/
6005static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6006{
6007	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6008	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6009	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6010	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6011	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6012
6013	ENTER;
6014	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6015		ioa_cfg->dual_raid = 1;
6016	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6017		 ucode_vpd->major_release, ucode_vpd->card_type,
6018		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6019	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6020	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6021
6022	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6023	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6024	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6025
6026	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6027	ioarcb->read_data_transfer_length =
6028		cpu_to_be32(sizeof(struct ipr_config_table));
6029
6030	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6031	ioadl->flags_and_data_len =
6032		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6033
6034	ipr_cmd->job_step = ipr_init_res_table;
6035
6036	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6037
6038	LEAVE;
6039	return IPR_RC_JOB_RETURN;
6040}
6041
6042/**
6043 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6044 * @ipr_cmd:	ipr command struct
6045 *
6046 * This utility function sends an inquiry to the adapter.
6047 *
6048 * Return value:
6049 * 	none
6050 **/
6051static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6052			      u32 dma_addr, u8 xfer_len)
6053{
6054	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6055	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6056
6057	ENTER;
6058	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6059	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6060
6061	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6062	ioarcb->cmd_pkt.cdb[1] = flags;
6063	ioarcb->cmd_pkt.cdb[2] = page;
6064	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6065
6066	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6067	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6068
6069	ioadl->address = cpu_to_be32(dma_addr);
6070	ioadl->flags_and_data_len =
6071		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6072
6073	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6074	LEAVE;
6075}
6076
6077/**
6078 * ipr_inquiry_page_supported - Is the given inquiry page supported
6079 * @page0:		inquiry page 0 buffer
6080 * @page:		page code.
6081 *
6082 * This function determines if the specified inquiry page is supported.
6083 *
6084 * Return value:
6085 *	1 if page is supported / 0 if not
6086 **/
6087static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6088{
6089	int i;
6090
6091	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6092		if (page0->page[i] == page)
6093			return 1;
6094
6095	return 0;
6096}
6097
6098/**
6099 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6100 * @ipr_cmd:	ipr command struct
6101 *
6102 * This function sends a Page 0xD0 inquiry to the adapter
6103 * to retrieve adapter capabilities.
6104 *
6105 * Return value:
6106 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6107 **/
6108static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6109{
6110	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6111	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6112	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6113
6114	ENTER;
6115	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6116	memset(cap, 0, sizeof(*cap));
6117
6118	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6119		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6120				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6121				  sizeof(struct ipr_inquiry_cap));
6122		return IPR_RC_JOB_RETURN;
6123	}
6124
6125	LEAVE;
6126	return IPR_RC_JOB_CONTINUE;
6127}
6128
6129/**
6130 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6131 * @ipr_cmd:	ipr command struct
6132 *
6133 * This function sends a Page 3 inquiry to the adapter
6134 * to retrieve software VPD information.
6135 *
6136 * Return value:
6137 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6138 **/
6139static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6140{
6141	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6142	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6143
6144	ENTER;
6145
6146	if (!ipr_inquiry_page_supported(page0, 1))
6147		ioa_cfg->cache_state = CACHE_NONE;
6148
6149	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6150
6151	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6152			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6153			  sizeof(struct ipr_inquiry_page3));
6154
6155	LEAVE;
6156	return IPR_RC_JOB_RETURN;
6157}
6158
6159/**
6160 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6161 * @ipr_cmd:	ipr command struct
6162 *
6163 * This function sends a Page 0 inquiry to the adapter
6164 * to retrieve supported inquiry pages.
6165 *
6166 * Return value:
6167 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6168 **/
6169static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6170{
6171	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6172	char type[5];
6173
6174	ENTER;
6175
6176	/* Grab the type out of the VPD and store it away */
6177	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6178	type[4] = '\0';
6179	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6180
6181	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6182
6183	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6184			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6185			  sizeof(struct ipr_inquiry_page0));
6186
6187	LEAVE;
6188	return IPR_RC_JOB_RETURN;
6189}
6190
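/*
 * As an example, an adapter whose product id begins "5702" ends up with
 * ioa_cfg->type == 0x5702, which is exactly the value that
 * ipr_invalid_adapter() tests when screening early Gemstone revisions.
 */
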
6191/**
6192 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6193 * @ipr_cmd:	ipr command struct
6194 *
6195 * This function sends a standard inquiry to the adapter.
6196 *
6197 * Return value:
6198 * 	IPR_RC_JOB_RETURN
6199 **/
6200static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6201{
6202	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6203
6204	ENTER;
6205	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6206
6207	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6208			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6209			  sizeof(struct ipr_ioa_vpd));
6210
6211	LEAVE;
6212	return IPR_RC_JOB_RETURN;
6213}
6214
6215/**
6216 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6217 * @ipr_cmd:	ipr command struct
6218 *
6219 * This function sends an Identify Host Request Response Queue
6220 * command to establish the HRRQ with the adapter.
6221 *
6222 * Return value:
6223 * 	IPR_RC_JOB_RETURN
6224 **/
6225static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6226{
6227	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6228	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6229
6230	ENTER;
6231	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6232
6233	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6234	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6235
6236	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6237	ioarcb->cmd_pkt.cdb[2] =
6238		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6239	ioarcb->cmd_pkt.cdb[3] =
6240		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6241	ioarcb->cmd_pkt.cdb[4] =
6242		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6243	ioarcb->cmd_pkt.cdb[5] =
6244		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6245	ioarcb->cmd_pkt.cdb[7] =
6246		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6247	ioarcb->cmd_pkt.cdb[8] =
6248		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6249
6250	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6251
6252	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6253
6254	LEAVE;
6255	return IPR_RC_JOB_RETURN;
6256}
6257
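/*
 * The CDB built above describes the host RRQ inline: bytes 2-5 carry the
 * 32-bit DMA address of the queue, most significant byte first, and
 * bytes 7-8 carry its size in bytes. For a hypothetical host_rrq_dma of
 * 0x1FC0A000 and an IPR_NUM_CMD_BLKS of 100, this would encode as:
 *
 *	cdb[2..5] = 0x1F 0xC0 0xA0 0x00
 *	cdb[7..8] = 0x01 0x90		(100 * sizeof(u32) = 0x190 bytes)
 */
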
6258/**
6259 * ipr_reset_timer_done - Adapter reset timer function
6260 * @ipr_cmd:	ipr command struct
6261 *
6262 * Description: This function is used in adapter reset processing
6263 * for timing events. If the reset_cmd pointer in the IOA
6264 * config struct does not point to this command, we are doing
6265 * nested resets and fail_all_ops will take care of freeing the
6266 * command block.
6267 *
6268 * Return value:
6269 * 	none
6270 **/
6271static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6272{
6273	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6274	unsigned long lock_flags = 0;
6275
6276	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6277
6278	if (ioa_cfg->reset_cmd == ipr_cmd) {
6279		list_del(&ipr_cmd->queue);
6280		ipr_cmd->done(ipr_cmd);
6281	}
6282
6283	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6284}
6285
6286/**
6287 * ipr_reset_start_timer - Start a timer for adapter reset job
6288 * @ipr_cmd:	ipr command struct
6289 * @timeout:	timeout value
6290 *
6291 * Description: This function is used in adapter reset processing
6292 * for timing events. If the reset_cmd pointer in the IOA
6293 * config struct does not point to this command, we are doing
6294 * nested resets and fail_all_ops will take care of freeing the
6295 * command block.
6296 *
6297 * Return value:
6298 * 	none
6299 **/
6300static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6301				  unsigned long timeout)
6302{
6303	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6304	ipr_cmd->done = ipr_reset_ioa_job;
6305
6306	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6307	ipr_cmd->timer.expires = jiffies + timeout;
6308	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6309	add_timer(&ipr_cmd->timer);
6310}
6311
6312/**
6313 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6314 * @ioa_cfg:	ioa cfg struct
6315 *
6316 * Return value:
6317 * 	nothing
6318 **/
6319static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6320{
6321	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6322
6323	/* Initialize Host RRQ pointers */
6324	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6325	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6326	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6327	ioa_cfg->toggle_bit = 1;
6328
6329	/* Zero out config table */
6330	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6331}
6332
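/*
 * The host RRQ is consumed as a ring: elsewhere in the driver hrrq_curr
 * advances from hrrq_start toward hrrq_end and wraps, flipping toggle_bit
 * on each wrap so the interrupt handler can distinguish fresh entries
 * from stale ones without the queue ever being re-zeroed.
 */
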
6333/**
6334 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6335 * @ipr_cmd:	ipr command struct
6336 *
6337 * This function reinitializes some control blocks and
6338 * enables destructive diagnostics on the adapter.
6339 *
6340 * Return value:
6341 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6342 **/
6343static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6344{
6345	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6346	volatile u32 int_reg;
6347
6348	ENTER;
6349	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6350	ipr_init_ioa_mem(ioa_cfg);
6351
6352	ioa_cfg->allow_interrupts = 1;
6353	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6354
6355	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6356		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6357		       ioa_cfg->regs.clr_interrupt_mask_reg);
6358		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6359		return IPR_RC_JOB_CONTINUE;
6360	}
6361
6362	/* Enable destructive diagnostics on IOA */
6363	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6364
6365	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6366	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6367
6368	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6369
6370	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6371	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6372	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6373	ipr_cmd->done = ipr_reset_ioa_job;
6374	add_timer(&ipr_cmd->timer);
6375	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6376
6377	LEAVE;
6378	return IPR_RC_JOB_RETURN;
6379}
6380
6381/**
6382 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6383 * @ipr_cmd:	ipr command struct
6384 *
6385 * This function is invoked when an adapter dump has run out
6386 * of processing time.
6387 *
6388 * Return value:
6389 * 	IPR_RC_JOB_CONTINUE
6390 **/
6391static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6392{
6393	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6394
6395	if (ioa_cfg->sdt_state == GET_DUMP)
6396		ioa_cfg->sdt_state = ABORT_DUMP;
6397
6398	ipr_cmd->job_step = ipr_reset_alert;
6399
6400	return IPR_RC_JOB_CONTINUE;
6401}
6402
6403/**
6404 * ipr_unit_check_no_data - Log a unit check/no data error log
6405 * @ioa_cfg:		ioa config struct
6406 *
6407 * Logs an error indicating the adapter unit checked, but for some
6408 * reason, we were unable to fetch the unit check buffer.
6409 *
6410 * Return value:
6411 * 	nothing
6412 **/
6413static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6414{
6415	ioa_cfg->errors_logged++;
6416	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6417}
6418
6419/**
6420 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6421 * @ioa_cfg:		ioa config struct
6422 *
6423 * Fetches the unit check buffer from the adapter by clocking the data
6424 * through the mailbox register.
6425 *
6426 * Return value:
6427 * 	nothing
6428 **/
6429static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6430{
6431	unsigned long mailbox;
6432	struct ipr_hostrcb *hostrcb;
6433	struct ipr_uc_sdt sdt;
6434	int rc, length;
6435	u32 ioasc;
6436
6437	mailbox = readl(ioa_cfg->ioa_mailbox);
6438
6439	if (!ipr_sdt_is_fmt2(mailbox)) {
6440		ipr_unit_check_no_data(ioa_cfg);
6441		return;
6442	}
6443
6444	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6445	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6446					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6447
6448	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6449	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6450		ipr_unit_check_no_data(ioa_cfg);
6451		return;
6452	}
6453
6454	/* Find length of the first sdt entry (UC buffer) */
6455	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6456		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6457
6458	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6459			     struct ipr_hostrcb, queue);
6460	list_del(&hostrcb->queue);
6461	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6462
6463	rc = ipr_get_ldump_data_section(ioa_cfg,
6464					be32_to_cpu(sdt.entry[0].bar_str_offset),
6465					(__be32 *)&hostrcb->hcam,
6466					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6467
6468	if (!rc) {
6469		ipr_handle_log_data(ioa_cfg, hostrcb);
6470		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6471		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6472		    ioa_cfg->sdt_state == GET_DUMP)
6473			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6474	} else
6475		ipr_unit_check_no_data(ioa_cfg);
6476
6477	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6478}
6479
6480/**
6481 * ipr_reset_restore_cfg_space - Restore PCI config space.
6482 * @ipr_cmd:	ipr command struct
6483 *
6484 * Description: This function restores the saved PCI config space of
6485 * the adapter, fails all outstanding ops back to the callers, and
6486 * fetches the dump/unit check if applicable to this reset.
6487 *
6488 * Return value:
6489 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6490 **/
6491static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6492{
6493	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6494	int rc;
6495
6496	ENTER;
6497	rc = pci_restore_state(ioa_cfg->pdev);
6498
6499	if (rc != PCIBIOS_SUCCESSFUL) {
6500		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6501		return IPR_RC_JOB_CONTINUE;
6502	}
6503
6504	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6505		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6506		return IPR_RC_JOB_CONTINUE;
6507	}
6508
6509	ipr_fail_all_ops(ioa_cfg);
6510
6511	if (ioa_cfg->ioa_unit_checked) {
6512		ioa_cfg->ioa_unit_checked = 0;
6513		ipr_get_unit_check_buffer(ioa_cfg);
6514		ipr_cmd->job_step = ipr_reset_alert;
6515		ipr_reset_start_timer(ipr_cmd, 0);
6516		return IPR_RC_JOB_RETURN;
6517	}
6518
6519	if (ioa_cfg->in_ioa_bringdown) {
6520		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6521	} else {
6522		ipr_cmd->job_step = ipr_reset_enable_ioa;
6523
6524		if (GET_DUMP == ioa_cfg->sdt_state) {
6525			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6526			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6527			schedule_work(&ioa_cfg->work_q);
6528			return IPR_RC_JOB_RETURN;
6529		}
6530	}
6531
6532	LEAVE;
6533	return IPR_RC_JOB_CONTINUE;
6534}
6535
6536/**
6537 * ipr_reset_bist_done - BIST has completed on the adapter.
6538 * @ipr_cmd:	ipr command struct
6539 *
6540 * Description: Unblock config space and resume the reset process.
6541 *
6542 * Return value:
6543 * 	IPR_RC_JOB_CONTINUE
6544 **/
6545static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6546{
6547	ENTER;
6548	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6549	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6550	LEAVE;
6551	return IPR_RC_JOB_CONTINUE;
6552}
6553
6554/**
6555 * ipr_reset_start_bist - Run BIST on the adapter.
6556 * @ipr_cmd:	ipr command struct
6557 *
6558 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6559 *
6560 * Return value:
6561 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6562 **/
6563static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6564{
6565	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6566	int rc;
6567
6568	ENTER;
6569	pci_block_user_cfg_access(ioa_cfg->pdev);
6570	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6571
6572	if (rc != PCIBIOS_SUCCESSFUL) {
6573		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6574		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6575		rc = IPR_RC_JOB_CONTINUE;
6576	} else {
6577		ipr_cmd->job_step = ipr_reset_bist_done;
6578		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6579		rc = IPR_RC_JOB_RETURN;
6580	}
6581
6582	LEAVE;
6583	return rc;
6584}
6585
6586/**
6587 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6588 * @ipr_cmd:	ipr command struct
6589 *
6590 * Description: This clears PCI reset to the adapter and delays two seconds.
6591 *
6592 * Return value:
6593 * 	IPR_RC_JOB_RETURN
6594 **/
6595static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6596{
6597	ENTER;
6598	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6599	ipr_cmd->job_step = ipr_reset_bist_done;
6600	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6601	LEAVE;
6602	return IPR_RC_JOB_RETURN;
6603}
6604
6605/**
6606 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6607 * @ipr_cmd:	ipr command struct
6608 *
6609 * Description: This asserts PCI reset to the adapter.
6610 *
6611 * Return value:
6612 * 	IPR_RC_JOB_RETURN
6613 **/
6614static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6615{
6616	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6617	struct pci_dev *pdev = ioa_cfg->pdev;
6618
6619	ENTER;
6620	pci_block_user_cfg_access(pdev);
6621	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6622	ipr_cmd->job_step = ipr_reset_slot_reset_done;
6623	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6624	LEAVE;
6625	return IPR_RC_JOB_RETURN;
6626}
6627
6628/**
6629 * ipr_reset_allowed - Query whether or not IOA can be reset
6630 * @ioa_cfg:	ioa config struct
6631 *
6632 * Return value:
6633 * 	0 if reset not allowed / non-zero if reset is allowed
6634 **/
6635static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6636{
6637	volatile u32 temp_reg;
6638
6639	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6640	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6641}
6642
6643/**
6644 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6645 * @ipr_cmd:	ipr command struct
6646 *
6647 * Description: This function waits for adapter permission to run BIST,
6648 * then runs BIST. If the adapter does not give permission after a
6649 * reasonable time, we will reset the adapter anyway. The impact of
6650 * resetting the adapter without warning the adapter is the risk of
6651 * losing the persistent error log on the adapter. If the adapter is
6652 * reset while it is writing to the flash on the adapter, the flash
6653 * segment will have bad ECC and be zeroed.
6654 *
6655 * Return value:
6656 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6657 **/
6658static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6659{
6660	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6661	int rc = IPR_RC_JOB_RETURN;
6662
6663	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6664		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6665		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6666	} else {
6667		ipr_cmd->job_step = ioa_cfg->reset;
6668		rc = IPR_RC_JOB_CONTINUE;
6669	}
6670
6671	return rc;
6672}
6673
6674/**
6675 * ipr_reset_alert - Alert the adapter of a pending reset
6676 * @ipr_cmd:	ipr command struct
6677 *
6678 * Description: This function alerts the adapter that it will be reset.
6679 * If memory space is not currently enabled, proceed directly
6680 * to running BIST on the adapter. The timer must always be started
6681 * so we guarantee we do not run BIST from ipr_isr.
6682 *
6683 * Return value:
6684 * 	IPR_RC_JOB_RETURN
6685 **/
6686static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6687{
6688	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6689	u16 cmd_reg;
6690	int rc;
6691
6692	ENTER;
6693	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6694
6695	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6696		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6697		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6698		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6699	} else {
6700		ipr_cmd->job_step = ioa_cfg->reset;
6701	}
6702
6703	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6704	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6705
6706	LEAVE;
6707	return IPR_RC_JOB_RETURN;
6708}
6709
6710/**
6711 * ipr_reset_ucode_download_done - Microcode download completion
6712 * @ipr_cmd:	ipr command struct
6713 *
6714 * Description: This function unmaps the microcode download buffer.
6715 *
6716 * Return value:
6717 * 	IPR_RC_JOB_CONTINUE
6718 **/
6719static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6720{
6721	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6722	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6723
6724	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6725		     sglist->num_sg, DMA_TO_DEVICE);
6726
6727	ipr_cmd->job_step = ipr_reset_alert;
6728	return IPR_RC_JOB_CONTINUE;
6729}
6730
6731/**
6732 * ipr_reset_ucode_download - Download microcode to the adapter
6733 * @ipr_cmd:	ipr command struct
6734 *
6735 * Description: This function checks to see if there is microcode
6736 * to download to the adapter. If there is, a download is performed.
6737 *
6738 * Return value:
6739 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6740 **/
6741static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6742{
6743	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6744	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6745
6746	ENTER;
6747	ipr_cmd->job_step = ipr_reset_alert;
6748
6749	if (!sglist)
6750		return IPR_RC_JOB_CONTINUE;
6751
6752	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6753	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6754	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6755	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6756	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6757	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6758	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6759
6760	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6761	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6762
6763	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6764		   IPR_WRITE_BUFFER_TIMEOUT);
6765
6766	LEAVE;
6767	return IPR_RC_JOB_RETURN;
6768}
6769
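/*
 * WRITE BUFFER takes a 24-bit parameter list length in CDB bytes 6-8,
 * most significant byte first, which is what the masks and shifts above
 * implement. For a hypothetical microcode image of 0x054000 bytes:
 *
 *	cdb[6] = 0x05, cdb[7] = 0x40, cdb[8] = 0x00
 */
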
6770/**
6771 * ipr_reset_shutdown_ioa - Shutdown the adapter
6772 * @ipr_cmd:	ipr command struct
6773 *
6774 * Description: This function issues an adapter shutdown of the
6775 * specified type to the specified adapter as part of the
6776 * adapter reset job.
6777 *
6778 * Return value:
6779 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6780 **/
6781static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6782{
6783	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6784	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6785	unsigned long timeout;
6786	int rc = IPR_RC_JOB_CONTINUE;
6787
6788	ENTER;
6789	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6790		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6791		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6792		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6793		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6794
6795		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
6796			timeout = IPR_SHUTDOWN_TIMEOUT;
6797		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6798			timeout = IPR_INTERNAL_TIMEOUT;
6799		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6800			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
6801		else
6802			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6803
6804		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6805
6806		rc = IPR_RC_JOB_RETURN;
6807		ipr_cmd->job_step = ipr_reset_ucode_download;
6808	} else
6809		ipr_cmd->job_step = ipr_reset_alert;
6810
6811	LEAVE;
6812	return rc;
6813}
6814
6815/**
6816 * ipr_reset_ioa_job - Adapter reset job
6817 * @ipr_cmd:	ipr command struct
6818 *
6819 * Description: This function is the job router for the adapter reset job.
6820 *
6821 * Return value:
6822 * 	none
6823 **/
6824static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6825{
6826	u32 rc, ioasc;
6827	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6828
6829	do {
6830		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6831
6832		if (ioa_cfg->reset_cmd != ipr_cmd) {
6833			/*
6834			 * We are doing nested adapter resets and this is
6835			 * not the current reset job.
6836			 */
6837			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6838			return;
6839		}
6840
6841		if (IPR_IOASC_SENSE_KEY(ioasc)) {
6842			rc = ipr_cmd->job_step_failed(ipr_cmd);
6843			if (rc == IPR_RC_JOB_RETURN)
6844				return;
6845		}
6846
6847		ipr_reinit_ipr_cmnd(ipr_cmd);
6848		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6849		rc = ipr_cmd->job_step(ipr_cmd);
6850	} while (rc == IPR_RC_JOB_CONTINUE);
6851}
6852
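/*
 * For orientation, a full reset driven through ipr_reset_ioa_job() walks
 * roughly this chain of job steps, each of which re-arms
 * ipr_cmd->job_step and either continues inline (JOB_CONTINUE) or waits
 * for a command completion or timer (JOB_RETURN):
 *
 *	ipr_reset_shutdown_ioa -> ipr_reset_ucode_download ->
 *	ipr_reset_alert -> ipr_reset_wait_to_start_bist -> ioa_cfg->reset
 *	(ipr_reset_start_bist or ipr_reset_slot_reset) ->
 *	ipr_reset_bist_done -> ipr_reset_restore_cfg_space ->
 *	ipr_reset_enable_ioa -> ipr_ioafp_identify_hrrq ->
 *	ipr_ioafp_std_inquiry -> page 0/3/0xD0 inquiries ->
 *	ipr_ioafp_query_ioa_cfg -> ipr_init_res_table ->
 *	mode sense/select pages 24 and 28 -> ipr_setup_write_cache ->
 *	ipr_set_supported_devs -> ipr_ioa_reset_done
 */
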
6853/**
6854 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6855 * @ioa_cfg:		ioa config struct
6856 * @job_step:		first job step of reset job
6857 * @shutdown_type:	shutdown type
6858 *
6859 * Description: This function will initiate the reset of the given adapter
6860 * starting at the selected job step.
6861 * If the caller needs to wait on the completion of the reset,
6862 * the caller must sleep on the reset_wait_q.
6863 *
6864 * Return value:
6865 * 	none
6866 **/
6867static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6868				    int (*job_step) (struct ipr_cmnd *),
6869				    enum ipr_shutdown_type shutdown_type)
6870{
6871	struct ipr_cmnd *ipr_cmd;
6872
6873	ioa_cfg->in_reset_reload = 1;
6874	ioa_cfg->allow_cmds = 0;
6875	scsi_block_requests(ioa_cfg->host);
6876
6877	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6878	ioa_cfg->reset_cmd = ipr_cmd;
6879	ipr_cmd->job_step = job_step;
6880	ipr_cmd->u.shutdown_type = shutdown_type;
6881
6882	ipr_reset_ioa_job(ipr_cmd);
6883}
6884
6885/**
6886 * ipr_initiate_ioa_reset - Initiate an adapter reset
6887 * @ioa_cfg:		ioa config struct
6888 * @shutdown_type:	shutdown type
6889 *
6890 * Description: This function will initiate the reset of the given adapter.
6891 * If the caller needs to wait on the completion of the reset,
6892 * the caller must sleep on the reset_wait_q.
6893 *
6894 * Return value:
6895 * 	none
6896 **/
6897static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6898				   enum ipr_shutdown_type shutdown_type)
6899{
6900	if (ioa_cfg->ioa_is_dead)
6901		return;
6902
6903	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6904		ioa_cfg->sdt_state = ABORT_DUMP;
6905
6906	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6907		dev_err(&ioa_cfg->pdev->dev,
6908			"IOA taken offline - error recovery failed\n");
6909
6910		ioa_cfg->reset_retries = 0;
6911		ioa_cfg->ioa_is_dead = 1;
6912
6913		if (ioa_cfg->in_ioa_bringdown) {
6914			ioa_cfg->reset_cmd = NULL;
6915			ioa_cfg->in_reset_reload = 0;
6916			ipr_fail_all_ops(ioa_cfg);
6917			wake_up_all(&ioa_cfg->reset_wait_q);
6918
6919			spin_unlock_irq(ioa_cfg->host->host_lock);
6920			scsi_unblock_requests(ioa_cfg->host);
6921			spin_lock_irq(ioa_cfg->host->host_lock);
6922			return;
6923		} else {
6924			ioa_cfg->in_ioa_bringdown = 1;
6925			shutdown_type = IPR_SHUTDOWN_NONE;
6926		}
6927	}
6928
6929	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6930				shutdown_type);
6931}
6932
6933/**
6934 * ipr_reset_freeze - Hold off all I/O activity
6935 * @ipr_cmd:	ipr command struct
6936 *
6937 * Description: If the PCI slot is frozen, hold off all I/O
6938 * activity; then, as soon as the slot is available again,
6939 * initiate an adapter reset.
6940 */
6941static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6942{
6943	/* Disallow new interrupts, avoid loop */
6944	ipr_cmd->ioa_cfg->allow_interrupts = 0;
6945	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6946	ipr_cmd->done = ipr_reset_ioa_job;
6947	return IPR_RC_JOB_RETURN;
6948}
6949
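/*
 * Taken together, the PCI error hooks below implement the EEH recovery
 * flow: ipr_pci_frozen() parks the reset job on ipr_reset_freeze() so
 * new I/O is held off, ipr_pci_slot_reset() restarts the job at the
 * appropriate step once the slot is usable again, and
 * ipr_pci_perm_failure() drives a final bringdown when the slot is gone
 * for good.
 */
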
6950/**
6951 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6952 * @pdev:	PCI device struct
6953 *
6954 * Description: This routine is called to tell us that the PCI bus
6955 * is down. Can't do anything here, except put the device driver
6956 * into a holding pattern, waiting for the PCI bus to come back.
6957 */
6958static void ipr_pci_frozen(struct pci_dev *pdev)
6959{
6960	unsigned long flags = 0;
6961	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6962
6963	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6964	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6965	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6966}
6967
6968/**
6969 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6970 * @pdev:	PCI device struct
6971 *
6972 * Description: This routine is called by the pci error recovery
6973 * code after the PCI slot has been reset, just before we
6974 * should resume normal operations.
6975 */
6976static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6977{
6978	unsigned long flags = 0;
6979	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6980
6981	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6982	if (ioa_cfg->needs_warm_reset)
6983		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6984	else
6985		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6986					IPR_SHUTDOWN_NONE);
6987	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6988	return PCI_ERS_RESULT_RECOVERED;
6989}
6990
6991/**
6992 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6993 * @pdev:	PCI device struct
6994 *
6995 * Description: This routine is called when the PCI bus has
6996 * permanently failed.
6997 */
6998static void ipr_pci_perm_failure(struct pci_dev *pdev)
6999{
7000	unsigned long flags = 0;
7001	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7002
7003	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7004	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7005		ioa_cfg->sdt_state = ABORT_DUMP;
7006	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7007	ioa_cfg->in_ioa_bringdown = 1;
7008	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7009	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7010}
7011
7012/**
7013 * ipr_pci_error_detected - Called when a PCI error is detected.
7014 * @pdev:	PCI device struct
7015 * @state:	PCI channel state
7016 *
7017 * Description: Called when a PCI error is detected.
7018 *
7019 * Return value:
7020 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7021 */
7022static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7023					       pci_channel_state_t state)
7024{
7025	switch (state) {
7026	case pci_channel_io_frozen:
7027		ipr_pci_frozen(pdev);
7028		return PCI_ERS_RESULT_NEED_RESET;
7029	case pci_channel_io_perm_failure:
7030		ipr_pci_perm_failure(pdev);
7031		return PCI_ERS_RESULT_DISCONNECT;
7033	default:
7034		break;
7035	}
7036	return PCI_ERS_RESULT_NEED_RESET;
7037}
7038
7039/**
7040 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7041 * @ioa_cfg:	ioa cfg struct
7042 *
7043 * Description: This is the second phase of adapter initialization.
7044 * This function takes care of initializing the adapter to the point
7045 * where it can accept new commands.
7046 *
7047 * Return value:
7048 * 	0 on success / -EIO on failure
7049 **/
7050static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7051{
7052	int rc = 0;
7053	unsigned long host_lock_flags = 0;
7054
7055	ENTER;
7056	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7057	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7058	if (ioa_cfg->needs_hard_reset) {
7059		ioa_cfg->needs_hard_reset = 0;
7060		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7061	} else
7062		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7063					IPR_SHUTDOWN_NONE);
7064
7065	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7066	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7067	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7068
7069	if (ioa_cfg->ioa_is_dead) {
7070		rc = -EIO;
7071	} else if (ipr_invalid_adapter(ioa_cfg)) {
7072		if (!ipr_testmode)
7073			rc = -EIO;
7074
7075		dev_err(&ioa_cfg->pdev->dev,
7076			"Adapter not supported in this hardware configuration.\n");
7077	}
7078
7079	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7080
7081	LEAVE;
7082	return rc;
7083}
7084
7085/**
7086 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7087 * @ioa_cfg:	ioa config struct
7088 *
7089 * Return value:
7090 * 	none
7091 **/
7092static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7093{
7094	int i;
7095
7096	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7097		if (ioa_cfg->ipr_cmnd_list[i])
7098			pci_pool_free(ioa_cfg->ipr_cmd_pool,
7099				      ioa_cfg->ipr_cmnd_list[i],
7100				      ioa_cfg->ipr_cmnd_list_dma[i]);
7101
7102		ioa_cfg->ipr_cmnd_list[i] = NULL;
7103	}
7104
7105	if (ioa_cfg->ipr_cmd_pool)
7106		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
7107
7108	ioa_cfg->ipr_cmd_pool = NULL;
7109}
7110
7111/**
7112 * ipr_free_mem - Frees memory allocated for an adapter
7113 * @ioa_cfg:	ioa cfg struct
7114 *
7115 * Return value:
7116 * 	nothing
7117 **/
7118static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7119{
7120	int i;
7121
7122	kfree(ioa_cfg->res_entries);
7123	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7124			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7125	ipr_free_cmd_blks(ioa_cfg);
7126	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7127			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7128	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7129			    ioa_cfg->cfg_table,
7130			    ioa_cfg->cfg_table_dma);
7131
7132	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7133		pci_free_consistent(ioa_cfg->pdev,
7134				    sizeof(struct ipr_hostrcb),
7135				    ioa_cfg->hostrcb[i],
7136				    ioa_cfg->hostrcb_dma[i]);
7137	}
7138
7139	ipr_free_dump(ioa_cfg);
7140	kfree(ioa_cfg->trace);
7141}
7142
7143/**
7144 * ipr_free_all_resources - Free all allocated resources for an adapter.
7145 * @ioa_cfg:	ioa config struct
7146 *
7147 * This function frees all allocated resources for the
7148 * specified adapter.
7149 *
7150 * Return value:
7151 * 	none
7152 **/
7153static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7154{
7155	struct pci_dev *pdev = ioa_cfg->pdev;
7156
7157	ENTER;
7158	free_irq(pdev->irq, ioa_cfg);
7159	iounmap(ioa_cfg->hdw_dma_regs);
7160	pci_release_regions(pdev);
7161	ipr_free_mem(ioa_cfg);
7162	scsi_host_put(ioa_cfg->host);
7163	pci_disable_device(pdev);
7164	LEAVE;
7165}
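
/*
 * Teardown order above mirrors acquisition in ipr_probe_ioa() in
 * reverse: the IRQ goes first so no interrupt can run against
 * mappings being released, then the MMIO mapping and PCI regions,
 * then driver memory, and last the SCSI host reference and the PCI
 * device enable.
 */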
7166
7167/**
7168 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7169 * @ioa_cfg:	ioa config struct
7170 *
7171 * Return value:
7172 * 	0 on success / -ENOMEM on allocation failure
7173 **/
7174static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7175{
7176	struct ipr_cmnd *ipr_cmd;
7177	struct ipr_ioarcb *ioarcb;
7178	dma_addr_t dma_addr;
7179	int i;
7180
7181	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
7182						sizeof(struct ipr_cmnd), 8, 0);
7183
7184	if (!ioa_cfg->ipr_cmd_pool)
7185		return -ENOMEM;
7186
7187	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7188		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7189
7190		if (!ipr_cmd) {
7191			ipr_free_cmd_blks(ioa_cfg);
7192			return -ENOMEM;
7193		}
7194
7195		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7196		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7197		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7198
7199		ioarcb = &ipr_cmd->ioarcb;
7200		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7201		ioarcb->host_response_handle = cpu_to_be32(i << 2);
7202		ioarcb->write_ioadl_addr =
7203			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
7204		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7205		ioarcb->ioasa_host_pci_addr =
7206			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7207		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7208		ipr_cmd->cmd_index = i;
7209		ipr_cmd->ioa_cfg = ioa_cfg;
7210		ipr_cmd->sense_buffer_dma = dma_addr +
7211			offsetof(struct ipr_cmnd, sense_buffer);
7212
7213		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7214	}
7215
7216	return 0;
7217}
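
/*
 * Every command block is one contiguous pci_pool allocation, so all
 * of the bus addresses the adapter needs are derived from the block's
 * base DMA address, as set up above:
 *
 *	ioarcb_host_pci_addr  = dma_addr
 *	write/read_ioadl_addr = dma_addr + offsetof(struct ipr_cmnd, ioadl)
 *	ioasa_host_pci_addr   = dma_addr + offsetof(struct ipr_cmnd, ioasa)
 *	sense_buffer_dma      = dma_addr + offsetof(struct ipr_cmnd, sense_buffer)
 *
 * The response handle (i << 2) leaves the low two bits clear; the
 * adapter's use of those bits is an assumption here, but the shift
 * keeps the handle distinct from any flag bits in host RRQ entries.
 */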
7218
7219/**
7220 * ipr_alloc_mem - Allocate memory for an adapter
7221 * @ioa_cfg:	ioa config struct
7222 *
7223 * Return value:
7224 * 	0 on success / non-zero for error
7225 **/
7226static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7227{
7228	struct pci_dev *pdev = ioa_cfg->pdev;
7229	int i, rc = -ENOMEM;
7230
7231	ENTER;
7232	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7233				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7234
7235	if (!ioa_cfg->res_entries)
7236		goto out;
7237
7238	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7239		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7240
7241	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7242						sizeof(struct ipr_misc_cbs),
7243						&ioa_cfg->vpd_cbs_dma);
7244
7245	if (!ioa_cfg->vpd_cbs)
7246		goto out_free_res_entries;
7247
7248	if (ipr_alloc_cmd_blks(ioa_cfg))
7249		goto out_free_vpd_cbs;
7250
7251	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7252						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7253						 &ioa_cfg->host_rrq_dma);
7254
7255	if (!ioa_cfg->host_rrq)
7256		goto out_ipr_free_cmd_blocks;
7257
7258	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7259						  sizeof(struct ipr_config_table),
7260						  &ioa_cfg->cfg_table_dma);
7261
7262	if (!ioa_cfg->cfg_table)
7263		goto out_free_host_rrq;
7264
7265	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7266		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7267							   sizeof(struct ipr_hostrcb),
7268							   &ioa_cfg->hostrcb_dma[i]);
7269
7270		if (!ioa_cfg->hostrcb[i])
7271			goto out_free_hostrcb_dma;
7272
7273		ioa_cfg->hostrcb[i]->hostrcb_dma =
7274			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7275		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7276		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7277	}
7278
7279	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7280				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7281
7282	if (!ioa_cfg->trace)
7283		goto out_free_hostrcb_dma;
7284
7285	rc = 0;
7286out:
7287	LEAVE;
7288	return rc;
7289
7290out_free_hostrcb_dma:
7291	while (i-- > 0) {
7292		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7293				    ioa_cfg->hostrcb[i],
7294				    ioa_cfg->hostrcb_dma[i]);
7295	}
7296	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7297			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7298out_free_host_rrq:
7299	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7300			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7301out_ipr_free_cmd_blocks:
7302	ipr_free_cmd_blks(ioa_cfg);
7303out_free_vpd_cbs:
7304	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7305			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7306out_free_res_entries:
7307	kfree(ioa_cfg->res_entries);
7308	goto out;
7309}
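
/*
 * The error labels above form the usual kernel unwind ladder: each
 * allocation failure jumps to the label that frees everything
 * allocated before it, so the labels appear in the reverse order of
 * the allocations.  A new allocation added between host_rrq and
 * cfg_table, for example, would be freed under a new label placed
 * just above out_free_host_rrq.
 */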
7310
7311/**
7312 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7313 * @ioa_cfg:	ioa config struct
7314 *
7315 * Return value:
7316 * 	none
7317 **/
7318static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7319{
7320	int i;
7321
7322	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7323		ioa_cfg->bus_attr[i].bus = i;
7324		ioa_cfg->bus_attr[i].qas_enabled = 0;
7325		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7326		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7327			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7328		else
7329			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7330	}
7331}
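
/*
 * Bus attributes default to IPR_DEFAULT_BUS_WIDTH with QAS off; the
 * maximum transfer rate comes from ipr_max_bus_speeds[] indexed by
 * the ipr_max_speed option (default 1, per its initializer), with
 * out-of-range values clamped to IPR_U160_SCSI_RATE.  The exported
 * parameter name is assumed to be max_speed, e.g.:
 *
 *	modprobe ipr max_speed=0
 */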
7332
7333/**
7334 * ipr_init_ioa_cfg - Initialize IOA config struct
7335 * @ioa_cfg:	ioa config struct
7336 * @host:		scsi host struct
7337 * @pdev:		PCI dev struct
7338 *
7339 * Return value:
7340 * 	none
7341 **/
7342static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7343				       struct Scsi_Host *host, struct pci_dev *pdev)
7344{
7345	const struct ipr_interrupt_offsets *p;
7346	struct ipr_interrupts *t;
7347	void __iomem *base;
7348
7349	ioa_cfg->host = host;
7350	ioa_cfg->pdev = pdev;
7351	ioa_cfg->log_level = ipr_log_level;
7352	ioa_cfg->doorbell = IPR_DOORBELL;
7353	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7354	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7355	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7356	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7357	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7358	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7359	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7360	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7361
7362	INIT_LIST_HEAD(&ioa_cfg->free_q);
7363	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7364	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7365	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7366	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7367	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7368	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7369	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7370	ioa_cfg->sdt_state = INACTIVE;
7371	if (ipr_enable_cache)
7372		ioa_cfg->cache_state = CACHE_ENABLED;
7373	else
7374		ioa_cfg->cache_state = CACHE_DISABLED;
7375
7376	ipr_initialize_bus_attr(ioa_cfg);
7377
7378	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7379	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7380	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7381	host->unique_id = host->host_no;
7382	host->max_cmd_len = IPR_MAX_CDB_LEN;
7383	pci_set_drvdata(pdev, ioa_cfg);
7384
7385	p = &ioa_cfg->chip_cfg->regs;
7386	t = &ioa_cfg->regs;
7387	base = ioa_cfg->hdw_dma_regs;
7388
7389	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7390	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7391	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7392	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7393	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7394	t->ioarrin_reg = base + p->ioarrin_reg;
7395	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7396	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7397	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7398}
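
/*
 * The block above converts chip-relative register offsets
 * (ioa_cfg->chip_cfg->regs) into absolute __iomem addresses so the
 * rest of the driver can touch them directly, as in this sketch:
 *
 *	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 *	writel(mask, ioa_cfg->regs.set_interrupt_mask_reg);
 *
 * (Illustrative only; real callers use the IPR_PCII_* bit values.)
 */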
7399
7400/**
7401 * ipr_get_chip_cfg - Find adapter chip configuration
7402 * @dev_id:		PCI device id struct
7403 *
7404 * Return value:
7405 * 	ptr to chip config on success / NULL on failure
7406 **/
7407static const struct ipr_chip_cfg_t * __devinit
7408ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7409{
7410	int i;
7411
7412	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7413		if (ipr_chip[i].vendor == dev_id->vendor &&
7414		    ipr_chip[i].device == dev_id->device)
7415			return ipr_chip[i].cfg;
7416	return NULL;
7417}
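
/*
 * ipr_chip[] (defined earlier in this file) maps PCI vendor/device
 * IDs to an ipr_chip_cfg[] entry.  A sketch of one element, with the
 * field order assumed from the lookup above:
 *
 *	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }
 *
 * A NULL return makes ipr_probe_ioa() reject adapters unknown to
 * this driver build.
 */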
7418
7419/**
7420 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7421 * @pdev:		PCI device struct
7422 * @dev_id:		PCI device id struct
7423 *
7424 * Return value:
7425 * 	0 on success / non-zero on failure
7426 **/
7427static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7428				   const struct pci_device_id *dev_id)
7429{
7430	struct ipr_ioa_cfg *ioa_cfg;
7431	struct Scsi_Host *host;
7432	unsigned long ipr_regs_pci;
7433	void __iomem *ipr_regs;
7434	int rc = PCIBIOS_SUCCESSFUL;
7435	volatile u32 mask, uproc, interrupts;
7436
7437	ENTER;
7438
7439	if ((rc = pci_enable_device(pdev))) {
7440		dev_err(&pdev->dev, "Cannot enable adapter\n");
7441		goto out;
7442	}
7443
7444	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7445
7446	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7447
7448	if (!host) {
7449		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7450		rc = -ENOMEM;
7451		goto out_disable;
7452	}
7453
7454	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7455	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7456	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7457		      sata_port_info.flags, &ipr_sata_ops);
7458
7459	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7460
7461	if (!ioa_cfg->chip_cfg) {
7462		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7463			dev_id->vendor, dev_id->device);
		rc = -ENODEV;
7464		goto out_scsi_host_put;
7465	}
7466
7467	if (ipr_transop_timeout)
7468		ioa_cfg->transop_timeout = ipr_transop_timeout;
7469	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7470		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7471	else
7472		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7473
7474	rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);
7475
7476	if (rc != PCIBIOS_SUCCESSFUL) {
7477		dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
7478		rc = -EIO;
7479		goto out_scsi_host_put;
7480	}
7481
7482	ipr_regs_pci = pci_resource_start(pdev, 0);
7483
7484	rc = pci_request_regions(pdev, IPR_NAME);
7485	if (rc < 0) {
7486		dev_err(&pdev->dev,
7487			"Couldn't register memory range of registers\n");
7488		goto out_scsi_host_put;
7489	}
7490
7491	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7492
7493	if (!ipr_regs) {
7494		dev_err(&pdev->dev,
7495			"Couldn't map memory range of registers\n");
7496		rc = -ENOMEM;
7497		goto out_release_regions;
7498	}
7499
7500	ioa_cfg->hdw_dma_regs = ipr_regs;
7501	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7502	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7503
7504	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7505
7506	pci_set_master(pdev);
7507
7508	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7509	if (rc < 0) {
7510		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7511		goto cleanup_nomem;
7512	}
7513
7514	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7515				   ioa_cfg->chip_cfg->cache_line_size);
7516
7517	if (rc != PCIBIOS_SUCCESSFUL) {
7518		dev_err(&pdev->dev, "Write of cache line size failed\n");
7519		rc = -EIO;
7520		goto cleanup_nomem;
7521	}
7522
7523	/* Save away PCI config space for use following IOA reset */
7524	rc = pci_save_state(pdev);
7525
7526	if (rc != PCIBIOS_SUCCESSFUL) {
7527		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7528		rc = -EIO;
7529		goto cleanup_nomem;
7530	}
7531
7532	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7533		goto cleanup_nomem;
7534
7535	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7536		goto cleanup_nomem;
7537
7538	rc = ipr_alloc_mem(ioa_cfg);
7539	if (rc < 0) {
7540		dev_err(&pdev->dev,
7541			"Couldn't allocate enough memory for device driver!\n");
7542		goto cleanup_nomem;
7543	}
7544
7545	/*
7546	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7547	 * the card is in an unknown state and needs a hard reset
7548	 */
7549	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7550	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
7551	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7552	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7553		ioa_cfg->needs_hard_reset = 1;
7554	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7555		ioa_cfg->needs_hard_reset = 1;
7556	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7557		ioa_cfg->ioa_unit_checked = 1;
7558
7559	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7560	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7561
7562	if (rc) {
7563		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7564			pdev->irq, rc);
7565		goto cleanup_nolog;
7566	}
7567
7568	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7569	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7570		ioa_cfg->needs_warm_reset = 1;
7571		ioa_cfg->reset = ipr_reset_slot_reset;
7572	} else
7573		ioa_cfg->reset = ipr_reset_start_bist;
7574
7575	spin_lock(&ipr_driver_lock);
7576	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7577	spin_unlock(&ipr_driver_lock);
7578
7579	LEAVE;
7580out:
7581	return rc;
7582
7583cleanup_nolog:
7584	ipr_free_mem(ioa_cfg);
7585cleanup_nomem:
7586	iounmap(ipr_regs);
7587out_release_regions:
7588	pci_release_regions(pdev);
7589out_scsi_host_put:
7590	scsi_host_put(host);
7591out_disable:
7592	pci_disable_device(pdev);
7593	goto out;
7594}
7595
7596/**
7597 * ipr_scan_vsets - Scans for VSET devices
7598 * @ioa_cfg:	ioa config struct
7599 *
7600 * Description: VSET resources do not follow SAM: LUNs can be sparse and
7601 * LUN 0 may be absent, so we have to scan for them ourselves.
7602 *
7603 * Return value:
7604 * 	none
7605 **/
7606static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7607{
7608	int target, lun;
7609
7610	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7611		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7612			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7613}
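
/*
 * scsi_scan_host() stops probing a target at the first missing LUN,
 * which is why the nested loop above calls scsi_add_device() for
 * every (target, lun) pair on the VSET bus instead of relying on the
 * normal sequential scan.
 */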
7614
7615/**
7616 * ipr_initiate_ioa_bringdown - Bring down an adapter
7617 * @ioa_cfg:		ioa config struct
7618 * @shutdown_type:	shutdown type
7619 *
7620 * Description: This function will initiate bringing down the adapter.
7621 * This consists of issuing an IOA shutdown to the adapter
7622 * to flush the cache, and running BIST.
7623 * If the caller needs to wait on the completion of the reset,
7624 * the caller must sleep on the reset_wait_q.
7625 *
7626 * Return value:
7627 * 	none
7628 **/
7629static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7630				       enum ipr_shutdown_type shutdown_type)
7631{
7632	ENTER;
7633	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7634		ioa_cfg->sdt_state = ABORT_DUMP;
7635	ioa_cfg->reset_retries = 0;
7636	ioa_cfg->in_ioa_bringdown = 1;
7637	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7638	LEAVE;
7639}
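
/*
 * A sketch of the calling convention described above, matching what
 * __ipr_remove() and ipr_shutdown() do below (flags is an
 * unsigned long):
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */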
7640
7641/**
7642 * __ipr_remove - Remove a single adapter
7643 * @pdev:	pci device struct
7644 *
7645 * Adapter hot plug remove entry point.
7646 *
7647 * Return value:
7648 * 	none
7649 **/
7650static void __ipr_remove(struct pci_dev *pdev)
7651{
7652	unsigned long host_lock_flags = 0;
7653	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7654	ENTER;
7655
7656	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7657	while (ioa_cfg->in_reset_reload) {
7658		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7659		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7660		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7661	}
7662
7663	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7664
7665	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7666	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7667	flush_scheduled_work();
7668	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7669
7670	spin_lock(&ipr_driver_lock);
7671	list_del(&ioa_cfg->queue);
7672	spin_unlock(&ipr_driver_lock);
7673
7674	if (ioa_cfg->sdt_state == ABORT_DUMP)
7675		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7676	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7677
7678	ipr_free_all_resources(ioa_cfg);
7679
7680	LEAVE;
7681}
7682
7683/**
7684 * ipr_remove - IOA hot plug remove entry point
7685 * @pdev:	pci device struct
7686 *
7687 * Adapter hot plug remove entry point.
7688 *
7689 * Return value:
7690 * 	none
7691 **/
7692static void ipr_remove(struct pci_dev *pdev)
7693{
7694	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7695
7696	ENTER;
7697
7698	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7699			      &ipr_trace_attr);
7700	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7701			     &ipr_dump_attr);
7702	scsi_remove_host(ioa_cfg->host);
7703
7704	__ipr_remove(pdev);
7705
7706	LEAVE;
7707}
7708
7709/**
7710 * ipr_probe - Adapter hot plug add entry point
7711 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
7712 * Return value:
7713 * 	0 on success / non-zero on failure
7714 **/
7715static int __devinit ipr_probe(struct pci_dev *pdev,
7716			       const struct pci_device_id *dev_id)
7717{
7718	struct ipr_ioa_cfg *ioa_cfg;
7719	int rc;
7720
7721	rc = ipr_probe_ioa(pdev, dev_id);
7722
7723	if (rc)
7724		return rc;
7725
7726	ioa_cfg = pci_get_drvdata(pdev);
7727	rc = ipr_probe_ioa_part2(ioa_cfg);
7728
7729	if (rc) {
7730		__ipr_remove(pdev);
7731		return rc;
7732	}
7733
7734	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7735
7736	if (rc) {
7737		__ipr_remove(pdev);
7738		return rc;
7739	}
7740
7741	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7742				   &ipr_trace_attr);
7743
7744	if (rc) {
7745		scsi_remove_host(ioa_cfg->host);
7746		__ipr_remove(pdev);
7747		return rc;
7748	}
7749
7750	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7751				   &ipr_dump_attr);
7752
7753	if (rc) {
7754		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7755				      &ipr_trace_attr);
7756		scsi_remove_host(ioa_cfg->host);
7757		__ipr_remove(pdev);
7758		return rc;
7759	}
7760
7761	scsi_scan_host(ioa_cfg->host);
7762	ipr_scan_vsets(ioa_cfg);
7763	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7764	ioa_cfg->allow_ml_add_del = 1;
7765	ioa_cfg->host->max_channel = IPR_VSET_BUS;
7766	schedule_work(&ioa_cfg->work_q);
7767	return 0;
7768}
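
/*
 * Probe order recap: ipr_probe_ioa() maps resources and registers the
 * IRQ, ipr_probe_ioa_part2() resets the adapter and waits until it is
 * operational, then the SCSI host is registered, the trace and dump
 * sysfs attributes are created, and the buses are scanned (including
 * the sparse VSET LUNs and the IOA's own resource at
 * IPR_IOA_BUS/IPR_IOA_TARGET/IPR_IOA_LUN).  Every failure path
 * unwinds exactly the steps completed before it.
 */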
7769
7770/**
7771 * ipr_shutdown - Shutdown handler.
7772 * @pdev:	pci device struct
7773 *
7774 * This function is invoked upon system shutdown/reboot. It will issue
7775 * an adapter shutdown to the adapter to flush the write cache.
7776 *
7777 * Return value:
7778 * 	none
7779 **/
7780static void ipr_shutdown(struct pci_dev *pdev)
7781{
7782	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7783	unsigned long lock_flags = 0;
7784
7785	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7786	while (ioa_cfg->in_reset_reload) {
7787		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7788		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7789		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7790	}
7791
7792	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7793	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7794	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7795}
7796
7797static struct pci_device_id ipr_pci_table[] __devinitdata = {
7798	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7799		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7800	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7801		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7802	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7803		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7804	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7805		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7806	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7807		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7808	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7809		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7810	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7811		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7812	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7813		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7814		IPR_USE_LONG_TRANSOP_TIMEOUT },
7815	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7816		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7817	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7818		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7819		IPR_USE_LONG_TRANSOP_TIMEOUT },
7820	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7821		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7822		IPR_USE_LONG_TRANSOP_TIMEOUT },
7823	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7824		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7825	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7826		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7827		IPR_USE_LONG_TRANSOP_TIMEOUT },
7828	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7829		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7830		IPR_USE_LONG_TRANSOP_TIMEOUT },
7831	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7832		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7833		IPR_USE_LONG_TRANSOP_TIMEOUT },
7834	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7835		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7836		IPR_USE_LONG_TRANSOP_TIMEOUT },
7837	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7838		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7839	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7840		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7841		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
7842	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7843		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7844	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7845		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7846	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7847		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7848		IPR_USE_LONG_TRANSOP_TIMEOUT },
7849	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7850		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7851		IPR_USE_LONG_TRANSOP_TIMEOUT },
7852	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7853		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7854		IPR_USE_LONG_TRANSOP_TIMEOUT },
7855	{ }
7856};
7857MODULE_DEVICE_TABLE(pci, ipr_pci_table);
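
/*
 * The final field of each entry above is driver_data, which
 * ipr_probe_ioa() treats as flag bits: IPR_USE_LONG_TRANSOP_TIMEOUT
 * selects IPR_LONG_OPERATIONAL_TIMEOUT, and IPR_USE_PCI_WARM_RESET
 * selects ipr_reset_slot_reset as the reset method.  Setting
 * dynids.use_driver_data below allows dynamically added IDs to carry
 * the same flags.
 */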
7858
7859static struct pci_error_handlers ipr_err_handler = {
7860	.error_detected = ipr_pci_error_detected,
7861	.slot_reset = ipr_pci_slot_reset,
7862};
7863
7864static struct pci_driver ipr_driver = {
7865	.name = IPR_NAME,
7866	.id_table = ipr_pci_table,
7867	.probe = ipr_probe,
7868	.remove = ipr_remove,
7869	.shutdown = ipr_shutdown,
7870	.err_handler = &ipr_err_handler,
7871	.dynids.use_driver_data = 1
7872};
7873
7874/**
7875 * ipr_init - Module entry point
7876 *
7877 * Return value:
7878 * 	0 on success / negative value on failure
7879 **/
7880static int __init ipr_init(void)
7881{
7882	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7883		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7884
7885	return pci_register_driver(&ipr_driver);
7886}
7887
7888/**
7889 * ipr_exit - Module unload
7890 *
7891 * Module unload entry point.
7892 *
7893 * Return value:
7894 * 	none
7895 **/
7896static void __exit ipr_exit(void)
7897{
7898	pci_unregister_driver(&ipr_driver);
7899}
7900
7901module_init(ipr_init);
7902module_exit(ipr_exit);
7903