/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
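
/* The max_speed module parameter below indexes this table: 0 selects
 * 80 MB/s, 1 selects U160, and 2 selects U320.
 */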

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
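
/*
 * Example (illustrative values only): the parameters above can be
 * supplied at module load time, e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=4 fastfail=1
 */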

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shut down"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
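
/*
 * Note: when CONFIG_SCSI_IPR_TRACE is not set, the stub macro above
 * compiles to nothing, so trace hooks add no overhead in such builds.
 */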

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioarcb->write_ioadl_addr =
		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
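
/*
 * Note (assumption): callers are expected to hold the host lock and to
 * only call ipr_get_free_ipr_cmnd() when free_q is known to be non-empty,
 * since list_entry() on an empty list head would yield an invalid pointer.
 */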

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
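	/* Read back the sense register to flush the posted MMIO writes
	 * above to the adapter; the value itself is discarded. */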
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

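	/* Make sure the IOARCB updates are globally visible before the
	 * IOARRIN doorbell write below triggers the adapter's DMA fetch
	 * of the command block. */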
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

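	/* Drop the host lock while sleeping so the interrupt path can take
	 * it and invoke ipr_internal_cmd_done() to wake us. */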
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
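
/*
 * Note (assumption): an HCAM behaves as a long-outstanding command; the
 * adapter holds it until a configuration change or error event occurs and
 * then completes it into ipr_process_ccn() or ipr_process_error(), which
 * re-issue the hostrcb so a request is always outstanding.
 */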

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
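
/*
 * Illustrative example: with a buffer holding "IBM     " and i = 7,
 * strip_and_pad_whitespace() rewrites it to "IBM " (one trailing space,
 * NUL terminated) and returns 4, the offset at which the caller copies
 * the next field.
 */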

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
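
/*
 * Note: the type 04/06 overlay splits up to 18 array members across two
 * fixed arrays; array_member holds entries 0-9 and array_member2 holds
 * the remainder, hence the switch above when i == 9.
 */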

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

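	/* len is in bytes; i indexes 32-bit words, four words per row */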
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
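
/*
 * Note (assumption): the 16 strings above are indexed by the low bits of
 * cfg->link_rate (IPR_PHY_LINK_RATE_MASK) and appear to mirror the SAS
 * negotiated physical link rate encoding (0x8 = 1.5Gbps, 0x9 = 3.0Gbps).
 */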

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1772 * @ipr_cmd:	ipr command struct
1773 *
1774 * This function blocks host requests and initiates an
1775 * adapter reset.
1776 *
1777 * Return value:
1778 * 	none
1779 **/
1780static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1781{
1782	unsigned long lock_flags = 0;
1783	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1784
1785	ENTER;
1786	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1787
1788	ioa_cfg->errors_logged++;
1789	dev_err(&ioa_cfg->pdev->dev,
1790		"Adapter timed out transitioning to operational.\n");
1791
1792	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1793		ioa_cfg->sdt_state = GET_DUMP;
1794
1795	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1796		if (ipr_fastfail)
1797			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1798		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1799	}
1800
1801	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1802	LEAVE;
1803}
1804
1805/**
1806 * ipr_reset_reload - Reset/Reload the IOA
1807 * @ioa_cfg:		ioa config struct
1808 * @shutdown_type:	shutdown type
1809 *
1810 * This function resets the adapter and re-initializes it.
1811 * This function assumes that all new host commands have been stopped.
1812 * Return value:
1813 * 	SUCCESS / FAILED
1814 **/
1815static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1816			    enum ipr_shutdown_type shutdown_type)
1817{
1818	if (!ioa_cfg->in_reset_reload)
1819		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1820
1821	spin_unlock_irq(ioa_cfg->host->host_lock);
1822	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1823	spin_lock_irq(ioa_cfg->host->host_lock);
1824
	/*
	 * If a host reset hit us while we were already resetting the
	 * adapter for some reason and that reset failed, the adapter
	 * will have been marked dead and we must report failure.
	 */
1827	if (ioa_cfg->ioa_is_dead) {
1828		ipr_trace;
1829		return FAILED;
1830	}
1831
1832	return SUCCESS;
1833}
1834
1835/**
1836 * ipr_find_ses_entry - Find matching SES in SES table
1837 * @res:	resource entry struct of SES
1838 *
1839 * Return value:
1840 * 	pointer to SES table entry / NULL on failure
1841 **/
1842static const struct ipr_ses_table_entry *
1843ipr_find_ses_entry(struct ipr_resource_entry *res)
1844{
1845	int i, j, matches;
1846	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1847
1848	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
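		/*
		 * An 'X' in compare_product_id_byte marks a byte that must
		 * match the table's product_id exactly; any other value
		 * makes that position a don't-care.
		 */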
1849		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1850			if (ste->compare_product_id_byte[j] == 'X') {
1851				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1852					matches++;
1853				else
1854					break;
1855			} else
1856				matches++;
1857		}
1858
1859		if (matches == IPR_PROD_ID_LEN)
1860			return ste;
1861	}
1862
1863	return NULL;
1864}
1865
1866/**
1867 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1868 * @ioa_cfg:	ioa config struct
1869 * @bus:		SCSI bus
1870 * @bus_width:	bus width
1871 *
1872 * Return value:
 *	SCSI bus speed in units of 100KHz, e.g. 1600 is 160 MHz.
 *	For a 2-byte wide SCSI bus, the maximum data rate is twice
 *	the bus clock (e.g. a wide-enabled bus running at 160 MHz
 *	transfers up to 320 MB/sec).
1877 **/
1878static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1879{
1880	struct ipr_resource_entry *res;
1881	const struct ipr_ses_table_entry *ste;
1882	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1883
1884	/* Loop through each config table entry in the config table buffer */
1885	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1886		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1887			continue;
1888
1889		if (bus != res->cfgte.res_addr.bus)
1890			continue;
1891
1892		if (!(ste = ipr_find_ses_entry(res)))
1893			continue;
1894
1895		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
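		/*
		 * The formula above assumes max_bus_speed_limit is in
		 * MB/sec: a limit of 160 yields 1600 (160 MHz) on an 8-bit
		 * bus and 800 (80 MHz) on a 16-bit bus, i.e. the same data
		 * rate at half the clock.
		 */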
1896	}
1897
1898	return max_xfer_rate;
1899}
1900
1901/**
1902 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1903 * @ioa_cfg:		ioa config struct
1904 * @max_delay:		max delay in micro-seconds to wait
1905 *
1906 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1907 *
1908 * Return value:
1909 * 	0 on success / other on failure
1910 **/
1911static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1912{
1913	volatile u32 pcii_reg;
1914	int delay = 1;
1915
1916	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1917	while (delay < max_delay) {
1918		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1919
1920		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1921			return 0;
1922
1923		/* udelay cannot be used if delay is more than a few milliseconds */
1924		if ((delay / 1000) > MAX_UDELAY_MS)
1925			mdelay(delay / 1000);
1926		else
1927			udelay(delay);
1928
		delay += delay;	/* exponential backoff: 1, 2, 4, ... usecs */
1930	}
1931	return -EIO;
1932}
1933
1934/**
1935 * ipr_get_ldump_data_section - Dump IOA memory
1936 * @ioa_cfg:			ioa config struct
1937 * @start_addr:			adapter address to dump
1938 * @dest:				destination kernel buffer
1939 * @length_in_words:	length to dump in 4 byte words
1940 *
1941 * Return value:
1942 * 	0 on success / -EIO on failure
1943 **/
1944static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1945				      u32 start_addr,
1946				      __be32 *dest, u32 length_in_words)
1947{
1948	volatile u32 temp_pcii_reg;
1949	int i, delay = 0;
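
	/*
	 * LDUMP handshake, as implemented below: raise the reset and debug
	 * alerts, wait for the IOA's debug acknowledge, hand the start
	 * address over via the mailbox, then read one word per acknowledge
	 * until length_in_words words have been transferred.
	 */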
1950
	/* Write IOA interrupt reg starting LDUMP state */
1952	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1953	       ioa_cfg->regs.set_uproc_interrupt_reg);
1954
1955	/* Wait for IO debug acknowledge */
1956	if (ipr_wait_iodbg_ack(ioa_cfg,
1957			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1958		dev_err(&ioa_cfg->pdev->dev,
1959			"IOA dump long data transfer timeout\n");
1960		return -EIO;
1961	}
1962
1963	/* Signal LDUMP interlocked - clear IO debug ack */
1964	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1965	       ioa_cfg->regs.clr_interrupt_reg);
1966
1967	/* Write Mailbox with starting address */
1968	writel(start_addr, ioa_cfg->ioa_mailbox);
1969
1970	/* Signal address valid - clear IOA Reset alert */
1971	writel(IPR_UPROCI_RESET_ALERT,
1972	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1973
1974	for (i = 0; i < length_in_words; i++) {
1975		/* Wait for IO debug acknowledge */
1976		if (ipr_wait_iodbg_ack(ioa_cfg,
1977				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1978			dev_err(&ioa_cfg->pdev->dev,
1979				"IOA dump short data transfer timeout\n");
1980			return -EIO;
1981		}
1982
1983		/* Read data from mailbox and increment destination pointer */
1984		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1985		dest++;
1986
1987		/* For all but the last word of data, signal data received */
1988		if (i < (length_in_words - 1)) {
1989			/* Signal dump data received - Clear IO debug Ack */
1990			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1991			       ioa_cfg->regs.clr_interrupt_reg);
1992		}
1993	}
1994
1995	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1996	writel(IPR_UPROCI_RESET_ALERT,
1997	       ioa_cfg->regs.set_uproc_interrupt_reg);
1998
1999	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2000	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2001
2002	/* Signal dump data received - Clear IO debug Ack */
2003	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2004	       ioa_cfg->regs.clr_interrupt_reg);
2005
2006	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2007	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2008		temp_pcii_reg =
2009		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2010
2011		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2012			return 0;
2013
2014		udelay(10);
2015		delay += 10;
2016	}
2017
2018	return 0;
2019}
2020
2021#ifdef CONFIG_SCSI_IPR_DUMP
2022/**
2023 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2024 * @ioa_cfg:		ioa config struct
2025 * @pci_address:	adapter address
2026 * @length:			length of data to copy
2027 *
2028 * Copy data from PCI adapter to kernel buffer.
2029 * Note: length MUST be a 4 byte multiple
2030 * Return value:
2031 * 	0 on success / other on failure
2032 **/
2033static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2034			unsigned long pci_address, u32 length)
2035{
2036	int bytes_copied = 0;
2037	int cur_len, rc, rem_len, rem_page_len;
2038	__be32 *page;
2039	unsigned long lock_flags = 0;
2040	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
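
	/*
	 * Dump data accumulates one page at a time: a fresh page is
	 * allocated whenever the current one fills (or on first use), and
	 * the schedule() call in each pass keeps us from monopolizing the
	 * CPU.
	 */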
2041
2042	while (bytes_copied < length &&
2043	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2044		if (ioa_dump->page_offset >= PAGE_SIZE ||
2045		    ioa_dump->page_offset == 0) {
2046			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2047
2048			if (!page) {
2049				ipr_trace;
2050				return bytes_copied;
2051			}
2052
2053			ioa_dump->page_offset = 0;
2054			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2055			ioa_dump->next_page_index++;
2056		} else
2057			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2058
2059		rem_len = length - bytes_copied;
2060		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2061		cur_len = min(rem_len, rem_page_len);
2062
2063		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2064		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2065			rc = -EIO;
2066		} else {
2067			rc = ipr_get_ldump_data_section(ioa_cfg,
2068							pci_address + bytes_copied,
2069							&page[ioa_dump->page_offset / 4],
2070							(cur_len / sizeof(u32)));
2071		}
2072		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2073
2074		if (!rc) {
2075			ioa_dump->page_offset += cur_len;
2076			bytes_copied += cur_len;
2077		} else {
2078			ipr_trace;
2079			break;
2080		}
2081		schedule();
2082	}
2083
2084	return bytes_copied;
2085}
2086
2087/**
2088 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2089 * @hdr:	dump entry header struct
2090 *
2091 * Return value:
2092 * 	nothing
2093 **/
2094static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2095{
2096	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2097	hdr->num_elems = 1;
2098	hdr->offset = sizeof(*hdr);
2099	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2100}
2101
2102/**
2103 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2104 * @ioa_cfg:	ioa config struct
2105 * @driver_dump:	driver dump struct
2106 *
2107 * Return value:
2108 * 	nothing
2109 **/
2110static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2111				   struct ipr_driver_dump *driver_dump)
2112{
2113	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2114
2115	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2116	driver_dump->ioa_type_entry.hdr.len =
2117		sizeof(struct ipr_dump_ioa_type_entry) -
2118		sizeof(struct ipr_dump_entry_header);
2119	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2120	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2121	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2122	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2123		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2124		ucode_vpd->minor_release[1];
2125	driver_dump->hdr.num_entries++;
2126}
2127
2128/**
2129 * ipr_dump_version_data - Fill in the driver version in the dump.
2130 * @ioa_cfg:	ioa config struct
2131 * @driver_dump:	driver dump struct
2132 *
2133 * Return value:
2134 * 	nothing
2135 **/
2136static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2137				  struct ipr_driver_dump *driver_dump)
2138{
2139	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2140	driver_dump->version_entry.hdr.len =
2141		sizeof(struct ipr_dump_version_entry) -
2142		sizeof(struct ipr_dump_entry_header);
2143	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2144	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2145	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2146	driver_dump->hdr.num_entries++;
2147}
2148
2149/**
2150 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2151 * @ioa_cfg:	ioa config struct
2152 * @driver_dump:	driver dump struct
2153 *
2154 * Return value:
2155 * 	nothing
2156 **/
2157static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2158				   struct ipr_driver_dump *driver_dump)
2159{
2160	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2161	driver_dump->trace_entry.hdr.len =
2162		sizeof(struct ipr_dump_trace_entry) -
2163		sizeof(struct ipr_dump_entry_header);
2164	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2165	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2166	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2167	driver_dump->hdr.num_entries++;
2168}
2169
2170/**
2171 * ipr_dump_location_data - Fill in the IOA location in the dump.
2172 * @ioa_cfg:	ioa config struct
2173 * @driver_dump:	driver dump struct
2174 *
2175 * Return value:
2176 * 	nothing
2177 **/
2178static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2179				   struct ipr_driver_dump *driver_dump)
2180{
2181	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2182	driver_dump->location_entry.hdr.len =
2183		sizeof(struct ipr_dump_location_entry) -
2184		sizeof(struct ipr_dump_entry_header);
2185	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2186	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2187	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2188	driver_dump->hdr.num_entries++;
2189}
2190
2191/**
2192 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2193 * @ioa_cfg:	ioa config struct
2194 * @dump:		dump struct
2195 *
2196 * Return value:
2197 * 	nothing
2198 **/
2199static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2200{
2201	unsigned long start_addr, sdt_word;
2202	unsigned long lock_flags = 0;
2203	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2204	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2205	u32 num_entries, start_off, end_off;
2206	u32 bytes_to_copy, bytes_copied, rc;
2207	struct ipr_sdt *sdt;
2208	int i;
2209
2210	ENTER;
2211
2212	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2213
2214	if (ioa_cfg->sdt_state != GET_DUMP) {
2215		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2216		return;
2217	}
2218
2219	start_addr = readl(ioa_cfg->ioa_mailbox);
2220
2221	if (!ipr_sdt_is_fmt2(start_addr)) {
2222		dev_err(&ioa_cfg->pdev->dev,
2223			"Invalid dump table format: %lx\n", start_addr);
2224		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2225		return;
2226	}
2227
2228	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2229
2230	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2231
2232	/* Initialize the overall dump header */
2233	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2234	driver_dump->hdr.num_entries = 1;
2235	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2236	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2237	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2238	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2239
2240	ipr_dump_version_data(ioa_cfg, driver_dump);
2241	ipr_dump_location_data(ioa_cfg, driver_dump);
2242	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2243	ipr_dump_trace_data(ioa_cfg, driver_dump);
2244
2245	/* Update dump_header */
2246	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2247
2248	/* IOA Dump entry */
2249	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2250	ioa_dump->format = IPR_SDT_FMT2;
2251	ioa_dump->hdr.len = 0;
2252	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2253	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2254
	/*
	 * First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data.  sdt represents the pointer
	 * to the ioa generated dump table.  Dump data will be extracted based
	 * on entries in this table.
	 */
2259	sdt = &ioa_dump->sdt;
2260
2261	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2262					sizeof(struct ipr_sdt) / sizeof(__be32));
2263
	/* Bail out unless the Smart Dump Table was read and is ready to use */
2265	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2266		dev_err(&ioa_cfg->pdev->dev,
2267			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2268			rc, be32_to_cpu(sdt->hdr.state));
2269		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2270		ioa_cfg->sdt_state = DUMP_OBTAINED;
2271		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2272		return;
2273	}
2274
2275	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2276
2277	if (num_entries > IPR_NUM_SDT_ENTRIES)
2278		num_entries = IPR_NUM_SDT_ENTRIES;
2279
2280	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2281
2282	for (i = 0; i < num_entries; i++) {
2283		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2284			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2285			break;
2286		}
2287
2288		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2289			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2290			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2291			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2292
2293			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2294				bytes_to_copy = end_off - start_off;
2295				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2296					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2297					continue;
2298				}
2299
2300				/* Copy data from adapter to driver buffers */
2301				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2302							    bytes_to_copy);
2303
2304				ioa_dump->hdr.len += bytes_copied;
2305
2306				if (bytes_copied != bytes_to_copy) {
2307					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2308					break;
2309				}
2310			}
2311		}
2312	}
2313
2314	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2315
2316	/* Update dump_header */
2317	driver_dump->hdr.len += ioa_dump->hdr.len;
2318	wmb();
2319	ioa_cfg->sdt_state = DUMP_OBTAINED;
2320	LEAVE;
2321}
2322
2323#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2325#endif
2326
2327/**
2328 * ipr_release_dump - Free adapter dump memory
2329 * @kref:	kref struct
2330 *
2331 * Return value:
2332 *	nothing
2333 **/
2334static void ipr_release_dump(struct kref *kref)
2335{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2337	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2338	unsigned long lock_flags = 0;
2339	int i;
2340
2341	ENTER;
2342	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2343	ioa_cfg->dump = NULL;
2344	ioa_cfg->sdt_state = INACTIVE;
2345	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2346
2347	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2348		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2349
2350	kfree(dump);
2351	LEAVE;
2352}
2353
2354/**
2355 * ipr_worker_thread - Worker thread
 * @work:		work struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
2361 *
2362 * Return value:
2363 * 	nothing
2364 **/
2365static void ipr_worker_thread(struct work_struct *work)
2366{
2367	unsigned long lock_flags;
2368	struct ipr_resource_entry *res;
2369	struct scsi_device *sdev;
2370	struct ipr_dump *dump;
2371	struct ipr_ioa_cfg *ioa_cfg =
2372		container_of(work, struct ipr_ioa_cfg, work_q);
2373	u8 bus, target, lun;
2374	int did_work;
2375
2376	ENTER;
2377	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2378
2379	if (ioa_cfg->sdt_state == GET_DUMP) {
2380		dump = ioa_cfg->dump;
2381		if (!dump) {
2382			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2383			return;
2384		}
2385		kref_get(&dump->kref);
2386		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2387		ipr_get_ioa_dump(ioa_cfg, dump);
2388		kref_put(&dump->kref, ipr_release_dump);
2389
2390		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2391		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2392			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2393		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2394		return;
2395	}
2396
2397restart:
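	/*
	 * Additions and removals must call into the mid-layer without the
	 * host lock held, so the lock is dropped around each call and the
	 * resource list is rescanned from the top afterwards.
	 */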
2398	do {
2399		did_work = 0;
2400		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2401			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2402			return;
2403		}
2404
2405		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2406			if (res->del_from_ml && res->sdev) {
2407				did_work = 1;
2408				sdev = res->sdev;
2409				if (!scsi_device_get(sdev)) {
2410					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2411					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2412					scsi_remove_device(sdev);
2413					scsi_device_put(sdev);
2414					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2415				}
2416				break;
2417			}
2418		}
	} while (did_work);
2420
2421	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2422		if (res->add_to_ml) {
2423			bus = res->cfgte.res_addr.bus;
2424			target = res->cfgte.res_addr.target;
2425			lun = res->cfgte.res_addr.lun;
2426			res->add_to_ml = 0;
2427			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2428			scsi_add_device(ioa_cfg->host, bus, target, lun);
2429			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2430			goto restart;
2431		}
2432	}
2433
2434	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2435	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
2436	LEAVE;
2437}
2438
2439#ifdef CONFIG_SCSI_IPR_TRACE
2440/**
2441 * ipr_read_trace - Dump the adapter trace
2442 * @kobj:		kobject struct
2443 * @bin_attr:		bin_attribute struct
2444 * @buf:		buffer
2445 * @off:		offset
2446 * @count:		buffer size
2447 *
2448 * Return value:
 *	number of bytes read
2450 **/
2451static ssize_t ipr_read_trace(struct kobject *kobj,
2452			      struct bin_attribute *bin_attr,
2453			      char *buf, loff_t off, size_t count)
2454{
2455	struct device *dev = container_of(kobj, struct device, kobj);
2456	struct Scsi_Host *shost = class_to_shost(dev);
2457	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2458	unsigned long lock_flags = 0;
2459	ssize_t ret;
2460
2461	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2462	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
2463				IPR_TRACE_SIZE);
2464	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2465
2466	return ret;
2467}
2468
2469static struct bin_attribute ipr_trace_attr = {
2470	.attr =	{
2471		.name = "trace",
2472		.mode = S_IRUGO,
2473	},
2474	.size = 0,
2475	.read = ipr_read_trace,
2476};
2477#endif
2478
2479static const struct {
2480	enum ipr_cache_state state;
2481	char *name;
} cache_state[] = {
2483	{ CACHE_NONE, "none" },
2484	{ CACHE_DISABLED, "disabled" },
2485	{ CACHE_ENABLED, "enabled" }
2486};
2487
2488/**
2489 * ipr_show_write_caching - Show the write caching attribute
2490 * @dev:	device struct
2491 * @buf:	buffer
2492 *
2493 * Return value:
2494 *	number of bytes printed to buffer
2495 **/
2496static ssize_t ipr_show_write_caching(struct device *dev,
2497				      struct device_attribute *attr, char *buf)
2498{
2499	struct Scsi_Host *shost = class_to_shost(dev);
2500	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2501	unsigned long lock_flags = 0;
2502	int i, len = 0;
2503
2504	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2505	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2506		if (cache_state[i].state == ioa_cfg->cache_state) {
2507			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2508			break;
2509		}
2510	}
2511	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2512	return len;
}
2515
2516/**
2517 * ipr_store_write_caching - Enable/disable adapter write cache
2518 * @dev:	device struct
2519 * @buf:	buffer
2520 * @count:	buffer size
2521 *
2522 * This function will enable/disable adapter write cache.
2523 *
2524 * Return value:
2525 * 	count on success / other on failure
2526 **/
2527static ssize_t ipr_store_write_caching(struct device *dev,
2528				       struct device_attribute *attr,
2529				       const char *buf, size_t count)
2530{
2531	struct Scsi_Host *shost = class_to_shost(dev);
2532	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2533	unsigned long lock_flags = 0;
2534	enum ipr_cache_state new_state = CACHE_INVALID;
2535	int i;
2536
2537	if (!capable(CAP_SYS_ADMIN))
2538		return -EACCES;
2539	if (ioa_cfg->cache_state == CACHE_NONE)
2540		return -EINVAL;
2541
2542	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2543		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2544			new_state = cache_state[i].state;
2545			break;
2546		}
2547	}
2548
2549	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2550		return -EINVAL;
2551
2552	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2553	if (ioa_cfg->cache_state == new_state) {
2554		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2555		return count;
2556	}
2557
2558	ioa_cfg->cache_state = new_state;
2559	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2560		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2561	if (!ioa_cfg->in_reset_reload)
2562		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2563	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2564	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2565
2566	return count;
2567}
2568
2569static struct device_attribute ipr_ioa_cache_attr = {
2570	.attr = {
2571		.name =		"write_cache",
2572		.mode =		S_IRUGO | S_IWUSR,
2573	},
2574	.show = ipr_show_write_caching,
2575	.store = ipr_store_write_caching
2576};
2577
2578/**
2579 * ipr_show_fw_version - Show the firmware version
2580 * @dev:	class device struct
2581 * @buf:	buffer
2582 *
2583 * Return value:
2584 *	number of bytes printed to buffer
2585 **/
2586static ssize_t ipr_show_fw_version(struct device *dev,
2587				   struct device_attribute *attr, char *buf)
2588{
2589	struct Scsi_Host *shost = class_to_shost(dev);
2590	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2591	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2592	unsigned long lock_flags = 0;
2593	int len;
2594
2595	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2596	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2597		       ucode_vpd->major_release, ucode_vpd->card_type,
2598		       ucode_vpd->minor_release[0],
2599		       ucode_vpd->minor_release[1]);
2600	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2601	return len;
2602}
2603
2604static struct device_attribute ipr_fw_version_attr = {
2605	.attr = {
2606		.name =		"fw_version",
2607		.mode =		S_IRUGO,
2608	},
2609	.show = ipr_show_fw_version,
2610};
2611
2612/**
2613 * ipr_show_log_level - Show the adapter's error logging level
2614 * @dev:	class device struct
2615 * @buf:	buffer
2616 *
2617 * Return value:
2618 * 	number of bytes printed to buffer
2619 **/
2620static ssize_t ipr_show_log_level(struct device *dev,
2621				   struct device_attribute *attr, char *buf)
2622{
2623	struct Scsi_Host *shost = class_to_shost(dev);
2624	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2625	unsigned long lock_flags = 0;
2626	int len;
2627
2628	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2629	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2630	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2631	return len;
2632}
2633
2634/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 * 	number of bytes consumed from the buffer
2641 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
2645{
2646	struct Scsi_Host *shost = class_to_shost(dev);
2647	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2648	unsigned long lock_flags = 0;
2649
2650	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2651	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2652	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2653	return strlen(buf);
2654}
2655
2656static struct device_attribute ipr_log_level_attr = {
2657	.attr = {
2658		.name =		"log_level",
2659		.mode =		S_IRUGO | S_IWUSR,
2660	},
2661	.show = ipr_show_log_level,
2662	.store = ipr_store_log_level
2663};
2664
2665/**
2666 * ipr_store_diagnostics - IOA Diagnostics interface
2667 * @dev:	device struct
2668 * @buf:	buffer
2669 * @count:	buffer size
2670 *
2671 * This function will reset the adapter and wait a reasonable
2672 * amount of time for any errors that the adapter might log.
2673 *
2674 * Return value:
2675 * 	count on success / other on failure
2676 **/
2677static ssize_t ipr_store_diagnostics(struct device *dev,
2678				     struct device_attribute *attr,
2679				     const char *buf, size_t count)
2680{
2681	struct Scsi_Host *shost = class_to_shost(dev);
2682	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2683	unsigned long lock_flags = 0;
2684	int rc = count;
2685
2686	if (!capable(CAP_SYS_ADMIN))
2687		return -EACCES;
2688
2689	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
2691		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2692		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2693		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2694	}
2695
2696	ioa_cfg->errors_logged = 0;
2697	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2698
2699	if (ioa_cfg->in_reset_reload) {
2700		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2701		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2702
2703		/* Wait for a second for any errors to be logged */
2704		msleep(1000);
2705	} else {
2706		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2707		return -EIO;
2708	}
2709
2710	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2711	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2712		rc = -EIO;
2713	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2714
2715	return rc;
2716}
2717
2718static struct device_attribute ipr_diagnostics_attr = {
2719	.attr = {
2720		.name =		"run_diagnostics",
2721		.mode =		S_IWUSR,
2722	},
2723	.store = ipr_store_diagnostics
2724};
2725
2726/**
2727 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
2729 * @buf:	buffer
2730 *
2731 * Return value:
2732 * 	number of bytes printed to buffer
2733 **/
2734static ssize_t ipr_show_adapter_state(struct device *dev,
2735				      struct device_attribute *attr, char *buf)
2736{
2737	struct Scsi_Host *shost = class_to_shost(dev);
2738	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2739	unsigned long lock_flags = 0;
2740	int len;
2741
2742	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2743	if (ioa_cfg->ioa_is_dead)
2744		len = snprintf(buf, PAGE_SIZE, "offline\n");
2745	else
2746		len = snprintf(buf, PAGE_SIZE, "online\n");
2747	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2748	return len;
2749}
2750
2751/**
2752 * ipr_store_adapter_state - Change adapter state
2753 * @dev:	device struct
2754 * @buf:	buffer
2755 * @count:	buffer size
2756 *
2757 * This function will change the adapter's state.
2758 *
2759 * Return value:
2760 * 	count on success / other on failure
2761 **/
2762static ssize_t ipr_store_adapter_state(struct device *dev,
2763				       struct device_attribute *attr,
2764				       const char *buf, size_t count)
2765{
2766	struct Scsi_Host *shost = class_to_shost(dev);
2767	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2768	unsigned long lock_flags;
2769	int result = count;
2770
2771	if (!capable(CAP_SYS_ADMIN))
2772		return -EACCES;
2773
2774	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2775	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2776		ioa_cfg->ioa_is_dead = 0;
2777		ioa_cfg->reset_retries = 0;
2778		ioa_cfg->in_ioa_bringdown = 0;
2779		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2780	}
2781	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2782	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2783
2784	return result;
2785}
2786
2787static struct device_attribute ipr_ioa_state_attr = {
2788	.attr = {
2789		.name =		"online_state",
2790		.mode =		S_IRUGO | S_IWUSR,
2791	},
2792	.show = ipr_show_adapter_state,
2793	.store = ipr_store_adapter_state
2794};
2795
2796/**
2797 * ipr_store_reset_adapter - Reset the adapter
2798 * @dev:	device struct
2799 * @buf:	buffer
2800 * @count:	buffer size
2801 *
2802 * This function will reset the adapter.
2803 *
2804 * Return value:
2805 * 	count on success / other on failure
2806 **/
2807static ssize_t ipr_store_reset_adapter(struct device *dev,
2808				       struct device_attribute *attr,
2809				       const char *buf, size_t count)
2810{
2811	struct Scsi_Host *shost = class_to_shost(dev);
2812	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2813	unsigned long lock_flags;
2814	int result = count;
2815
2816	if (!capable(CAP_SYS_ADMIN))
2817		return -EACCES;
2818
2819	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2820	if (!ioa_cfg->in_reset_reload)
2821		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2822	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2823	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2824
2825	return result;
2826}
2827
2828static struct device_attribute ipr_ioa_reset_attr = {
2829	.attr = {
2830		.name =		"reset_host",
2831		.mode =		S_IWUSR,
2832	},
2833	.store = ipr_store_reset_adapter
2834};
2835
2836/**
2837 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2838 * @buf_len:		buffer length
2839 *
2840 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2841 * list to use for microcode download
2842 *
2843 * Return value:
2844 * 	pointer to sglist / NULL on failure
2845 **/
2846static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2847{
2848	int sg_size, order, bsize_elem, num_elem, i, j;
2849	struct ipr_sglist *sglist;
2850	struct scatterlist *scatterlist;
2851	struct page *page;
2852
2853	/* Get the minimum size per scatter/gather element */
2854	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2855
2856	/* Get the actual size per element */
2857	order = get_order(sg_size);
2858
2859	/* Determine the actual number of bytes per element */
2860	bsize_elem = PAGE_SIZE * (1 << order);
2861
2862	/* Determine the actual number of sg entries needed */
2863	if (buf_len % bsize_elem)
2864		num_elem = (buf_len / bsize_elem) + 1;
2865	else
2866		num_elem = buf_len / bsize_elem;
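
	/*
	 * Worked example, assuming 4K pages and an IPR_MAX_SGLIST of 64:
	 * a 2MB image gives an sg_size of ~33KB, order 4 (64KB per
	 * element), and therefore 32 scatter/gather elements.
	 */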
2867
2868	/* Allocate a scatter/gather list for the DMA */
2869	sglist = kzalloc(sizeof(struct ipr_sglist) +
2870			 (sizeof(struct scatterlist) * (num_elem - 1)),
2871			 GFP_KERNEL);
2872
2873	if (sglist == NULL) {
2874		ipr_trace;
2875		return NULL;
2876	}
2877
2878	scatterlist = sglist->scatterlist;
2879	sg_init_table(scatterlist, num_elem);
2880
2881	sglist->order = order;
2882	sglist->num_sg = num_elem;
2883
2884	/* Allocate a bunch of sg elements */
2885	for (i = 0; i < num_elem; i++) {
2886		page = alloc_pages(GFP_KERNEL, order);
2887		if (!page) {
2888			ipr_trace;
2889
2890			/* Free up what we already allocated */
2891			for (j = i - 1; j >= 0; j--)
2892				__free_pages(sg_page(&scatterlist[j]), order);
2893			kfree(sglist);
2894			return NULL;
2895		}
2896
2897		sg_set_page(&scatterlist[i], page, 0, 0);
2898	}
2899
2900	return sglist;
2901}
2902
2903/**
2904 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
2906 *
2907 * Free a DMA'able ucode download buffer previously allocated with
2908 * ipr_alloc_ucode_buffer
2909 *
2910 * Return value:
2911 * 	nothing
2912 **/
2913static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2914{
2915	int i;
2916
2917	for (i = 0; i < sglist->num_sg; i++)
2918		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
2919
2920	kfree(sglist);
2921}
2922
2923/**
2924 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2925 * @sglist:		scatter/gather list pointer
2926 * @buffer:		buffer pointer
2927 * @len:		buffer length
2928 *
2929 * Copy a microcode image from a user buffer into a buffer allocated by
2930 * ipr_alloc_ucode_buffer
2931 *
2932 * Return value:
2933 * 	0 on success / other on failure
2934 **/
2935static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2936				 u8 *buffer, u32 len)
2937{
2938	int bsize_elem, i, result = 0;
2939	struct scatterlist *scatterlist;
2940	void *kaddr;
2941
2942	/* Determine the actual number of bytes per element */
2943	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2944
2945	scatterlist = sglist->scatterlist;
2946
2947	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2948		struct page *page = sg_page(&scatterlist[i]);
2949
2950		kaddr = kmap(page);
2951		memcpy(kaddr, buffer, bsize_elem);
2952		kunmap(page);
2953
		scatterlist[i].length = bsize_elem;
	}
2961
2962	if (len % bsize_elem) {
2963		struct page *page = sg_page(&scatterlist[i]);
2964
2965		kaddr = kmap(page);
2966		memcpy(kaddr, buffer, len % bsize_elem);
2967		kunmap(page);
2968
2969		scatterlist[i].length = len % bsize_elem;
2970	}
2971
2972	sglist->buffer_len = len;
2973	return result;
2974}
2975
2976/**
2977 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2978 * @ipr_cmd:	ipr command struct
2979 * @sglist:		scatter/gather list
2980 *
2981 * Builds a microcode download IOA data list (IOADL).
2982 *
2983 **/
2984static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2985				  struct ipr_sglist *sglist)
2986{
2987	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2988	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2989	struct scatterlist *scatterlist = sglist->scatterlist;
2990	int i;
2991
2992	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2993	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2994	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2995	ioarcb->write_ioadl_len =
2996		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2997
2998	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2999		ioadl[i].flags_and_data_len =
3000			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3001		ioadl[i].address =
3002			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3003	}
3004
	/* Flag the final descriptor so the IOA knows where the list ends */
	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3007}
3008
3009/**
3010 * ipr_update_ioa_ucode - Update IOA's microcode
3011 * @ioa_cfg:	ioa config struct
3012 * @sglist:		scatter/gather list
3013 *
3014 * Initiate an adapter reset to update the IOA's microcode
3015 *
3016 * Return value:
3017 * 	0 on success / -EIO on failure
3018 **/
3019static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3020				struct ipr_sglist *sglist)
3021{
3022	unsigned long lock_flags;
3023
3024	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
3026		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3027		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3028		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3029	}
3030
3031	if (ioa_cfg->ucode_sglist) {
3032		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3033		dev_err(&ioa_cfg->pdev->dev,
3034			"Microcode download already in progress\n");
3035		return -EIO;
3036	}
3037
3038	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3039					sglist->num_sg, DMA_TO_DEVICE);
3040
3041	if (!sglist->num_dma_sg) {
3042		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3043		dev_err(&ioa_cfg->pdev->dev,
3044			"Failed to map microcode download buffer!\n");
3045		return -EIO;
3046	}
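
	/*
	 * The adapter reset job performs the actual download; stashing the
	 * mapped sglist here is how the reset path finds the image to send.
	 */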
3047
3048	ioa_cfg->ucode_sglist = sglist;
3049	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3050	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3052
3053	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3054	ioa_cfg->ucode_sglist = NULL;
3055	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3056	return 0;
3057}
3058
3059/**
3060 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
3062 * @buf:	buffer
3063 * @count:	buffer size
3064 *
3065 * This function will update the firmware on the adapter.
3066 *
3067 * Return value:
3068 * 	count on success / other on failure
3069 **/
3070static ssize_t ipr_store_update_fw(struct device *dev,
3071				   struct device_attribute *attr,
3072				   const char *buf, size_t count)
3073{
3074	struct Scsi_Host *shost = class_to_shost(dev);
3075	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3076	struct ipr_ucode_image_header *image_hdr;
3077	const struct firmware *fw_entry;
3078	struct ipr_sglist *sglist;
3079	char fname[100];
	u8 *src;
3081	int len, result, dnld_size;
3082
3083	if (!capable(CAP_SYS_ADMIN))
3084		return -EACCES;
3085
	snprintf(fname, sizeof(fname), "%s", buf);
	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3090		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3091		return -EIO;
3092	}
3093
3094	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3095
3096	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3097	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3098	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3099		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3100		release_firmware(fw_entry);
3101		return -EINVAL;
3102	}
3103
3104	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3105	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3106	sglist = ipr_alloc_ucode_buffer(dnld_size);
3107
3108	if (!sglist) {
3109		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3110		release_firmware(fw_entry);
3111		return -ENOMEM;
3112	}
3113
3114	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3115
3116	if (result) {
3117		dev_err(&ioa_cfg->pdev->dev,
3118			"Microcode buffer copy to DMA buffer failed\n");
3119		goto out;
3120	}
3121
3122	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3123
3124	if (!result)
3125		result = count;
3126out:
3127	ipr_free_ucode_buffer(sglist);
3128	release_firmware(fw_entry);
3129	return result;
3130}
3131
3132static struct device_attribute ipr_update_fw_attr = {
3133	.attr = {
3134		.name =		"update_fw",
3135		.mode =		S_IWUSR,
3136	},
3137	.store = ipr_store_update_fw
3138};
3139
3140static struct device_attribute *ipr_ioa_attrs[] = {
3141	&ipr_fw_version_attr,
3142	&ipr_log_level_attr,
3143	&ipr_diagnostics_attr,
3144	&ipr_ioa_state_attr,
3145	&ipr_ioa_reset_attr,
3146	&ipr_update_fw_attr,
3147	&ipr_ioa_cache_attr,
3148	NULL,
3149};
3150
3151#ifdef CONFIG_SCSI_IPR_DUMP
3152/**
3153 * ipr_read_dump - Dump the adapter
3154 * @kobj:		kobject struct
3155 * @bin_attr:		bin_attribute struct
3156 * @buf:		buffer
3157 * @off:		offset
3158 * @count:		buffer size
3159 *
3160 * Return value:
 *	number of bytes read
3162 **/
3163static ssize_t ipr_read_dump(struct kobject *kobj,
3164			     struct bin_attribute *bin_attr,
3165			     char *buf, loff_t off, size_t count)
3166{
3167	struct device *cdev = container_of(kobj, struct device, kobj);
3168	struct Scsi_Host *shost = class_to_shost(cdev);
3169	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3170	struct ipr_dump *dump;
3171	unsigned long lock_flags = 0;
	u8 *src;
3173	int len;
3174	size_t rc = count;
3175
3176	if (!capable(CAP_SYS_ADMIN))
3177		return -EACCES;
3178
3179	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3180	dump = ioa_cfg->dump;
3181
3182	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3183		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184		return 0;
3185	}
3186	kref_get(&dump->kref);
3187	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3188
3189	if (off > dump->driver_dump.hdr.len) {
3190		kref_put(&dump->kref, ipr_release_dump);
3191		return 0;
3192	}
3193
3194	if (off + count > dump->driver_dump.hdr.len) {
3195		count = dump->driver_dump.hdr.len - off;
3196		rc = count;
3197	}
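
	/*
	 * The dump reads back as three concatenated regions: the driver
	 * dump, the IOA dump header, then the paged IOA data; each block
	 * below copies whatever slice of its region the (off, count)
	 * window covers.
	 */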
3198
3199	if (count && off < sizeof(dump->driver_dump)) {
3200		if (off + count > sizeof(dump->driver_dump))
3201			len = sizeof(dump->driver_dump) - off;
3202		else
3203			len = count;
3204		src = (u8 *)&dump->driver_dump + off;
3205		memcpy(buf, src, len);
3206		buf += len;
3207		off += len;
3208		count -= len;
3209	}
3210
3211	off -= sizeof(dump->driver_dump);
3212
3213	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3214		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3215			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3216		else
3217			len = count;
3218		src = (u8 *)&dump->ioa_dump + off;
3219		memcpy(buf, src, len);
3220		buf += len;
3221		off += len;
3222		count -= len;
3223	}
3224
3225	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3226
3227	while (count) {
3228		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3229			len = PAGE_ALIGN(off) - off;
3230		else
3231			len = count;
3232		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3233		src += off & ~PAGE_MASK;
3234		memcpy(buf, src, len);
3235		buf += len;
3236		off += len;
3237		count -= len;
3238	}
3239
3240	kref_put(&dump->kref, ipr_release_dump);
3241	return rc;
3242}
3243
3244/**
3245 * ipr_alloc_dump - Prepare for adapter dump
3246 * @ioa_cfg:	ioa config struct
3247 *
3248 * Return value:
3249 *	0 on success / other on failure
3250 **/
3251static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3252{
3253	struct ipr_dump *dump;
3254	unsigned long lock_flags = 0;
3255
3256	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3257
3258	if (!dump) {
3259		ipr_err("Dump memory allocation failed\n");
3260		return -ENOMEM;
3261	}
3262
3263	kref_init(&dump->kref);
3264	dump->ioa_cfg = ioa_cfg;
3265
3266	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3267
3268	if (INACTIVE != ioa_cfg->sdt_state) {
3269		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270		kfree(dump);
3271		return 0;
3272	}
3273
3274	ioa_cfg->dump = dump;
3275	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3276	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3277		ioa_cfg->dump_taken = 1;
3278		schedule_work(&ioa_cfg->work_q);
3279	}
3280	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3281
3282	return 0;
3283}
3284
3285/**
3286 * ipr_free_dump - Free adapter dump memory
3287 * @ioa_cfg:	ioa config struct
3288 *
3289 * Return value:
3290 *	0 on success / other on failure
3291 **/
3292static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3293{
3294	struct ipr_dump *dump;
3295	unsigned long lock_flags = 0;
3296
3297	ENTER;
3298
3299	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300	dump = ioa_cfg->dump;
3301	if (!dump) {
3302		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303		return 0;
3304	}
3305
3306	ioa_cfg->dump = NULL;
3307	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308
3309	kref_put(&dump->kref, ipr_release_dump);
3310
3311	LEAVE;
3312	return 0;
3313}
3314
3315/**
3316 * ipr_write_dump - Setup dump state of adapter
3317 * @kobj:		kobject struct
3318 * @bin_attr:		bin_attribute struct
3319 * @buf:		buffer
3320 * @off:		offset
3321 * @count:		buffer size
3322 *
 * Return value:
 *	count on success / other on failure
3325 **/
3326static ssize_t ipr_write_dump(struct kobject *kobj,
3327			      struct bin_attribute *bin_attr,
3328			      char *buf, loff_t off, size_t count)
3329{
3330	struct device *cdev = container_of(kobj, struct device, kobj);
3331	struct Scsi_Host *shost = class_to_shost(cdev);
3332	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3333	int rc;
3334
3335	if (!capable(CAP_SYS_ADMIN))
3336		return -EACCES;
3337
3338	if (buf[0] == '1')
3339		rc = ipr_alloc_dump(ioa_cfg);
3340	else if (buf[0] == '0')
3341		rc = ipr_free_dump(ioa_cfg);
3342	else
3343		return -EINVAL;
3344
3345	if (rc)
3346		return rc;
3347	else
3348		return count;
3349}
3350
3351static struct bin_attribute ipr_dump_attr = {
3352	.attr =	{
3353		.name = "dump",
3354		.mode = S_IRUSR | S_IWUSR,
3355	},
3356	.size = 0,
3357	.read = ipr_read_dump,
3358	.write = ipr_write_dump
3359};
3360#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3362#endif
3363
3364/**
3365 * ipr_change_queue_depth - Change the device's queue depth
3366 * @sdev:	scsi device struct
3367 * @qdepth:	depth to set
3368 *
3369 * Return value:
3370 * 	actual depth set
3371 **/
3372static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3373{
3374	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3375	struct ipr_resource_entry *res;
3376	unsigned long lock_flags = 0;
3377
3378	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3379	res = (struct ipr_resource_entry *)sdev->hostdata;
3380
3381	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3382		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3383	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3384
3385	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3386	return sdev->queue_depth;
3387}
3388
3389/**
3390 * ipr_change_queue_type - Change the device's queue type
 * @sdev:		scsi device struct
3392 * @tag_type:	type of tags to use
3393 *
3394 * Return value:
3395 * 	actual queue type set
3396 **/
3397static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3398{
3399	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3400	struct ipr_resource_entry *res;
3401	unsigned long lock_flags = 0;
3402
3403	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404	res = (struct ipr_resource_entry *)sdev->hostdata;
3405
3406	if (res) {
3407		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3408			/*
3409			 * We don't bother quiescing the device here since the
3410			 * adapter firmware does it for us.
3411			 */
3412			scsi_set_tag_type(sdev, tag_type);
3413
3414			if (tag_type)
3415				scsi_activate_tcq(sdev, sdev->queue_depth);
3416			else
3417				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3418		} else
3419			tag_type = 0;
3420	} else
3421		tag_type = 0;
3422
3423	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3424	return tag_type;
3425}
3426
3427/**
3428 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3429 * @dev:	device struct
3430 * @buf:	buffer
3431 *
3432 * Return value:
3433 * 	number of bytes printed to buffer
3434 **/
static ssize_t ipr_show_adapter_handle(struct device *dev,
				       struct device_attribute *attr, char *buf)
3436{
3437	struct scsi_device *sdev = to_scsi_device(dev);
3438	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3439	struct ipr_resource_entry *res;
3440	unsigned long lock_flags = 0;
3441	ssize_t len = -ENXIO;
3442
3443	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3444	res = (struct ipr_resource_entry *)sdev->hostdata;
3445	if (res)
3446		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3447	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3448	return len;
3449}
3450
3451static struct device_attribute ipr_adapter_handle_attr = {
3452	.attr = {
3453		.name = 	"adapter_handle",
3454		.mode =		S_IRUSR,
3455	},
3456	.show = ipr_show_adapter_handle
3457};
3458
3459static struct device_attribute *ipr_dev_attrs[] = {
3460	&ipr_adapter_handle_attr,
3461	NULL,
3462};
3463
3464/**
3465 * ipr_biosparam - Return the HSC mapping
3466 * @sdev:			scsi device struct
3467 * @block_device:	block device pointer
3468 * @capacity:		capacity of the device
3469 * @parm:			Array containing returned HSC values.
3470 *
3471 * This function generates the HSC parms that fdisk uses.
3472 * We want to make sure we return something that places partitions
3473 * on 4k boundaries for best performance with the IOA.
3474 *
3475 * Return value:
3476 * 	0 on success
3477 **/
3478static int ipr_biosparam(struct scsi_device *sdev,
3479			 struct block_device *block_device,
3480			 sector_t capacity, int *parm)
3481{
3482	int heads, sectors;
3483	sector_t cylinders;
3484
3485	heads = 128;
3486	sectors = 32;
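
	/*
	 * 128 heads * 32 sectors = 4096 sectors (2MB) per cylinder, so
	 * cylinder-aligned partitions always begin on a 4k boundary.
	 */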
3487
3488	cylinders = capacity;
3489	sector_div(cylinders, (128 * 32));
3490
3491	/* return result */
3492	parm[0] = heads;
3493	parm[1] = sectors;
3494	parm[2] = cylinders;
3495
3496	return 0;
3497}
3498
3499/**
3500 * ipr_find_starget - Find target based on bus/target.
3501 * @starget:	scsi target struct
3502 *
3503 * Return value:
3504 * 	resource entry pointer if found / NULL if not found
3505 **/
3506static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3507{
3508	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3509	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3510	struct ipr_resource_entry *res;
3511
3512	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3513		if ((res->cfgte.res_addr.bus == starget->channel) &&
3514		    (res->cfgte.res_addr.target == starget->id) &&
3515		    (res->cfgte.res_addr.lun == 0)) {
3516			return res;
3517		}
3518	}
3519
3520	return NULL;
3521}
3522
3523static struct ata_port_info sata_port_info;
3524
3525/**
3526 * ipr_target_alloc - Prepare for commands to a SCSI target
3527 * @starget:	scsi target struct
3528 *
3529 * If the device is a SATA device, this function allocates an
3530 * ATA port with libata, else it does nothing.
3531 *
3532 * Return value:
3533 * 	0 on success / non-0 on failure
3534 **/
3535static int ipr_target_alloc(struct scsi_target *starget)
3536{
3537	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3538	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3539	struct ipr_sata_port *sata_port;
3540	struct ata_port *ap;
3541	struct ipr_resource_entry *res;
3542	unsigned long lock_flags;
3543
3544	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3545	res = ipr_find_starget(starget);
3546	starget->hostdata = NULL;
3547
3548	if (res && ipr_is_gata(res)) {
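		/*
		 * Drop the host lock across the allocations below;
		 * kzalloc(GFP_KERNEL) and ata_sas_port_alloc() may sleep.
		 */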
3549		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3550		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3551		if (!sata_port)
3552			return -ENOMEM;
3553
3554		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3555		if (ap) {
3556			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557			sata_port->ioa_cfg = ioa_cfg;
3558			sata_port->ap = ap;
3559			sata_port->res = res;
3560
3561			res->sata_port = sata_port;
3562			ap->private_data = sata_port;
3563			starget->hostdata = sata_port;
3564		} else {
3565			kfree(sata_port);
3566			return -ENOMEM;
3567		}
3568	}
3569	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570
3571	return 0;
3572}
3573
3574/**
3575 * ipr_target_destroy - Destroy a SCSI target
3576 * @starget:	scsi target struct
3577 *
3578 * If the device was a SATA device, this function frees the libata
3579 * ATA port, else it does nothing.
3580 *
3581 **/
3582static void ipr_target_destroy(struct scsi_target *starget)
3583{
3584	struct ipr_sata_port *sata_port = starget->hostdata;
3585
3586	if (sata_port) {
3587		starget->hostdata = NULL;
3588		ata_sas_port_destroy(sata_port->ap);
3589		kfree(sata_port);
3590	}
3591}
3592
3593/**
3594 * ipr_find_sdev - Find device based on bus/target/lun.
3595 * @sdev:	scsi device struct
3596 *
3597 * Return value:
3598 * 	resource entry pointer if found / NULL if not found
3599 **/
3600static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3601{
3602	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3603	struct ipr_resource_entry *res;
3604
3605	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3606		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3607		    (res->cfgte.res_addr.target == sdev->id) &&
3608		    (res->cfgte.res_addr.lun == sdev->lun))
3609			return res;
3610	}
3611
3612	return NULL;
3613}
3614
3615/**
3616 * ipr_slave_destroy - Unconfigure a SCSI device
3617 * @sdev:	scsi device struct
3618 *
3619 * Return value:
3620 * 	nothing
3621 **/
3622static void ipr_slave_destroy(struct scsi_device *sdev)
3623{
3624	struct ipr_resource_entry *res;
3625	struct ipr_ioa_cfg *ioa_cfg;
3626	unsigned long lock_flags = 0;
3627
3628	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3629
3630	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3631	res = (struct ipr_resource_entry *) sdev->hostdata;
3632	if (res) {
3633		if (res->sata_port)
3634			ata_port_disable(res->sata_port->ap);
3635		sdev->hostdata = NULL;
3636		res->sdev = NULL;
3637		res->sata_port = NULL;
3638	}
3639	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3640}
3641
3642/**
3643 * ipr_slave_configure - Configure a SCSI device
3644 * @sdev:	scsi device struct
3645 *
3646 * This function configures the specified scsi device.
3647 *
3648 * Return value:
3649 * 	0 on success
3650 **/
3651static int ipr_slave_configure(struct scsi_device *sdev)
3652{
3653	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3654	struct ipr_resource_entry *res;
3655	unsigned long lock_flags = 0;
3656
3657	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3658	res = sdev->hostdata;
3659	if (res) {
3660		if (ipr_is_af_dasd_device(res))
3661			sdev->type = TYPE_RAID;
3662		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3663			sdev->scsi_level = 4;
3664			sdev->no_uld_attach = 1;
3665		}
3666		if (ipr_is_vset_device(res)) {
3667			blk_queue_rq_timeout(sdev->request_queue,
3668					     IPR_VSET_RW_TIMEOUT);
3669			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3670		}
3671		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3672			sdev->allow_restart = 1;
3673		if (ipr_is_gata(res) && res->sata_port) {
3674			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3675			ata_sas_slave_configure(sdev, res->sata_port->ap);
3676		} else {
3677			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3678		}
3679	}
3680	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3681	return 0;
3682}
3683
3684/**
3685 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3686 * @sdev:	scsi device struct
3687 *
3688 * This function initializes an ATA port so that future commands
3689 * sent through queuecommand will work.
3690 *
3691 * Return value:
3692 * 	0 on success
3693 **/
3694static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3695{
3696	struct ipr_sata_port *sata_port = NULL;
3697	int rc = -ENXIO;
3698
3699	ENTER;
3700	if (sdev->sdev_target)
3701		sata_port = sdev->sdev_target->hostdata;
3702	if (sata_port)
3703		rc = ata_sas_port_init(sata_port->ap);
3704	if (rc)
3705		ipr_slave_destroy(sdev);
3706
3707	LEAVE;
3708	return rc;
3709}
3710
3711/**
3712 * ipr_slave_alloc - Prepare for commands to a device.
3713 * @sdev:	scsi device struct
3714 *
3715 * This function saves a pointer to the resource entry
3716 * in the scsi device struct if the device exists. We
3717 * can then use this pointer in ipr_queuecommand when
3718 * handling new commands.
3719 *
3720 * Return value:
3721 * 	0 on success / -ENXIO if device does not exist
3722 **/
3723static int ipr_slave_alloc(struct scsi_device *sdev)
3724{
3725	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3726	struct ipr_resource_entry *res;
3727	unsigned long lock_flags;
3728	int rc = -ENXIO;
3729
3730	sdev->hostdata = NULL;
3731
3732	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3733
3734	res = ipr_find_sdev(sdev);
3735	if (res) {
3736		res->sdev = sdev;
3737		res->add_to_ml = 0;
3738		res->in_erp = 0;
3739		sdev->hostdata = res;
3740		if (!ipr_is_naca_model(res))
3741			res->needs_sync_complete = 1;
3742		rc = 0;
3743		if (ipr_is_gata(res)) {
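			/*
			 * The libata init path may sleep, so release the
			 * host lock before calling into it.
			 */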
3744			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3745			return ipr_ata_slave_alloc(sdev);
3746		}
3747	}
3748
3749	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3750
3751	return rc;
3752}
3753
3754/**
3755 * __ipr_eh_host_reset - Reset the host adapter
3756 * @scsi_cmd:	scsi command struct
3757 *
3758 * Return value:
3759 * 	SUCCESS / FAILED
3760 **/
3761static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3762{
3763	struct ipr_ioa_cfg *ioa_cfg;
3764	int rc;
3765
3766	ENTER;
3767	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3768
3769	dev_err(&ioa_cfg->pdev->dev,
3770		"Adapter being reset as a result of error recovery.\n");
3771
3772	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3773		ioa_cfg->sdt_state = GET_DUMP;
3774
3775	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3776
3777	LEAVE;
3778	return rc;
3779}
3780
3781static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3782{
3783	int rc;
3784
3785	spin_lock_irq(cmd->device->host->host_lock);
3786	rc = __ipr_eh_host_reset(cmd);
3787	spin_unlock_irq(cmd->device->host->host_lock);
3788
3789	return rc;
3790}
3791
3792/**
3793 * ipr_device_reset - Reset the device
3794 * @ioa_cfg:	ioa config struct
3795 * @res:		resource entry struct
3796 *
3797 * This function issues a device reset to the affected device.
3798 * If the device is a SCSI device, a LUN reset will be sent
3799 * to the device first. If that does not work, a target reset
3800 * will be sent. If the device is a SATA device, a PHY reset will
3801 * be sent.
3802 *
3803 * Return value:
3804 *	0 on success / non-zero on failure
3805 **/
3806static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3807			    struct ipr_resource_entry *res)
3808{
3809	struct ipr_cmnd *ipr_cmd;
3810	struct ipr_ioarcb *ioarcb;
3811	struct ipr_cmd_pkt *cmd_pkt;
3812	struct ipr_ioarcb_ata_regs *regs;
3813	u32 ioasc;
3814
3815	ENTER;
3816	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3817	ioarcb = &ipr_cmd->ioarcb;
3818	cmd_pkt = &ioarcb->cmd_pkt;
3819	regs = &ioarcb->add_data.u.regs;
3820
3821	ioarcb->res_handle = res->cfgte.res_handle;
3822	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3823	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3824	if (ipr_is_gata(res)) {
3825		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3826		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3827		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3828	}
3829
3830	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3831	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3832	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3833	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3834		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3835		       sizeof(struct ipr_ioasa_gata));
3836
3837	LEAVE;
3838	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3839}
3840
3841/**
3842 * ipr_sata_reset - Reset the SATA port
3843 * @link:	SATA link to reset
3844 * @classes:	class of the attached device
 * @deadline:	unused
3845 *
3846 * This function issues a SATA phy reset to the affected ATA link.
3847 *
3848 * Return value:
3849 *	0 on success / non-zero on failure
3850 **/
3851static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3852				unsigned long deadline)
3853{
3854	struct ipr_sata_port *sata_port = link->ap->private_data;
3855	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3856	struct ipr_resource_entry *res;
3857	unsigned long lock_flags = 0;
3858	int rc = -ENXIO;
3859
3860	ENTER;
3861	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
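	/*
	 * If an adapter reset/reload is already in progress, wait for it
	 * to finish before issuing the device reset.
	 */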
3862	while (ioa_cfg->in_reset_reload) {
3863		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3864		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3865		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3866	}
3867
3868	res = sata_port->res;
3869	if (res) {
3870		rc = ipr_device_reset(ioa_cfg, res);
3871		switch (res->cfgte.proto) {
3872		case IPR_PROTO_SATA:
3873		case IPR_PROTO_SAS_STP:
3874			*classes = ATA_DEV_ATA;
3875			break;
3876		case IPR_PROTO_SATA_ATAPI:
3877		case IPR_PROTO_SAS_STP_ATAPI:
3878			*classes = ATA_DEV_ATAPI;
3879			break;
3880		default:
3881			*classes = ATA_DEV_UNKNOWN;
3882			break;
3883		}
3884	}
3885
3886	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3887	LEAVE;
3888	return rc;
3889}
3890
3891/**
3892 * __ipr_eh_dev_reset - Reset the device
3893 * @scsi_cmd:	scsi command struct
3894 *
3895 * This function issues a device reset to the affected device.
3896 * A LUN reset will be sent to the device first. If that does
3897 * not work, a target reset will be sent.
3898 *
3899 * Return value:
3900 *	SUCCESS / FAILED
3901 **/
3902static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3903{
3904	struct ipr_cmnd *ipr_cmd;
3905	struct ipr_ioa_cfg *ioa_cfg;
3906	struct ipr_resource_entry *res;
3907	struct ata_port *ap;
3908	int rc = 0;
3909
3910	ENTER;
3911	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3912	res = scsi_cmd->device->hostdata;
3913
3914	if (!res)
3915		return FAILED;
3916
3917	/*
3918	 * If we are currently going through reset/reload, return failed. This will force the
3919	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3920	 * reset to complete
3921	 */
3922	if (ioa_cfg->in_reset_reload)
3923		return FAILED;
3924	if (ioa_cfg->ioa_is_dead)
3925		return FAILED;
3926
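	/*
	 * Redirect the done handlers of any ops still outstanding to this
	 * device so they are reaped through the eh done paths, and mark
	 * outstanding ATA commands failed so libata's EH will handle them.
	 */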
3927	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3928		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3929			if (ipr_cmd->scsi_cmd)
3930				ipr_cmd->done = ipr_scsi_eh_done;
3931			if (ipr_cmd->qc)
3932				ipr_cmd->done = ipr_sata_eh_done;
3933			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3934				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3935				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3936			}
3937		}
3938	}
3939
3940	res->resetting_device = 1;
3941	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3942
3943	if (ipr_is_gata(res) && res->sata_port) {
3944		ap = res->sata_port->ap;
3945		spin_unlock_irq(scsi_cmd->device->host->host_lock);
3946		ata_std_error_handler(ap);
3947		spin_lock_irq(scsi_cmd->device->host->host_lock);
3948
3949		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3950			if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3951				rc = -EIO;
3952				break;
3953			}
3954		}
3955	} else
3956		rc = ipr_device_reset(ioa_cfg, res);
3957	res->resetting_device = 0;
3958
3959	LEAVE;
3960	return (rc ? FAILED : SUCCESS);
3961}
3962
3963static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3964{
3965	int rc;
3966
3967	spin_lock_irq(cmd->device->host->host_lock);
3968	rc = __ipr_eh_dev_reset(cmd);
3969	spin_unlock_irq(cmd->device->host->host_lock);
3970
3971	return rc;
3972}
3973
3974/**
3975 * ipr_bus_reset_done - Op done function for bus reset.
3976 * @ipr_cmd:	ipr command struct
3977 *
3978 * This function is the op done function for a bus reset
3979 *
3980 * Return value:
3981 * 	none
3982 **/
3983static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3984{
3985	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3986	struct ipr_resource_entry *res;
3987
3988	ENTER;
3989	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3990		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3991			    sizeof(res->cfgte.res_handle))) {
3992			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3993			break;
3994		}
3995	}
3996
3997	/*
3998	 * If abort has not completed, indicate the reset has, else call the
3999	 * abort's done function to wake the sleeping eh thread
4000	 */
4001	if (ipr_cmd->sibling->sibling)
4002		ipr_cmd->sibling->sibling = NULL;
4003	else
4004		ipr_cmd->sibling->done(ipr_cmd->sibling);
4005
4006	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4007	LEAVE;
4008}
4009
4010/**
4011 * ipr_abort_timeout - An abort task has timed out
4012 * @ipr_cmd:	ipr command struct
4013 *
4014 * This function handles when an abort task times out. If this
4015 * happens we issue a bus reset since we have resources tied
4016 * up that must be freed before returning to the midlayer.
4017 *
4018 * Return value:
4019 *	none
4020 **/
4021static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4022{
4023	struct ipr_cmnd *reset_cmd;
4024	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4025	struct ipr_cmd_pkt *cmd_pkt;
4026	unsigned long lock_flags = 0;
4027
4028	ENTER;
4029	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
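	/*
	 * If the abort completed while we were acquiring the lock, or an
	 * adapter reset is already in progress, there is nothing to do.
	 */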
4030	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4031		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4032		return;
4033	}
4034
4035	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4036	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4037	ipr_cmd->sibling = reset_cmd;
4038	reset_cmd->sibling = ipr_cmd;
4039	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4040	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4041	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4042	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4043	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4044
4045	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4046	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4047	LEAVE;
4048}
4049
4050/**
4051 * ipr_cancel_op - Cancel specified op
4052 * @scsi_cmd:	scsi command struct
4053 *
4054 * This function cancels specified op.
4055 *
4056 * Return value:
4057 *	SUCCESS / FAILED
4058 **/
4059static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4060{
4061	struct ipr_cmnd *ipr_cmd;
4062	struct ipr_ioa_cfg *ioa_cfg;
4063	struct ipr_resource_entry *res;
4064	struct ipr_cmd_pkt *cmd_pkt;
4065	u32 ioasc;
4066	int op_found = 0;
4067
4068	ENTER;
4069	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4070	res = scsi_cmd->device->hostdata;
4071
4072	/* If we are currently going through reset/reload, return failed.
4073	 * This will force the mid-layer to call ipr_eh_host_reset,
4074	 * which will then go to sleep and wait for the reset to complete
4075	 */
4076	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4077		return FAILED;
4078	if (!res || !ipr_is_gscsi(res))
4079		return FAILED;
4080
4081	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4082		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4083			ipr_cmd->done = ipr_scsi_eh_done;
4084			op_found = 1;
4085			break;
4086		}
4087	}
4088
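	/*
	 * If the op is no longer on the pending queue it has already
	 * completed, so there is nothing left to abort.
	 */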
4089	if (!op_found)
4090		return SUCCESS;
4091
4092	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4093	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4094	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4095	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4096	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4097	ipr_cmd->u.sdev = scsi_cmd->device;
4098
4099	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4100		    scsi_cmd->cmnd[0]);
4101	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4102	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4103
4104	/*
4105	 * If the abort task timed out and we sent a bus reset, we will get
4106	 * one of the following responses to the abort
4107	 */
4108	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4109		ioasc = 0;
4110		ipr_trace;
4111	}
4112
4113	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4114	if (!ipr_is_naca_model(res))
4115		res->needs_sync_complete = 1;
4116
4117	LEAVE;
4118	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4119}
4120
4121/**
4122 * ipr_eh_abort - Abort a single op
4123 * @scsi_cmd:	scsi command struct
4124 *
4125 * Return value:
4126 * 	SUCCESS / FAILED
4127 **/
4128static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4129{
4130	unsigned long flags;
4131	int rc;
4132
4133	ENTER;
4134
4135	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4136	rc = ipr_cancel_op(scsi_cmd);
4137	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4138
4139	LEAVE;
4140	return rc;
4141}
4142
4143/**
4144 * ipr_handle_other_interrupt - Handle "other" interrupts
4145 * @ioa_cfg:	ioa config struct
4146 * @int_reg:	interrupt register
4147 *
4148 * Return value:
4149 * 	IRQ_NONE / IRQ_HANDLED
4150 **/
4151static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4152					      volatile u32 int_reg)
4153{
4154	irqreturn_t rc = IRQ_HANDLED;
4155
4156	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4157		/* Mask the interrupt */
4158		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4159
4160		/* Clear the interrupt */
4161		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4162		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4163
4164		list_del(&ioa_cfg->reset_cmd->queue);
4165		del_timer(&ioa_cfg->reset_cmd->timer);
4166		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4167	} else {
4168		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4169			ioa_cfg->ioa_unit_checked = 1;
4170		else
4171			dev_err(&ioa_cfg->pdev->dev,
4172				"Permanent IOA failure. 0x%08X\n", int_reg);
4173
4174		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4175			ioa_cfg->sdt_state = GET_DUMP;
4176
4177		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4178		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4179	}
4180
4181	return rc;
4182}
4183
4184/**
4185 * ipr_isr - Interrupt service routine
4186 * @irq:	irq number
4187 * @devp:	pointer to ioa config struct
4188 *
4189 * Return value:
4190 * 	IRQ_NONE / IRQ_HANDLED
4191 **/
4192static irqreturn_t ipr_isr(int irq, void *devp)
4193{
4194	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4195	unsigned long lock_flags = 0;
4196	volatile u32 int_reg, int_mask_reg;
4197	u32 ioasc;
4198	u16 cmd_index;
4199	struct ipr_cmnd *ipr_cmd;
4200	irqreturn_t rc = IRQ_NONE;
4201
4202	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4203
4204	/* If interrupts are disabled, ignore the interrupt */
4205	if (!ioa_cfg->allow_interrupts) {
4206		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4207		return IRQ_NONE;
4208	}
4209
4210	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4211	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4212
4213	/* If an interrupt on the adapter did not occur, ignore it */
4214	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4215		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4216		return IRQ_NONE;
4217	}
4218
4219	while (1) {
4220		ipr_cmd = NULL;
4221
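		/*
		 * Reap responses from the host request/response queue. The
		 * toggle bit flips each time the queue wraps, so an entry
		 * whose toggle bit matches ours is a new response.
		 */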
4222		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4223		       ioa_cfg->toggle_bit) {
4224
4225			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4226				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4227
4228			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4229				ioa_cfg->errors_logged++;
4230				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4231
4232				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4233					ioa_cfg->sdt_state = GET_DUMP;
4234
4235				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4236				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237				return IRQ_HANDLED;
4238			}
4239
4240			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4241
4242			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4243
4244			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4245
4246			list_del(&ipr_cmd->queue);
4247			del_timer(&ipr_cmd->timer);
4248			ipr_cmd->done(ipr_cmd);
4249
4250			rc = IRQ_HANDLED;
4251
4252			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4253				ioa_cfg->hrrq_curr++;
4254			} else {
4255				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4256				ioa_cfg->toggle_bit ^= 1u;
4257			}
4258		}
4259
4260		if (ipr_cmd != NULL) {
4261			/* Clear the PCI interrupt */
4262			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4263			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4264		} else
4265			break;
4266	}
4267
4268	if (unlikely(rc == IRQ_NONE))
4269		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4270
4271	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4272	return rc;
4273}
4274
4275/**
4276 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4277 * @ioa_cfg:	ioa config struct
4278 * @ipr_cmd:	ipr command struct
4279 *
4280 * Return value:
4281 * 	0 on success / -1 on failure
4282 **/
4283static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4284			   struct ipr_cmnd *ipr_cmd)
4285{
4286	int i, nseg;
4287	struct scatterlist *sg;
4288	u32 length;
4289	u32 ioadl_flags = 0;
4290	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4291	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4292	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4293
4294	length = scsi_bufflen(scsi_cmd);
4295	if (!length)
4296		return 0;
4297
4298	nseg = scsi_dma_map(scsi_cmd);
4299	if (nseg < 0) {
4300		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4301		return -1;
4302	}
4303
4304	ipr_cmd->dma_use_sg = nseg;
4305
4306	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4307		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4308		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4309		ioarcb->write_data_transfer_length = cpu_to_be32(length);
4310		ioarcb->write_ioadl_len =
4311			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4312	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4313		ioadl_flags = IPR_IOADL_FLAGS_READ;
4314		ioarcb->read_data_transfer_length = cpu_to_be32(length);
4315		ioarcb->read_ioadl_len =
4316			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4317	}
4318
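	/*
	 * If the S/G list is small enough, build the IOADL directly in the
	 * IOARCB so the adapter fetches it along with the command block and
	 * avoids a separate DMA of the descriptor list.
	 */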
4319	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4320		ioadl = ioarcb->add_data.u.ioadl;
4321		ioarcb->write_ioadl_addr =
4322			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4323				    offsetof(struct ipr_ioarcb, add_data));
4324		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4325	}
4326
4327	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4328		ioadl[i].flags_and_data_len =
4329			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4330		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4331	}
4332
4333	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4334	return 0;
4335}
4336
4337/**
4338 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4339 * @scsi_cmd:	scsi command struct
4340 *
4341 * Return value:
4342 * 	task attributes
4343 **/
4344static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4345{
4346	u8 tag[2];
4347	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4348
4349	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4350		switch (tag[0]) {
4351		case MSG_SIMPLE_TAG:
4352			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4353			break;
4354		case MSG_HEAD_TAG:
4355			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4356			break;
4357		case MSG_ORDERED_TAG:
4358			rc = IPR_FLAGS_LO_ORDERED_TASK;
4359			break;
4360		}
4361	}
4362
4363	return rc;
4364}
4365
4366/**
4367 * ipr_erp_done - Process completion of ERP for a device
4368 * @ipr_cmd:		ipr command struct
4369 *
4370 * This function copies the sense buffer into the scsi_cmd
4371 * struct and pushes the scsi_done function.
4372 *
4373 * Return value:
4374 * 	nothing
4375 **/
4376static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4377{
4378	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4379	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4380	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4381	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4382
4383	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4384		scsi_cmd->result |= (DID_ERROR << 16);
4385		scmd_printk(KERN_ERR, scsi_cmd,
4386			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4387	} else {
4388		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4389		       SCSI_SENSE_BUFFERSIZE);
4390	}
4391
4392	if (res) {
4393		if (!ipr_is_naca_model(res))
4394			res->needs_sync_complete = 1;
4395		res->in_erp = 0;
4396	}
4397	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4398	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4399	scsi_cmd->scsi_done(scsi_cmd);
4400}
4401
4402/**
4403 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4404 * @ipr_cmd:	ipr command struct
4405 *
4406 * Return value:
4407 * 	none
4408 **/
4409static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4410{
4411	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4412	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4413	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4414
4415	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4416	ioarcb->write_data_transfer_length = 0;
4417	ioarcb->read_data_transfer_length = 0;
4418	ioarcb->write_ioadl_len = 0;
4419	ioarcb->read_ioadl_len = 0;
4420	ioasa->ioasc = 0;
4421	ioasa->residual_data_len = 0;
4422	ioarcb->write_ioadl_addr =
4423		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4424	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4425}
4426
4427/**
4428 * ipr_erp_request_sense - Send request sense to a device
4429 * @ipr_cmd:	ipr command struct
4430 *
4431 * This function sends a request sense to a device as a result
4432 * of a check condition.
4433 *
4434 * Return value:
4435 * 	nothing
4436 **/
4437static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4438{
4439	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4440	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4441
4442	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4443		ipr_erp_done(ipr_cmd);
4444		return;
4445	}
4446
4447	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4448
4449	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4450	cmd_pkt->cdb[0] = REQUEST_SENSE;
4451	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4452	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4453	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4454	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4455
4456	ipr_cmd->ioadl[0].flags_and_data_len =
4457		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4458	ipr_cmd->ioadl[0].address =
4459		cpu_to_be32(ipr_cmd->sense_buffer_dma);
4460
4461	ipr_cmd->ioarcb.read_ioadl_len =
4462		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4463	ipr_cmd->ioarcb.read_data_transfer_length =
4464		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4465
4466	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4467		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4468}
4469
4470/**
4471 * ipr_erp_cancel_all - Send cancel all to a device
4472 * @ipr_cmd:	ipr command struct
4473 *
4474 * This function sends a cancel all to a device to clear the
4475 * queue. If we are running TCQ on the device, QERR is set to 1,
4476 * which means all outstanding ops have been dropped on the floor.
4477 * Cancel all will return them to us.
4478 *
4479 * Return value:
4480 * 	nothing
4481 **/
4482static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4483{
4484	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4485	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4486	struct ipr_cmd_pkt *cmd_pkt;
4487
4488	res->in_erp = 1;
4489
4490	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4491
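	/*
	 * An untagged device has no queue of ops to flush, so skip the
	 * cancel all and go straight to the request sense.
	 */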
4492	if (!scsi_get_tag_type(scsi_cmd->device)) {
4493		ipr_erp_request_sense(ipr_cmd);
4494		return;
4495	}
4496
4497	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4498	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4499	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4500
4501	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4502		   IPR_CANCEL_ALL_TIMEOUT);
4503}
4504
4505/**
4506 * ipr_dump_ioasa - Dump contents of IOASA
4507 * @ioa_cfg:	ioa config struct
4508 * @ipr_cmd:	ipr command struct
4509 * @res:		resource entry struct
4510 *
4511 * This function is invoked by the interrupt handler when ops
4512 * fail. It will log the IOASA if appropriate. Only called
4513 * for GPDD ops.
4514 *
4515 * Return value:
4516 * 	none
4517 **/
4518static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4519			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4520{
4521	int i;
4522	u16 data_len;
4523	u32 ioasc, fd_ioasc;
4524	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4525	__be32 *ioasa_data = (__be32 *)ioasa;
4526	int error_index;
4527
4528	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4529	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4530
4531	if (0 == ioasc)
4532		return;
4533
4534	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4535		return;
4536
4537	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4538		error_index = ipr_get_error(fd_ioasc);
4539	else
4540		error_index = ipr_get_error(ioasc);
4541
4542	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4543		/* Don't log an error if the IOA already logged one */
4544		if (ioasa->ilid != 0)
4545			return;
4546
4547		if (!ipr_is_gscsi(res))
4548			return;
4549
4550		if (ipr_error_table[error_index].log_ioasa == 0)
4551			return;
4552	}
4553
4554	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4555
4556	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4557		data_len = sizeof(struct ipr_ioasa);
4558	else
4559		data_len = be16_to_cpu(ioasa->ret_stat_len);
4560
4561	ipr_err("IOASA Dump:\n");
4562
4563	for (i = 0; i < data_len / 4; i += 4) {
4564		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4565			be32_to_cpu(ioasa_data[i]),
4566			be32_to_cpu(ioasa_data[i+1]),
4567			be32_to_cpu(ioasa_data[i+2]),
4568			be32_to_cpu(ioasa_data[i+3]));
4569	}
4570}
4571
4572/**
4573 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4574 * @ipr_cmd:	ipr command struct
4576 *
4577 * Return value:
4578 * 	none
4579 **/
4580static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4581{
4582	u32 failing_lba;
4583	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4584	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4585	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4586	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4587
4588	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4589
4590	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4591		return;
4592
4593	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4594
4595	if (ipr_is_vset_device(res) &&
4596	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4597	    ioasa->u.vset.failing_lba_hi != 0) {
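		/*
		 * The failing LBA will not fit in fixed format sense data,
		 * so build descriptor format (0x72) sense with an
		 * information descriptor carrying the 64-bit LBA.
		 */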
4598		sense_buf[0] = 0x72;
4599		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4600		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4601		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4602
4603		sense_buf[7] = 12;
4604		sense_buf[8] = 0;
4605		sense_buf[9] = 0x0A;
4606		sense_buf[10] = 0x80;
4607
4608		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4609
4610		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4611		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4612		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4613		sense_buf[15] = failing_lba & 0x000000ff;
4614
4615		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4616
4617		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4618		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4619		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4620		sense_buf[19] = failing_lba & 0x000000ff;
4621	} else {
4622		sense_buf[0] = 0x70;
4623		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4624		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4625		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4626
4627		/* Illegal request */
4628		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4629		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4630			sense_buf[7] = 10;	/* additional length */
4631
4632			/* IOARCB was in error */
4633			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4634				sense_buf[15] = 0xC0;
4635			else	/* Parameter data was invalid */
4636				sense_buf[15] = 0x80;
4637
4638			sense_buf[16] =
4639			    ((IPR_FIELD_POINTER_MASK &
4640			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4641			sense_buf[17] =
4642			    (IPR_FIELD_POINTER_MASK &
4643			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4644		} else {
4645			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4646				if (ipr_is_vset_device(res))
4647					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4648				else
4649					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4650
4651				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4652				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4653				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4654				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4655				sense_buf[6] = failing_lba & 0x000000ff;
4656			}
4657
4658			sense_buf[7] = 6;	/* additional length */
4659		}
4660	}
4661}
4662
4663/**
4664 * ipr_get_autosense - Copy autosense data to sense buffer
4665 * @ipr_cmd:	ipr command struct
4666 *
4667 * This function copies the autosense buffer to the buffer
4668 * in the scsi_cmd, if there is autosense available.
4669 *
4670 * Return value:
4671 *	1 if autosense was available / 0 if not
4672 **/
4673static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4674{
4675	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4676
4677	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4678		return 0;
4679
4680	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4681	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4682		   SCSI_SENSE_BUFFERSIZE));
4683	return 1;
4684}
4685
4686/**
4687 * ipr_erp_start - Process an error response for a SCSI op
4688 * @ioa_cfg:	ioa config struct
4689 * @ipr_cmd:	ipr command struct
4690 *
4691 * This function determines whether or not to initiate ERP
4692 * on the affected device.
4693 *
4694 * Return value:
4695 * 	nothing
4696 **/
4697static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4698			      struct ipr_cmnd *ipr_cmd)
4699{
4700	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4701	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4702	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4703	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4704
4705	if (!res) {
4706		ipr_scsi_eh_done(ipr_cmd);
4707		return;
4708	}
4709
4710	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4711		ipr_gen_sense(ipr_cmd);
4712
4713	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4714
4715	switch (masked_ioasc) {
4716	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4717		if (ipr_is_naca_model(res))
4718			scsi_cmd->result |= (DID_ABORT << 16);
4719		else
4720			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4721		break;
4722	case IPR_IOASC_IR_RESOURCE_HANDLE:
4723	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4724		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4725		break;
4726	case IPR_IOASC_HW_SEL_TIMEOUT:
4727		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4728		if (!ipr_is_naca_model(res))
4729			res->needs_sync_complete = 1;
4730		break;
4731	case IPR_IOASC_SYNC_REQUIRED:
4732		if (!res->in_erp)
4733			res->needs_sync_complete = 1;
4734		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4735		break;
4736	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4737	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4738		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4739		break;
4740	case IPR_IOASC_BUS_WAS_RESET:
4741	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4742		/*
4743		 * Report the bus reset and ask for a retry. The device
4744		 * will give CC/UA the next command.
4745		 */
4746		if (!res->resetting_device)
4747			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4748		scsi_cmd->result |= (DID_ERROR << 16);
4749		if (!ipr_is_naca_model(res))
4750			res->needs_sync_complete = 1;
4751		break;
4752	case IPR_IOASC_HW_DEV_BUS_STATUS:
4753		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4754		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4755			if (!ipr_get_autosense(ipr_cmd)) {
4756				if (!ipr_is_naca_model(res)) {
4757					ipr_erp_cancel_all(ipr_cmd);
4758					return;
4759				}
4760			}
4761		}
4762		if (!ipr_is_naca_model(res))
4763			res->needs_sync_complete = 1;
4764		break;
4765	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4766		break;
4767	default:
4768		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4769			scsi_cmd->result |= (DID_ERROR << 16);
4770		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4771			res->needs_sync_complete = 1;
4772		break;
4773	}
4774
4775	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4776	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4777	scsi_cmd->scsi_done(scsi_cmd);
4778}
4779
4780/**
4781 * ipr_scsi_done - mid-layer done function
4782 * @ipr_cmd:	ipr command struct
4783 *
4784 * This function is invoked by the interrupt handler for
4785 * ops generated by the SCSI mid-layer
4786 *
4787 * Return value:
4788 * 	none
4789 **/
4790static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4791{
4792	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4793	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4794	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4795
4796	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4797
4798	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4799		scsi_dma_unmap(ipr_cmd->scsi_cmd);
4800		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4801		scsi_cmd->scsi_done(scsi_cmd);
4802	} else
4803		ipr_erp_start(ioa_cfg, ipr_cmd);
4804}
4805
4806/**
4807 * ipr_queuecommand - Queue a mid-layer request
4808 * @scsi_cmd:	scsi command struct
4809 * @done:		done function
4810 *
4811 * This function queues a request generated by the mid-layer.
4812 *
4813 * Return value:
4814 *	0 on success
4815 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4816 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4817 **/
4818static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4819			    void (*done) (struct scsi_cmnd *))
4820{
4821	struct ipr_ioa_cfg *ioa_cfg;
4822	struct ipr_resource_entry *res;
4823	struct ipr_ioarcb *ioarcb;
4824	struct ipr_cmnd *ipr_cmd;
4825	int rc = 0;
4826
4827	scsi_cmd->scsi_done = done;
4828	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4829	res = scsi_cmd->device->hostdata;
4830	scsi_cmd->result = (DID_OK << 16);
4831
4832	/*
4833	 * We are currently blocking all devices due to a host reset.
4834	 * We have told the host to stop giving us new requests, but
4835	 * ERP ops don't count. FIXME
4836	 */
4837	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4838		return SCSI_MLQUEUE_HOST_BUSY;
4839
4840	/*
4841	 * FIXME - Create scsi_set_host_offline interface
4842	 *  and the ioa_is_dead check can be removed
4843	 */
4844	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4845		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4846		scsi_cmd->result = (DID_NO_CONNECT << 16);
4847		scsi_cmd->scsi_done(scsi_cmd);
4848		return 0;
4849	}
4850
4851	if (ipr_is_gata(res) && res->sata_port)
4852		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4853
4854	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4855	ioarcb = &ipr_cmd->ioarcb;
4856	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4857
4858	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4859	ipr_cmd->scsi_cmd = scsi_cmd;
4860	ioarcb->res_handle = res->cfgte.res_handle;
4861	ipr_cmd->done = ipr_scsi_done;
4862	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4863
4864	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4865		if (scsi_cmd->underflow == 0)
4866			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4867
4868		if (res->needs_sync_complete) {
4869			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4870			res->needs_sync_complete = 0;
4871		}
4872
4873		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4874		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4875		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4876		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4877	}
4878
4879	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4880	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4881		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4882
4883	if (likely(rc == 0))
4884		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4885
4886	if (likely(rc == 0)) {
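		/*
		 * Order all memory writes to the IOARCB before the MMIO
		 * write below, so the adapter never fetches a half-built
		 * command block.
		 */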
4887		mb();
4888		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4889		       ioa_cfg->regs.ioarrin_reg);
4890	} else {
4891		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4892		return SCSI_MLQUEUE_HOST_BUSY;
4893	}
4894
4895	return 0;
4896}
4897
4898/**
4899 * ipr_ioctl - IOCTL handler
4900 * @sdev:	scsi device struct
4901 * @cmd:	IOCTL cmd
4902 * @arg:	IOCTL arg
4903 *
4904 * Return value:
4905 * 	0 on success / other on failure
4906 **/
4907static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4908{
4909	struct ipr_resource_entry *res;
4910
4911	res = (struct ipr_resource_entry *)sdev->hostdata;
4912	if (res && ipr_is_gata(res)) {
4913		if (cmd == HDIO_GET_IDENTITY)
4914			return -ENOTTY;
4915		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
4916	}
4917
4918	return -EINVAL;
4919}
4920
4921/**
4922 * ipr_ioa_info - Get information about the card/driver
4923 * @host:	scsi host struct
4924 *
4925 * Return value:
4926 * 	pointer to buffer with description string
4927 **/
4928static const char * ipr_ioa_info(struct Scsi_Host *host)
4929{
4930	static char buffer[512];
4931	struct ipr_ioa_cfg *ioa_cfg;
4932	unsigned long lock_flags = 0;
4933
4934	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4935
4936	spin_lock_irqsave(host->host_lock, lock_flags);
4937	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4938	spin_unlock_irqrestore(host->host_lock, lock_flags);
4939
4940	return buffer;
4941}
4942
4943static struct scsi_host_template driver_template = {
4944	.module = THIS_MODULE,
4945	.name = "IPR",
4946	.info = ipr_ioa_info,
4947	.ioctl = ipr_ioctl,
4948	.queuecommand = ipr_queuecommand,
4949	.eh_abort_handler = ipr_eh_abort,
4950	.eh_device_reset_handler = ipr_eh_dev_reset,
4951	.eh_host_reset_handler = ipr_eh_host_reset,
4952	.slave_alloc = ipr_slave_alloc,
4953	.slave_configure = ipr_slave_configure,
4954	.slave_destroy = ipr_slave_destroy,
4955	.target_alloc = ipr_target_alloc,
4956	.target_destroy = ipr_target_destroy,
4957	.change_queue_depth = ipr_change_queue_depth,
4958	.change_queue_type = ipr_change_queue_type,
4959	.bios_param = ipr_biosparam,
4960	.can_queue = IPR_MAX_COMMANDS,
4961	.this_id = -1,
4962	.sg_tablesize = IPR_MAX_SGLIST,
4963	.max_sectors = IPR_IOA_MAX_SECTORS,
4964	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4965	.use_clustering = ENABLE_CLUSTERING,
4966	.shost_attrs = ipr_ioa_attrs,
4967	.sdev_attrs = ipr_dev_attrs,
4968	.proc_name = IPR_NAME
4969};
4970
4971/**
4972 * ipr_ata_phy_reset - libata phy_reset handler
4973 * @ap:		ata port to reset
4974 *
4975 **/
4976static void ipr_ata_phy_reset(struct ata_port *ap)
4977{
4978	unsigned long flags;
4979	struct ipr_sata_port *sata_port = ap->private_data;
4980	struct ipr_resource_entry *res = sata_port->res;
4981	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4982	int rc;
4983
4984	ENTER;
4985	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4986	while (ioa_cfg->in_reset_reload) {
4987		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4988		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4989		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4990	}
4991
4992	if (!ioa_cfg->allow_cmds)
4993		goto out_unlock;
4994
4995	rc = ipr_device_reset(ioa_cfg, res);
4996
4997	if (rc) {
4998		ata_port_disable(ap);
4999		goto out_unlock;
5000	}
5001
5002	switch (res->cfgte.proto) {
5003	case IPR_PROTO_SATA:
5004	case IPR_PROTO_SAS_STP:
5005		ap->link.device[0].class = ATA_DEV_ATA;
5006		break;
5007	case IPR_PROTO_SATA_ATAPI:
5008	case IPR_PROTO_SAS_STP_ATAPI:
5009		ap->link.device[0].class = ATA_DEV_ATAPI;
5010		break;
5011	default:
5012		ap->link.device[0].class = ATA_DEV_UNKNOWN;
5013		ata_port_disable(ap);
5014		break;
5015	}
5016
5017out_unlock:
5018	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5019	LEAVE;
5020}
5021
5022/**
5023 * ipr_ata_post_internal - Cleanup after an internal command
5024 * @qc:	ATA queued command
5025 *
5026 * Return value:
5027 * 	none
5028 **/
5029static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5030{
5031	struct ipr_sata_port *sata_port = qc->ap->private_data;
5032	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5033	struct ipr_cmnd *ipr_cmd;
5034	unsigned long flags;
5035
5036	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5037	while (ioa_cfg->in_reset_reload) {
5038		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5039		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5040		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5041	}
5042
5043	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5044		if (ipr_cmd->qc == qc) {
5045			ipr_device_reset(ioa_cfg, sata_port->res);
5046			break;
5047		}
5048	}
5049	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5050}
5051
5052/**
5053 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5054 * @regs:	destination
5055 * @tf:	source ATA taskfile
5056 *
5057 * Return value:
5058 * 	none
5059 **/
5060static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5061			     struct ata_taskfile *tf)
5062{
5063	regs->feature = tf->feature;
5064	regs->nsect = tf->nsect;
5065	regs->lbal = tf->lbal;
5066	regs->lbam = tf->lbam;
5067	regs->lbah = tf->lbah;
5068	regs->device = tf->device;
5069	regs->command = tf->command;
5070	regs->hob_feature = tf->hob_feature;
5071	regs->hob_nsect = tf->hob_nsect;
5072	regs->hob_lbal = tf->hob_lbal;
5073	regs->hob_lbam = tf->hob_lbam;
5074	regs->hob_lbah = tf->hob_lbah;
5075	regs->ctl = tf->ctl;
5076}
5077
5078/**
5079 * ipr_sata_done - done function for SATA commands
5080 * @ipr_cmd:	ipr command struct
5081 *
5082 * This function is invoked by the interrupt handler for
5083 * ops generated by the SCSI mid-layer to SATA devices
5084 *
5085 * Return value:
5086 * 	none
5087 **/
5088static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5089{
5090	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5091	struct ata_queued_cmd *qc = ipr_cmd->qc;
5092	struct ipr_sata_port *sata_port = qc->ap->private_data;
5093	struct ipr_resource_entry *res = sata_port->res;
5094	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5095
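	/*
	 * Save the returned ATA register values; ipr_qc_fill_rtf() builds
	 * the result taskfile from this copy when the qc completes.
	 */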
5096	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5097	       sizeof(struct ipr_ioasa_gata));
5098	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5099
5100	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5101		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5102					 res->cfgte.res_addr.target);
5103
5104	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5105		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5106	else
5107		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5108	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5109	ata_qc_complete(qc);
5110}
5111
5112/**
5113 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5114 * @ipr_cmd:	ipr command struct
5115 * @qc:		ATA queued command
5116 *
5117 **/
5118static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5119				struct ata_queued_cmd *qc)
5120{
5121	u32 ioadl_flags = 0;
5122	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5123	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5124	struct ipr_ioadl_desc *last_ioadl = NULL;
5125	int len = qc->nbytes;
5126	struct scatterlist *sg;
5127	unsigned int si;
5128
5129	if (len == 0)
5130		return;
5131
5132	if (qc->dma_dir == DMA_TO_DEVICE) {
5133		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5134		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5135		ioarcb->write_data_transfer_length = cpu_to_be32(len);
5136		ioarcb->write_ioadl_len =
5137			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5138	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5139		ioadl_flags = IPR_IOADL_FLAGS_READ;
5140		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5141		ioarcb->read_ioadl_len =
5142			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5143	}
5144
5145	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5146		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5147		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5148
5149		last_ioadl = ioadl;
5150		ioadl++;
5151	}
5152
5153	if (likely(last_ioadl))
5154		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5155}
5156
5157/**
5158 * ipr_qc_issue - Issue a SATA qc to a device
5159 * @qc:	queued command
5160 *
5161 * Return value:
5162 * 	0 on success
5163 **/
5164static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5165{
5166	struct ata_port *ap = qc->ap;
5167	struct ipr_sata_port *sata_port = ap->private_data;
5168	struct ipr_resource_entry *res = sata_port->res;
5169	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5170	struct ipr_cmnd *ipr_cmd;
5171	struct ipr_ioarcb *ioarcb;
5172	struct ipr_ioarcb_ata_regs *regs;
5173
5174	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5175		return AC_ERR_SYSTEM;
5176
5177	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5178	ioarcb = &ipr_cmd->ioarcb;
5179	regs = &ioarcb->add_data.u.regs;
5180
5181	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5182	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5183
5184	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5185	ipr_cmd->qc = qc;
5186	ipr_cmd->done = ipr_sata_done;
5187	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5188	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5189	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5190	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5191	ipr_cmd->dma_use_sg = qc->n_elem;
5192
5193	ipr_build_ata_ioadl(ipr_cmd, qc);
5194	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5195	ipr_copy_sata_tf(regs, &qc->tf);
5196	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5197	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5198
5199	switch (qc->tf.protocol) {
5200	case ATA_PROT_NODATA:
5201	case ATA_PROT_PIO:
5202		break;
5203
5204	case ATA_PROT_DMA:
5205		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5206		break;
5207
5208	case ATAPI_PROT_PIO:
5209	case ATAPI_PROT_NODATA:
5210		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5211		break;
5212
5213	case ATAPI_PROT_DMA:
5214		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5215		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5216		break;
5217
5218	default:
5219		WARN_ON(1);
5220		return AC_ERR_INVALID;
5221	}
5222
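	/*
	 * As in ipr_queuecommand(), ensure the IOARCB is fully in memory
	 * before the adapter is told to fetch it.
	 */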
5223	mb();
5224	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5225	       ioa_cfg->regs.ioarrin_reg);
5226	return 0;
5227}
5228
5229/**
5230 * ipr_qc_fill_rtf - Read result TF
5231 * @qc: ATA queued command
5232 *
5233 * Return value:
5234 * 	true
5235 **/
5236static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
5237{
5238	struct ipr_sata_port *sata_port = qc->ap->private_data;
5239	struct ipr_ioasa_gata *g = &sata_port->ioasa;
5240	struct ata_taskfile *tf = &qc->result_tf;
5241
5242	tf->feature = g->error;
5243	tf->nsect = g->nsect;
5244	tf->lbal = g->lbal;
5245	tf->lbam = g->lbam;
5246	tf->lbah = g->lbah;
5247	tf->device = g->device;
5248	tf->command = g->status;
5249	tf->hob_nsect = g->hob_nsect;
5250	tf->hob_lbal = g->hob_lbal;
5251	tf->hob_lbam = g->hob_lbam;
5252	tf->hob_lbah = g->hob_lbah;
5253	tf->ctl = g->alt_status;
5254
5255	return true;
5256}
5257
5258static struct ata_port_operations ipr_sata_ops = {
5259	.phy_reset = ipr_ata_phy_reset,
5260	.hardreset = ipr_sata_reset,
5261	.post_internal_cmd = ipr_ata_post_internal,
5262	.qc_prep = ata_noop_qc_prep,
5263	.qc_issue = ipr_qc_issue,
5264	.qc_fill_rtf = ipr_qc_fill_rtf,
5265	.port_start = ata_sas_port_start,
5266	.port_stop = ata_sas_port_stop
5267};
5268
5269static struct ata_port_info sata_port_info = {
5270	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5271	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5272	.pio_mask	= 0x10, /* pio4 */
5273	.mwdma_mask = 0x07,
5274	.udma_mask	= 0x7f, /* udma0-6 */
5275	.port_ops	= &ipr_sata_ops
5276};
5277
5278#ifdef CONFIG_PPC_PSERIES
5279static const u16 ipr_blocked_processors[] = {
5280	PV_NORTHSTAR,
5281	PV_PULSAR,
5282	PV_POWER4,
5283	PV_ICESTAR,
5284	PV_SSTAR,
5285	PV_POWER4p,
5286	PV_630,
5287	PV_630p
5288};
5289
5290/**
5291 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5292 * @ioa_cfg:	ioa cfg struct
5293 *
5294 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5295 * certain pSeries hardware. This function determines if the given
5296 * adapter is in one of these configurations or not.
5297 *
5298 * Return value:
5299 * 	1 if adapter is not supported / 0 if adapter is supported
5300 **/
5301static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5302{
5303	int i;
5304
5305	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
5306		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
5307			if (__is_processor(ipr_blocked_processors[i]))
5308				return 1;
5309		}
5310	}
5311	return 0;
5312}
5313#else
5314#define ipr_invalid_adapter(ioa_cfg) 0
5315#endif
5316
5317/**
5318 * ipr_ioa_bringdown_done - IOA bring down completion.
5319 * @ipr_cmd:	ipr command struct
5320 *
5321 * This function processes the completion of an adapter bring down.
5322 * It wakes any reset sleepers.
5323 *
5324 * Return value:
5325 * 	IPR_RC_JOB_RETURN
5326 **/
5327static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5328{
5329	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5330
5331	ENTER;
5332	ioa_cfg->in_reset_reload = 0;
5333	ioa_cfg->reset_retries = 0;
5334	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5335	wake_up_all(&ioa_cfg->reset_wait_q);
5336
5337	spin_unlock_irq(ioa_cfg->host->host_lock);
5338	scsi_unblock_requests(ioa_cfg->host);
5339	spin_lock_irq(ioa_cfg->host->host_lock);
5340	LEAVE;
5341
5342	return IPR_RC_JOB_RETURN;
5343}
5344
5345/**
5346 * ipr_ioa_reset_done - IOA reset completion.
5347 * @ipr_cmd:	ipr command struct
5348 *
5349 * This function processes the completion of an adapter reset.
5350 * It schedules any necessary mid-layer add/removes and
5351 * wakes any reset sleepers.
5352 *
5353 * Return value:
5354 * 	IPR_RC_JOB_RETURN
5355 **/
5356static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5357{
5358	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5359	struct ipr_resource_entry *res;
5360	struct ipr_hostrcb *hostrcb, *temp;
5361	int i = 0;
5362
5363	ENTER;
5364	ioa_cfg->in_reset_reload = 0;
5365	ioa_cfg->allow_cmds = 1;
5366	ioa_cfg->reset_cmd = NULL;
5367	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5368
5369	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5370		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5371			ipr_trace;
5372			break;
5373		}
5374	}
5375	schedule_work(&ioa_cfg->work_q);
5376
5377	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5378		list_del(&hostrcb->queue);
5379		if (i++ < IPR_NUM_LOG_HCAMS)
5380			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5381		else
5382			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5383	}
5384
5385	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5386	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5387
5388	ioa_cfg->reset_retries = 0;
5389	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5390	wake_up_all(&ioa_cfg->reset_wait_q);
5391
5392	spin_unlock(ioa_cfg->host->host_lock);
5393	scsi_unblock_requests(ioa_cfg->host);
5394	spin_lock(ioa_cfg->host->host_lock);
5395
5396	if (!ioa_cfg->allow_cmds)
5397		scsi_block_requests(ioa_cfg->host);
5398
5399	LEAVE;
5400	return IPR_RC_JOB_RETURN;
5401}
5402
5403/**
5404 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5405 * @supported_dev:	supported device struct
5406 * @vpids:			vendor product id struct
5407 *
5408 * Return value:
5409 * 	none
5410 **/
5411static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5412				 struct ipr_std_inq_vpids *vpids)
5413{
5414	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5415	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5416	supported_dev->num_records = 1;
5417	supported_dev->data_length =
5418		cpu_to_be16(sizeof(struct ipr_supported_device));
5419	supported_dev->reserved = 0;
5420}
5421
5422/**
5423 * ipr_set_supported_devs - Send Set Supported Devices for a device
5424 * @ipr_cmd:	ipr command struct
5425 *
5426 * This function sends a Set Supported Devices command to the adapter
5427 *
5428 * Return value:
5429 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5430 **/
5431static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5432{
5433	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5434	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5435	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5436	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5437	struct ipr_resource_entry *res = ipr_cmd->u.res;
5438
5439	ipr_cmd->job_step = ipr_ioa_reset_done;
5440
5441	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5442		if (!ipr_is_scsi_disk(res))
5443			continue;
5444
5445		ipr_cmd->u.res = res;
5446		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5447
5448		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5449		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5450		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5451
5452		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5453		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5454		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5455
5456		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5457							sizeof(struct ipr_supported_device));
5458		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5459					     offsetof(struct ipr_misc_cbs, supp_dev));
5460		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5461		ioarcb->write_data_transfer_length =
5462			cpu_to_be32(sizeof(struct ipr_supported_device));
5463
5464		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5465			   IPR_SET_SUP_DEVICE_TIMEOUT);
5466
5467		ipr_cmd->job_step = ipr_set_supported_devs;
5468		return IPR_RC_JOB_RETURN;
5469	}
5470
5471	return IPR_RC_JOB_CONTINUE;
5472}
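
/*
 * Note (illustrative): this step intentionally re-queues itself once
 * per disk. It issues a single Set Supported Devices, returns
 * IPR_RC_JOB_RETURN, and when that command completes the job router
 * invokes this step again; the saved ipr_cmd->u.res lets
 * list_for_each_entry_continue() resume the walk where it left off.
 */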
5473
5474/**
5475 * ipr_setup_write_cache - Disable write cache if needed
5476 * @ipr_cmd:	ipr command struct
5477 *
5478 * This function sets up the adapter's write cache to the desired setting
5479 *
5480 * Return value:
5481 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5482 **/
5483static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5484{
5485	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5486
5487	ipr_cmd->job_step = ipr_set_supported_devs;
5488	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5489				    struct ipr_resource_entry, queue);
5490
5491	if (ioa_cfg->cache_state != CACHE_DISABLED)
5492		return IPR_RC_JOB_CONTINUE;
5493
5494	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5495	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5496	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5497	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5498
5499	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5500
5501	return IPR_RC_JOB_RETURN;
5502}
5503
5504/**
5505 * ipr_get_mode_page - Locate specified mode page
5506 * @mode_pages:	mode page buffer
5507 * @page_code:	page code to find
5508 * @len:		minimum required length for mode page
5509 *
5510 * Return value:
5511 * 	pointer to mode page / NULL on failure
5512 **/
5513static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5514			       u32 page_code, u32 len)
5515{
5516	struct ipr_mode_page_hdr *mode_hdr;
5517	u32 page_length;
5518	u32 length;
5519
5520	if (!mode_pages || (mode_pages->hdr.length == 0))
5521		return NULL;
5522
5523	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5524	mode_hdr = (struct ipr_mode_page_hdr *)
5525		(mode_pages->data + mode_pages->hdr.block_desc_len);
5526
5527	while (length) {
5528		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5529			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5530				return mode_hdr;
5531			break;
5532		} else {
5533			page_length = (sizeof(struct ipr_mode_page_hdr) +
5534				       mode_hdr->page_length);
5535			length -= page_length;
5536			mode_hdr = (struct ipr_mode_page_hdr *)
5537				((unsigned long)mode_hdr + page_length);
5538		}
5539	}
5540	return NULL;
5541}
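
/*
 * Usage sketch (illustrative, mirroring the callers below): locate a
 * page and cast the returned header to the page-specific structure,
 * checking for NULL since the page may be absent or too short:
 *
 *	struct ipr_mode_page28 *page28;
 *
 *	page28 = ipr_get_mode_page(mode_pages, 0x28,
 *				   sizeof(struct ipr_mode_page28));
 *	if (!page28)
 *		return;
 */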
5542
5543/**
5544 * ipr_check_term_power - Check for term power errors
5545 * @ioa_cfg:	ioa config struct
5546 * @mode_pages:	IOAFP mode pages buffer
5547 *
5548 * Check the IOAFP's mode page 28 for term power errors
5549 *
5550 * Return value:
5551 * 	nothing
5552 **/
5553static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5554				 struct ipr_mode_pages *mode_pages)
5555{
5556	int i;
5557	int entry_length;
5558	struct ipr_dev_bus_entry *bus;
5559	struct ipr_mode_page28 *mode_page;
5560
5561	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5562				      sizeof(struct ipr_mode_page28));
5563
5564	entry_length = mode_page->entry_length;
5565
5566	bus = mode_page->bus;
5567
5568	for (i = 0; i < mode_page->num_entries; i++) {
5569		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5570			dev_err(&ioa_cfg->pdev->dev,
5571				"Term power is absent on scsi bus %d\n",
5572				bus->res_addr.bus);
5573		}
5574
5575		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5576	}
5577}
5578
5579/**
5580 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5581 * @ioa_cfg:	ioa config struct
5582 *
5583 * Looks through the config table checking for SES devices. If
5584 * the SES device is in the SES table indicating a maximum SCSI
5585 * bus speed, the speed is limited for the bus.
5586 *
5587 * Return value:
5588 * 	none
5589 **/
5590static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5591{
5592	u32 max_xfer_rate;
5593	int i;
5594
5595	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5596		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5597						       ioa_cfg->bus_attr[i].bus_width);
5598
5599		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5600			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5601	}
5602}
5603
5604/**
5605 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5606 * @ioa_cfg:	ioa config struct
5607 * @mode_pages:	mode page 28 buffer
5608 *
5609 * Updates mode page 28 based on driver configuration
5610 *
5611 * Return value:
5612 * 	none
5613 **/
5614static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5615					  	struct ipr_mode_pages *mode_pages)
5616{
5617	int i, entry_length;
5618	struct ipr_dev_bus_entry *bus;
5619	struct ipr_bus_attributes *bus_attr;
5620	struct ipr_mode_page28 *mode_page;
5621
5622	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5623				      sizeof(struct ipr_mode_page28));
5624
5625	entry_length = mode_page->entry_length;
5626
5627	/* Loop for each device bus entry */
5628	for (i = 0, bus = mode_page->bus;
5629	     i < mode_page->num_entries;
5630	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5631		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5632			dev_err(&ioa_cfg->pdev->dev,
5633				"Invalid resource address reported: 0x%08X\n",
5634				IPR_GET_PHYS_LOC(bus->res_addr));
5635			continue;
5636		}
5637
5638		bus_attr = &ioa_cfg->bus_attr[i];
5639		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5640		bus->bus_width = bus_attr->bus_width;
5641		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5642		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5643		if (bus_attr->qas_enabled)
5644			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5645		else
5646			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5647	}
5648}
5649
5650/**
5651 * ipr_build_mode_select - Build a mode select command
5652 * @ipr_cmd:	ipr command struct
5653 * @res_handle:	resource handle to send command to
5654 * @parm:		Byte 1 of the Mode Select command
5655 * @dma_addr:	DMA buffer address
5656 * @xfer_len:	data transfer length
5657 *
5658 * Return value:
5659 * 	none
5660 **/
5661static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5662				  __be32 res_handle, u8 parm, u32 dma_addr,
5663				  u8 xfer_len)
5664{
5665	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5666	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5667
5668	ioarcb->res_handle = res_handle;
5669	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5670	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5671	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5672	ioarcb->cmd_pkt.cdb[1] = parm;
5673	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5674
5675	ioadl->flags_and_data_len =
5676		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5677	ioadl->address = cpu_to_be32(dma_addr);
5678	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5679	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5680}
5681
5682/**
5683 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5684 * @ipr_cmd:	ipr command struct
5685 *
5686 * This function sets up the SCSI bus attributes and sends
5687 * a Mode Select for Page 28 to activate them.
5688 *
5689 * Return value:
5690 * 	IPR_RC_JOB_RETURN
5691 **/
5692static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5693{
5694	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5695	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5696	int length;
5697
5698	ENTER;
5699	ipr_scsi_bus_speed_limit(ioa_cfg);
5700	ipr_check_term_power(ioa_cfg, mode_pages);
5701	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5702	length = mode_pages->hdr.length + 1;
5703	mode_pages->hdr.length = 0;
5704
5705	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5706			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5707			      length);
5708
5709	ipr_cmd->job_step = ipr_setup_write_cache;
5710	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5711
5712	LEAVE;
5713	return IPR_RC_JOB_RETURN;
5714}
5715
5716/**
5717 * ipr_build_mode_sense - Builds a mode sense command
5718 * @ipr_cmd:	ipr command struct
5719 * @res_handle:	resource handle to send command to
5720 * @parm:		Byte 2 of mode sense command
5721 * @dma_addr:	DMA address of mode sense buffer
5722 * @xfer_len:	Size of DMA buffer
5723 *
5724 * Return value:
5725 * 	none
5726 **/
5727static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5728				 __be32 res_handle,
5729				 u8 parm, u32 dma_addr, u8 xfer_len)
5730{
5731	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5732	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5733
5734	ioarcb->res_handle = res_handle;
5735	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5736	ioarcb->cmd_pkt.cdb[2] = parm;
5737	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5738	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5739
5740	ioadl->flags_and_data_len =
5741		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5742	ioadl->address = cpu_to_be32(dma_addr);
5743	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5744	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5745}
5746
5747/**
5748 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5749 * @ipr_cmd:	ipr command struct
5750 *
5751 * This function handles the failure of an IOA bringup command.
5752 *
5753 * Return value:
5754 * 	IPR_RC_JOB_RETURN
5755 **/
5756static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5757{
5758	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5759	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5760
5761	dev_err(&ioa_cfg->pdev->dev,
5762		"0x%02X failed with IOASC: 0x%08X\n",
5763		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5764
5765	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5766	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5767	return IPR_RC_JOB_RETURN;
5768}
5769
5770/**
5771 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5772 * @ipr_cmd:	ipr command struct
5773 *
5774 * This function handles the failure of a Mode Sense to the IOAFP.
5775 * Some adapters do not handle all mode pages.
5776 *
5777 * Return value:
5778 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5779 **/
5780static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5781{
5782	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5783
5784	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5785		ipr_cmd->job_step = ipr_setup_write_cache;
5786		return IPR_RC_JOB_CONTINUE;
5787	}
5788
5789	return ipr_reset_cmd_failed(ipr_cmd);
5790}
5791
5792/**
5793 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5794 * @ipr_cmd:	ipr command struct
5795 *
5796 * This function sends a Page 28 mode sense to the IOA to
5797 * retrieve SCSI bus attributes.
5798 *
5799 * Return value:
5800 * 	IPR_RC_JOB_RETURN
5801 **/
5802static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5803{
5804	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5805
5806	ENTER;
5807	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5808			     0x28, ioa_cfg->vpd_cbs_dma +
5809			     offsetof(struct ipr_misc_cbs, mode_pages),
5810			     sizeof(struct ipr_mode_pages));
5811
5812	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5813	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5814
5815	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5816
5817	LEAVE;
5818	return IPR_RC_JOB_RETURN;
5819}
5820
5821/**
5822 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
5823 * @ipr_cmd:	ipr command struct
5824 *
5825 * This function enables dual IOA RAID support if possible.
5826 *
5827 * Return value:
5828 * 	IPR_RC_JOB_RETURN
5829 **/
5830static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
5831{
5832	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5833	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5834	struct ipr_mode_page24 *mode_page;
5835	int length;
5836
5837	ENTER;
5838	mode_page = ipr_get_mode_page(mode_pages, 0x24,
5839				      sizeof(struct ipr_mode_page24));
5840
5841	if (mode_page)
5842		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
5843
5844	length = mode_pages->hdr.length + 1;
5845	mode_pages->hdr.length = 0;
5846
5847	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5848			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5849			      length);
5850
5851	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5852	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5853
5854	LEAVE;
5855	return IPR_RC_JOB_RETURN;
5856}
5857
5858/**
5859 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
5860 * @ipr_cmd:	ipr command struct
5861 *
5862 * This function handles the failure of a Mode Sense to the IOAFP.
5863 * Some adapters do not handle all mode pages.
5864 *
5865 * Return value:
5866 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5867 **/
5868static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
5869{
5870	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5871
5872	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5873		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5874		return IPR_RC_JOB_CONTINUE;
5875	}
5876
5877	return ipr_reset_cmd_failed(ipr_cmd);
5878}
5879
5880/**
5881 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
5882 * @ipr_cmd:	ipr command struct
5883 *
5884 * This function sends a mode sense to the IOA to retrieve
5885 * the IOA Advanced Function Control mode page.
5886 *
5887 * Return value:
5888 * 	IPR_RC_JOB_RETURN
5889 **/
5890static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
5891{
5892	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5893
5894	ENTER;
5895	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5896			     0x24, ioa_cfg->vpd_cbs_dma +
5897			     offsetof(struct ipr_misc_cbs, mode_pages),
5898			     sizeof(struct ipr_mode_pages));
5899
5900	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
5901	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
5902
5903	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5904
5905	LEAVE;
5906	return IPR_RC_JOB_RETURN;
5907}
5908
5909/**
5910 * ipr_init_res_table - Initialize the resource table
5911 * @ipr_cmd:	ipr command struct
5912 *
5913 * This function looks through the existing resource table, comparing
5914 * it with the config table. This function will take care of old/new
5915 * devices and schedule adding/removing them from the mid-layer
5916 * as appropriate.
5917 *
5918 * Return value:
5919 * 	IPR_RC_JOB_CONTINUE
5920 **/
5921static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5922{
5923	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5924	struct ipr_resource_entry *res, *temp;
5925	struct ipr_config_table_entry *cfgte;
5926	int found, i;
5927	LIST_HEAD(old_res);
5928
5929	ENTER;
5930	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5931		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5932
5933	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5934		list_move_tail(&res->queue, &old_res);
5935
5936	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5937		cfgte = &ioa_cfg->cfg_table->dev[i];
5938		found = 0;
5939
5940		list_for_each_entry_safe(res, temp, &old_res, queue) {
5941			if (!memcmp(&res->cfgte.res_addr,
5942				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5943				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5944				found = 1;
5945				break;
5946			}
5947		}
5948
5949		if (!found) {
5950			if (list_empty(&ioa_cfg->free_res_q)) {
5951				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5952				break;
5953			}
5954
5955			found = 1;
5956			res = list_entry(ioa_cfg->free_res_q.next,
5957					 struct ipr_resource_entry, queue);
5958			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5959			ipr_init_res_entry(res);
5960			res->add_to_ml = 1;
5961		}
5962
5963		if (found)
5964			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5965	}
5966
5967	list_for_each_entry_safe(res, temp, &old_res, queue) {
5968		if (res->sdev) {
5969			res->del_from_ml = 1;
5970			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5971			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5972		} else {
5973			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5974		}
5975	}
5976
5977	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
5978		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
5979	else
5980		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5981
5982	LEAVE;
5983	return IPR_RC_JOB_CONTINUE;
5984}
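
/*
 * Note (illustrative): reconciliation works by moving every known
 * resource onto the temporary old_res list, matching config table
 * entries back by resource address, and treating the leftovers as
 * gone: entries still bound to a scsi_device are flagged del_from_ml
 * for the worker thread, the rest return to the free list.
 */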
5985
5986/**
5987 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5988 * @ipr_cmd:	ipr command struct
5989 *
5990 * This function sends a Query IOA Configuration command
5991 * to the adapter to retrieve the IOA configuration table.
5992 *
5993 * Return value:
5994 * 	IPR_RC_JOB_RETURN
5995 **/
5996static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5997{
5998	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5999	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6000	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6001	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6002	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6003
6004	ENTER;
6005	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6006		ioa_cfg->dual_raid = 1;
6007	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6008		 ucode_vpd->major_release, ucode_vpd->card_type,
6009		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6010	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6011	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6012
6013	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6014	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6015	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6016
6017	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6018	ioarcb->read_data_transfer_length =
6019		cpu_to_be32(sizeof(struct ipr_config_table));
6020
6021	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6022	ioadl->flags_and_data_len =
6023		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6024
6025	ipr_cmd->job_step = ipr_init_res_table;
6026
6027	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6028
6029	LEAVE;
6030	return IPR_RC_JOB_RETURN;
6031}
6032
6033/**
6034 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6035 * @ipr_cmd:	ipr command struct
6036 *
6037 * This utility function sends an inquiry to the adapter.
6038 *
6039 * Return value:
6040 * 	none
6041 **/
6042static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6043			      u32 dma_addr, u8 xfer_len)
6044{
6045	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6046	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6047
6048	ENTER;
6049	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6050	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6051
6052	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6053	ioarcb->cmd_pkt.cdb[1] = flags;
6054	ioarcb->cmd_pkt.cdb[2] = page;
6055	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6056
6057	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
6058	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6059
6060	ioadl->address = cpu_to_be32(dma_addr);
6061	ioadl->flags_and_data_len =
6062		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6063
6064	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6065	LEAVE;
6066}
6067
6068/**
6069 * ipr_inquiry_page_supported - Is the given inquiry page supported
6070 * @page0:		inquiry page 0 buffer
6071 * @page:		page code.
6072 *
6073 * This function determines if the specified inquiry page is supported.
6074 *
6075 * Return value:
6076 *	1 if page is supported / 0 if not
6077 **/
6078static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6079{
6080	int i;
6081
6082	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6083		if (page0->page[i] == page)
6084			return 1;
6085
6086	return 0;
6087}
6088
6089/**
6090 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6091 * @ipr_cmd:	ipr command struct
6092 *
6093 * This function sends a Page 0xD0 inquiry to the adapter
6094 * to retrieve adapter capabilities.
6095 *
6096 * Return value:
6097 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6098 **/
6099static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6100{
6101	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6102	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6103	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6104
6105	ENTER;
6106	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6107	memset(cap, 0, sizeof(*cap));
6108
6109	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6110		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6111				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6112				  sizeof(struct ipr_inquiry_cap));
6113		return IPR_RC_JOB_RETURN;
6114	}
6115
6116	LEAVE;
6117	return IPR_RC_JOB_CONTINUE;
6118}
6119
6120/**
6121 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6122 * @ipr_cmd:	ipr command struct
6123 *
6124 * This function sends a Page 3 inquiry to the adapter
6125 * to retrieve software VPD information.
6126 *
6127 * Return value:
6128 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6129 **/
6130static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6131{
6132	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6133	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6134
6135	ENTER;
6136
6137	if (!ipr_inquiry_page_supported(page0, 1))
6138		ioa_cfg->cache_state = CACHE_NONE;
6139
6140	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6141
6142	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6143			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6144			  sizeof(struct ipr_inquiry_page3));
6145
6146	LEAVE;
6147	return IPR_RC_JOB_RETURN;
6148}
6149
6150/**
6151 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6152 * @ipr_cmd:	ipr command struct
6153 *
6154 * This function sends a Page 0 inquiry to the adapter
6155 * to retrieve supported inquiry pages.
6156 *
6157 * Return value:
6158 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6159 **/
6160static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6161{
6162	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6163	char type[5];
6164
6165	ENTER;
6166
6167	/* Grab the type out of the VPD and store it away */
6168	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6169	type[4] = '\0';
6170	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6171
6172	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6173
6174	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6175			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6176			  sizeof(struct ipr_inquiry_page0));
6177
6178	LEAVE;
6179	return IPR_RC_JOB_RETURN;
6180}
6181
6182/**
6183 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6184 * @ipr_cmd:	ipr command struct
6185 *
6186 * This function sends a standard inquiry to the adapter.
6187 *
6188 * Return value:
6189 * 	IPR_RC_JOB_RETURN
6190 **/
6191static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6192{
6193	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6194
6195	ENTER;
6196	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6197
6198	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6199			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6200			  sizeof(struct ipr_ioa_vpd));
6201
6202	LEAVE;
6203	return IPR_RC_JOB_RETURN;
6204}
6205
6206/**
6207 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6208 * @ipr_cmd:	ipr command struct
6209 *
6210 * This function sends an Identify Host Request Response Queue
6211 * command to establish the HRRQ with the adapter.
6212 *
6213 * Return value:
6214 * 	IPR_RC_JOB_RETURN
6215 **/
6216static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6217{
6218	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6219	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6220
6221	ENTER;
6222	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6223
6224	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6225	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6226
6227	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6228	ioarcb->cmd_pkt.cdb[2] =
6229		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6230	ioarcb->cmd_pkt.cdb[3] =
6231		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6232	ioarcb->cmd_pkt.cdb[4] =
6233		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6234	ioarcb->cmd_pkt.cdb[5] =
6235		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6236	ioarcb->cmd_pkt.cdb[7] =
6237		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6238	ioarcb->cmd_pkt.cdb[8] =
6239		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6240
6241	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6242
6243	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6244
6245	LEAVE;
6246	return IPR_RC_JOB_RETURN;
6247}
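
/*
 * Worked example (illustrative): the Identify HRRQ CDB carries the
 * queue address MSB first. A host_rrq_dma of 0x12345678 is encoded as
 *
 *	cdb[2] = 0x12, cdb[3] = 0x34, cdb[4] = 0x56, cdb[5] = 0x78
 *
 * and cdb[7]/cdb[8] likewise carry the queue length in bytes, most
 * significant byte first.
 */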
6248
6249/**
6250 * ipr_reset_timer_done - Adapter reset timer function
6251 * @ipr_cmd:	ipr command struct
6252 *
6253 * Description: This function is used in adapter reset processing
6254 * for timing events. If the reset_cmd pointer in the IOA
6255 * config struct is not this adapter's, we are doing nested
6256 * resets and fail_all_ops will take care of freeing the
6257 * command block.
6258 *
6259 * Return value:
6260 * 	none
6261 **/
6262static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6263{
6264	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6265	unsigned long lock_flags = 0;
6266
6267	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6268
6269	if (ioa_cfg->reset_cmd == ipr_cmd) {
6270		list_del(&ipr_cmd->queue);
6271		ipr_cmd->done(ipr_cmd);
6272	}
6273
6274	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6275}
6276
6277/**
6278 * ipr_reset_start_timer - Start a timer for adapter reset job
6279 * @ipr_cmd:	ipr command struct
6280 * @timeout:	timeout value
6281 *
6282 * Description: This function is used in adapter reset processing
6283 * for timing events. If the reset_cmd pointer in the IOA
6284 * config struct is not this adapter's, we are doing nested
6285 * resets and fail_all_ops will take care of freeing the
6286 * command block.
6287 *
6288 * Return value:
6289 * 	none
6290 **/
6291static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6292				  unsigned long timeout)
6293{
6294	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6295	ipr_cmd->done = ipr_reset_ioa_job;
6296
6297	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6298	ipr_cmd->timer.expires = jiffies + timeout;
6299	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6300	add_timer(&ipr_cmd->timer);
6301}
6302
6303/**
6304 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6305 * @ioa_cfg:	ioa cfg struct
6306 *
6307 * Return value:
6308 * 	nothing
6309 **/
6310static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6311{
6312	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6313
6314	/* Initialize Host RRQ pointers */
6315	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6316	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6317	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6318	ioa_cfg->toggle_bit = 1;
6319
6320	/* Zero out config table */
6321	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6322}
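
/*
 * Note (illustrative): the host RRQ is consumed circularly. Each
 * response entry carries a toggle bit; the driver processes entries
 * whose toggle bit matches ioa_cfg->toggle_bit and flips its copy when
 * hrrq_curr wraps from hrrq_end back to hrrq_start, distinguishing new
 * entries from stale ones without rezeroing the queue on every pass.
 */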
6323
6324/**
6325 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6326 * @ipr_cmd:	ipr command struct
6327 *
6328 * This function reinitializes some control blocks and
6329 * enables destructive diagnostics on the adapter.
6330 *
6331 * Return value:
6332 * 	IPR_RC_JOB_RETURN
6333 **/
6334static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6335{
6336	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6337	volatile u32 int_reg;
6338
6339	ENTER;
6340	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6341	ipr_init_ioa_mem(ioa_cfg);
6342
6343	ioa_cfg->allow_interrupts = 1;
6344	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6345
6346	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6347		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6348		       ioa_cfg->regs.clr_interrupt_mask_reg);
6349		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6350		return IPR_RC_JOB_CONTINUE;
6351	}
6352
6353	/* Enable destructive diagnostics on IOA */
6354	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6355
6356	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6357	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6358
6359	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6360
6361	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6362	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6363	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6364	ipr_cmd->done = ipr_reset_ioa_job;
6365	add_timer(&ipr_cmd->timer);
6366	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6367
6368	LEAVE;
6369	return IPR_RC_JOB_RETURN;
6370}
6371
6372/**
6373 * ipr_reset_wait_for_dump - Wait for a dump to time out.
6374 * @ipr_cmd:	ipr command struct
6375 *
6376 * This function is invoked when an adapter dump has run out
6377 * of processing time.
6378 *
6379 * Return value:
6380 * 	IPR_RC_JOB_CONTINUE
6381 **/
6382static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6383{
6384	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6385
6386	if (ioa_cfg->sdt_state == GET_DUMP)
6387		ioa_cfg->sdt_state = ABORT_DUMP;
6388
6389	ipr_cmd->job_step = ipr_reset_alert;
6390
6391	return IPR_RC_JOB_CONTINUE;
6392}
6393
6394/**
6395 * ipr_unit_check_no_data - Log a unit check/no data error log
6396 * @ioa_cfg:		ioa config struct
6397 *
6398 * Logs an error indicating the adapter unit checked, but for some
6399 * reason, we were unable to fetch the unit check buffer.
6400 *
6401 * Return value:
6402 * 	nothing
6403 **/
6404static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6405{
6406	ioa_cfg->errors_logged++;
6407	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6408}
6409
6410/**
6411 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6412 * @ioa_cfg:		ioa config struct
6413 *
6414 * Fetches the unit check buffer from the adapter by clocking the data
6415 * through the mailbox register.
6416 *
6417 * Return value:
6418 * 	nothing
6419 **/
6420static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6421{
6422	unsigned long mailbox;
6423	struct ipr_hostrcb *hostrcb;
6424	struct ipr_uc_sdt sdt;
6425	int rc, length;
6426	u32 ioasc;
6427
6428	mailbox = readl(ioa_cfg->ioa_mailbox);
6429
6430	if (!ipr_sdt_is_fmt2(mailbox)) {
6431		ipr_unit_check_no_data(ioa_cfg);
6432		return;
6433	}
6434
6435	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6436	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6437					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6438
6439	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6440	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6441		ipr_unit_check_no_data(ioa_cfg);
6442		return;
6443	}
6444
6445	/* Find length of the first sdt entry (UC buffer) */
6446	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6447		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6448
6449	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6450			     struct ipr_hostrcb, queue);
6451	list_del(&hostrcb->queue);
6452	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6453
6454	rc = ipr_get_ldump_data_section(ioa_cfg,
6455					be32_to_cpu(sdt.entry[0].bar_str_offset),
6456					(__be32 *)&hostrcb->hcam,
6457					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6458
6459	if (!rc) {
6460		ipr_handle_log_data(ioa_cfg, hostrcb);
6461		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6462		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6463		    ioa_cfg->sdt_state == GET_DUMP)
6464			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6465	} else
6466		ipr_unit_check_no_data(ioa_cfg);
6467
6468	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6469}
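
/*
 * Note (illustrative): with format 2 SDTs the first entry addresses
 * the unit check buffer, so its length is simply
 *
 *	(end_offset - bar_str_offset) & IPR_FMT2_MBX_ADDR_MASK
 *
 * capped above to the size of a host RCB before the dump fetch.
 */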
6470
6471/**
6472 * ipr_reset_restore_cfg_space - Restore PCI config space.
6473 * @ipr_cmd:	ipr command struct
6474 *
6475 * Description: This function restores the saved PCI config space of
6476 * the adapter, fails all outstanding ops back to the callers, and
6477 * fetches the dump/unit check if applicable to this reset.
6478 *
6479 * Return value:
6480 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6481 **/
6482static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6483{
6484	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6485	int rc;
6486
6487	ENTER;
6488	rc = pci_restore_state(ioa_cfg->pdev);
6489
6490	if (rc != PCIBIOS_SUCCESSFUL) {
6491		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6492		return IPR_RC_JOB_CONTINUE;
6493	}
6494
6495	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6496		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6497		return IPR_RC_JOB_CONTINUE;
6498	}
6499
6500	ipr_fail_all_ops(ioa_cfg);
6501
6502	if (ioa_cfg->ioa_unit_checked) {
6503		ioa_cfg->ioa_unit_checked = 0;
6504		ipr_get_unit_check_buffer(ioa_cfg);
6505		ipr_cmd->job_step = ipr_reset_alert;
6506		ipr_reset_start_timer(ipr_cmd, 0);
6507		return IPR_RC_JOB_RETURN;
6508	}
6509
6510	if (ioa_cfg->in_ioa_bringdown) {
6511		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6512	} else {
6513		ipr_cmd->job_step = ipr_reset_enable_ioa;
6514
6515		if (GET_DUMP == ioa_cfg->sdt_state) {
6516			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6517			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6518			schedule_work(&ioa_cfg->work_q);
6519			return IPR_RC_JOB_RETURN;
6520		}
6521	}
6522
6523	LEAVE;
6524	return IPR_RC_JOB_CONTINUE;
6525}
6526
6527/**
6528 * ipr_reset_bist_done - BIST has completed on the adapter.
6529 * @ipr_cmd:	ipr command struct
6530 *
6531 * Description: Unblock config space and resume the reset process.
6532 *
6533 * Return value:
6534 * 	IPR_RC_JOB_CONTINUE
6535 **/
6536static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6537{
6538	ENTER;
6539	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6540	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6541	LEAVE;
6542	return IPR_RC_JOB_CONTINUE;
6543}
6544
6545/**
6546 * ipr_reset_start_bist - Run BIST on the adapter.
6547 * @ipr_cmd:	ipr command struct
6548 *
6549 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6550 *
6551 * Return value:
6552 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6553 **/
6554static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6555{
6556	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6557	int rc;
6558
6559	ENTER;
6560	pci_block_user_cfg_access(ioa_cfg->pdev);
6561	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6562
6563	if (rc != PCIBIOS_SUCCESSFUL) {
6564		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6565		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6566		rc = IPR_RC_JOB_CONTINUE;
6567	} else {
6568		ipr_cmd->job_step = ipr_reset_bist_done;
6569		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6570		rc = IPR_RC_JOB_RETURN;
6571	}
6572
6573	LEAVE;
6574	return rc;
6575}
6576
6577/**
6578 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6579 * @ipr_cmd:	ipr command struct
6580 *
6581 * Description: This clears PCI reset to the adapter and delays two seconds.
6582 *
6583 * Return value:
6584 * 	IPR_RC_JOB_RETURN
6585 **/
6586static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6587{
6588	ENTER;
6589	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6590	ipr_cmd->job_step = ipr_reset_bist_done;
6591	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6592	LEAVE;
6593	return IPR_RC_JOB_RETURN;
6594}
6595
6596/**
6597 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6598 * @ipr_cmd:	ipr command struct
6599 *
6600 * Description: This asserts PCI reset to the adapter.
6601 *
6602 * Return value:
6603 * 	IPR_RC_JOB_RETURN
6604 **/
6605static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6606{
6607	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6608	struct pci_dev *pdev = ioa_cfg->pdev;
6609
6610	ENTER;
6611	pci_block_user_cfg_access(pdev);
6612	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6613	ipr_cmd->job_step = ipr_reset_slot_reset_done;
6614	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6615	LEAVE;
6616	return IPR_RC_JOB_RETURN;
6617}
6618
6619/**
6620 * ipr_reset_allowed - Query whether or not IOA can be reset
6621 * @ioa_cfg:	ioa config struct
6622 *
6623 * Return value:
6624 * 	0 if reset not allowed / non-zero if reset is allowed
6625 **/
6626static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6627{
6628	volatile u32 temp_reg;
6629
6630	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6631	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6632}
6633
6634/**
6635 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6636 * @ipr_cmd:	ipr command struct
6637 *
6638 * Description: This function waits for adapter permission to run BIST,
6639 * then runs BIST. If the adapter does not give permission after a
6640 * reasonable time, we will reset the adapter anyway. The impact of
6641 * resetting the adapter without warning the adapter is the risk of
6642 * losing the persistent error log on the adapter. If the adapter is
6643 * reset while it is writing to the flash on the adapter, the flash
6644 * segment will have bad ECC and be zeroed.
6645 *
6646 * Return value:
6647 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6648 **/
6649static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6650{
6651	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6652	int rc = IPR_RC_JOB_RETURN;
6653
6654	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6655		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6656		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6657	} else {
6658		ipr_cmd->job_step = ioa_cfg->reset;
6659		rc = IPR_RC_JOB_CONTINUE;
6660	}
6661
6662	return rc;
6663}
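
/*
 * Timing sketch (illustrative): u.time_left starts at
 * IPR_WAIT_FOR_RESET_TIMEOUT and is decremented by
 * IPR_CHECK_FOR_RESET_TIMEOUT on each pass, so the IOA is polled for
 * permission until the budget is exhausted, after which the reset
 * proceeds regardless.
 */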
6664
6665/**
6666 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6667 * @ipr_cmd:	ipr command struct
6668 *
6669 * Description: This function alerts the adapter that it will be reset.
6670 * If memory space is not currently enabled, proceed directly
6671 * to running BIST on the adapter. The timer must always be started
6672 * so we guarantee we do not run BIST from ipr_isr.
6673 *
6674 * Return value:
6675 * 	IPR_RC_JOB_RETURN
6676 **/
6677static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6678{
6679	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6680	u16 cmd_reg;
6681	int rc;
6682
6683	ENTER;
6684	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6685
6686	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6687		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6688		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6689		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6690	} else {
6691		ipr_cmd->job_step = ioa_cfg->reset;
6692	}
6693
6694	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6695	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6696
6697	LEAVE;
6698	return IPR_RC_JOB_RETURN;
6699}
6700
6701/**
6702 * ipr_reset_ucode_download_done - Microcode download completion
6703 * @ipr_cmd:	ipr command struct
6704 *
6705 * Description: This function unmaps the microcode download buffer.
6706 *
6707 * Return value:
6708 * 	IPR_RC_JOB_CONTINUE
6709 **/
6710static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6711{
6712	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6713	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6714
6715	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6716		     sglist->num_sg, DMA_TO_DEVICE);
6717
6718	ipr_cmd->job_step = ipr_reset_alert;
6719	return IPR_RC_JOB_CONTINUE;
6720}
6721
6722/**
6723 * ipr_reset_ucode_download - Download microcode to the adapter
6724 * @ipr_cmd:	ipr command struct
6725 *
6726 * Description: This function checks to see if there is microcode
6727 * to download to the adapter. If there is, a download is performed.
6728 *
6729 * Return value:
6730 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6731 **/
6732static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6733{
6734	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6735	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6736
6737	ENTER;
6738	ipr_cmd->job_step = ipr_reset_alert;
6739
6740	if (!sglist)
6741		return IPR_RC_JOB_CONTINUE;
6742
6743	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6744	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6745	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6746	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6747	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6748	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6749	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6750
6751	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6752	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6753
6754	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6755		   IPR_WRITE_BUFFER_TIMEOUT);
6756
6757	LEAVE;
6758	return IPR_RC_JOB_RETURN;
6759}
6760
6761/**
6762 * ipr_reset_shutdown_ioa - Shutdown the adapter
6763 * @ipr_cmd:	ipr command struct
6764 *
6765 * Description: This function issues an adapter shutdown of the
6766 * specified type to the specified adapter as part of the
6767 * adapter reset job.
6768 *
6769 * Return value:
6770 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6771 **/
6772static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6773{
6774	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6775	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6776	unsigned long timeout;
6777	int rc = IPR_RC_JOB_CONTINUE;
6778
6779	ENTER;
6780	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6781		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6782		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6783		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6784		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6785
6786		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
6787			timeout = IPR_SHUTDOWN_TIMEOUT;
6788		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6789			timeout = IPR_INTERNAL_TIMEOUT;
6790		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6791			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
6792		else
6793			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6794
6795		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6796
6797		rc = IPR_RC_JOB_RETURN;
6798		ipr_cmd->job_step = ipr_reset_ucode_download;
6799	} else
6800		ipr_cmd->job_step = ipr_reset_alert;
6801
6802	LEAVE;
6803	return rc;
6804}
6805
6806/**
6807 * ipr_reset_ioa_job - Adapter reset job
6808 * @ipr_cmd:	ipr command struct
6809 *
6810 * Description: This function is the job router for the adapter reset job.
6811 *
6812 * Return value:
6813 * 	none
6814 **/
6815static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6816{
6817	u32 rc, ioasc;
6818	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6819
6820	do {
6821		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6822
6823		if (ioa_cfg->reset_cmd != ipr_cmd) {
6824			/*
6825			 * We are doing nested adapter resets and this is
6826			 * not the current reset job.
6827			 */
6828			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6829			return;
6830		}
6831
6832		if (IPR_IOASC_SENSE_KEY(ioasc)) {
6833			rc = ipr_cmd->job_step_failed(ipr_cmd);
6834			if (rc == IPR_RC_JOB_RETURN)
6835				return;
6836		}
6837
6838		ipr_reinit_ipr_cmnd(ipr_cmd);
6839		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6840		rc = ipr_cmd->job_step(ipr_cmd);
6841	} while (rc == IPR_RC_JOB_CONTINUE);
6842}
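
/*
 * Overview (illustrative): the reset job is a chain of job_step
 * handlers. A step returns IPR_RC_JOB_CONTINUE to run the next step
 * synchronously in this loop, or IPR_RC_JOB_RETURN after starting an
 * async command or timer whose done handler re-enters this router.
 * A typical bringup reads roughly:
 *
 *	ipr_reset_shutdown_ioa -> ipr_reset_ucode_download ->
 *	ipr_reset_alert -> ... -> ipr_reset_enable_ioa ->
 *	ipr_ioafp_indentify_hrrq -> inquiries -> ipr_init_res_table ->
 *	mode sense/select -> ipr_ioa_reset_done
 */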
6843
6844/**
6845 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6846 * @ioa_cfg:		ioa config struct
6847 * @job_step:		first job step of reset job
6848 * @shutdown_type:	shutdown type
6849 *
6850 * Description: This function will initiate the reset of the given adapter
6851 * starting at the selected job step.
6852 * If the caller needs to wait on the completion of the reset,
6853 * the caller must sleep on the reset_wait_q.
6854 *
6855 * Return value:
6856 * 	none
6857 **/
6858static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6859				    int (*job_step) (struct ipr_cmnd *),
6860				    enum ipr_shutdown_type shutdown_type)
6861{
6862	struct ipr_cmnd *ipr_cmd;
6863
6864	ioa_cfg->in_reset_reload = 1;
6865	ioa_cfg->allow_cmds = 0;
6866	scsi_block_requests(ioa_cfg->host);
6867
6868	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6869	ioa_cfg->reset_cmd = ipr_cmd;
6870	ipr_cmd->job_step = job_step;
6871	ipr_cmd->u.shutdown_type = shutdown_type;
6872
6873	ipr_reset_ioa_job(ipr_cmd);
6874}
6875
6876/**
6877 * ipr_initiate_ioa_reset - Initiate an adapter reset
6878 * @ioa_cfg:		ioa config struct
6879 * @shutdown_type:	shutdown type
6880 *
6881 * Description: This function will initiate the reset of the given adapter.
6882 * If the caller needs to wait on the completion of the reset,
6883 * the caller must sleep on the reset_wait_q.
6884 *
6885 * Return value:
6886 * 	none
6887 **/
6888static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6889				   enum ipr_shutdown_type shutdown_type)
6890{
6891	if (ioa_cfg->ioa_is_dead)
6892		return;
6893
6894	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6895		ioa_cfg->sdt_state = ABORT_DUMP;
6896
6897	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6898		dev_err(&ioa_cfg->pdev->dev,
6899			"IOA taken offline - error recovery failed\n");
6900
6901		ioa_cfg->reset_retries = 0;
6902		ioa_cfg->ioa_is_dead = 1;
6903
6904		if (ioa_cfg->in_ioa_bringdown) {
6905			ioa_cfg->reset_cmd = NULL;
6906			ioa_cfg->in_reset_reload = 0;
6907			ipr_fail_all_ops(ioa_cfg);
6908			wake_up_all(&ioa_cfg->reset_wait_q);
6909
6910			spin_unlock_irq(ioa_cfg->host->host_lock);
6911			scsi_unblock_requests(ioa_cfg->host);
6912			spin_lock_irq(ioa_cfg->host->host_lock);
6913			return;
6914		} else {
6915			ioa_cfg->in_ioa_bringdown = 1;
6916			shutdown_type = IPR_SHUTDOWN_NONE;
6917		}
6918	}
6919
6920	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6921				shutdown_type);
6922}
6923
6924/**
6925 * ipr_reset_freeze - Hold off all I/O activity
6926 * @ipr_cmd:	ipr command struct
6927 *
6928 * Description: If the PCI slot is frozen, hold off all I/O
6929 * activity; then, as soon as the slot is available again,
6930 * initiate an adapter reset.
6931 */
6932static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6933{
6934	/* Disallow new interrupts, avoid loop */
6935	ipr_cmd->ioa_cfg->allow_interrupts = 0;
6936	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6937	ipr_cmd->done = ipr_reset_ioa_job;
6938	return IPR_RC_JOB_RETURN;
6939}
6940
6941/**
6942 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6943 * @pdev:	PCI device struct
6944 *
6945 * Description: This routine is called to tell us that the PCI bus
6946 * is down. Can't do anything here, except put the device driver
6947 * into a holding pattern, waiting for the PCI bus to come back.
6948 */
6949static void ipr_pci_frozen(struct pci_dev *pdev)
6950{
6951	unsigned long flags = 0;
6952	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6953
6954	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6955	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6956	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6957}
6958
6959/**
6960 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6961 * @pdev:	PCI device struct
6962 *
6963 * Description: This routine is called by the pci error recovery
6964 * code after the PCI slot has been reset, just before we
6965 * should resume normal operations.
6966 */
6967static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6968{
6969	unsigned long flags = 0;
6970	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6971
6972	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6973	if (ioa_cfg->needs_warm_reset)
6974		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6975	else
6976		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6977					IPR_SHUTDOWN_NONE);
6978	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6979	return PCI_ERS_RESULT_RECOVERED;
6980}
6981
6982/**
6983 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6984 * @pdev:	PCI device struct
6985 *
6986 * Description: This routine is called when the PCI bus has
6987 * permanently failed.
6988 */
6989static void ipr_pci_perm_failure(struct pci_dev *pdev)
6990{
6991	unsigned long flags = 0;
6992	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6993
6994	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6995	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6996		ioa_cfg->sdt_state = ABORT_DUMP;
6997	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6998	ioa_cfg->in_ioa_bringdown = 1;
6999	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7000	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7001}
7002
7003/**
7004 * ipr_pci_error_detected - Called when a PCI error is detected.
7005 * @pdev:	PCI device struct
7006 * @state:	PCI channel state
7007 *
7008 * Description: Called when a PCI error is detected.
7009 *
7010 * Return value:
7011 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7012 */
7013static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7014					       pci_channel_state_t state)
7015{
7016	switch (state) {
7017	case pci_channel_io_frozen:
7018		ipr_pci_frozen(pdev);
7019		return PCI_ERS_RESULT_NEED_RESET;
7020	case pci_channel_io_perm_failure:
7021		ipr_pci_perm_failure(pdev);
7022		return PCI_ERS_RESULT_DISCONNECT;
7024	default:
7025		break;
7026	}
7027	return PCI_ERS_RESULT_NEED_RESET;
7028}
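
/*
 * Flow sketch (illustrative): under PCI error recovery the core calls
 * these hooks in order, e.g.:
 *
 *	pci_channel_io_frozen       -> ipr_pci_frozen(), NEED_RESET,
 *	                               then ipr_pci_slot_reset() once
 *	                               the slot has been reset
 *	pci_channel_io_perm_failure -> ipr_pci_perm_failure(), DISCONNECT
 */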
7029
7030/**
7031 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7032 * @ioa_cfg:	ioa cfg struct
7033 *
7034 * Description: This is the second phase of adapter intialization
7035 * This function takes care of initilizing the adapter to the point
7036 * where it can accept new commands.
7037
7038 * Return value:
7039 * 	0 on success / -EIO on failure
7040 **/
7041static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7042{
7043	int rc = 0;
7044	unsigned long host_lock_flags = 0;
7045
7046	ENTER;
7047	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7048	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7049	if (ioa_cfg->needs_hard_reset) {
7050		ioa_cfg->needs_hard_reset = 0;
7051		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7052	} else
7053		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7054					IPR_SHUTDOWN_NONE);
7055
7056	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7057	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7058	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7059
7060	if (ioa_cfg->ioa_is_dead) {
7061		rc = -EIO;
7062	} else if (ipr_invalid_adapter(ioa_cfg)) {
7063		if (!ipr_testmode)
7064			rc = -EIO;
7065
7066		dev_err(&ioa_cfg->pdev->dev,
7067			"Adapter not supported in this hardware configuration.\n");
7068	}
7069
7070	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7071
7072	LEAVE;
7073	return rc;
7074}
7075
7076/**
7077 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7078 * @ioa_cfg:	ioa config struct
7079 *
7080 * Return value:
7081 * 	none
7082 **/
7083static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7084{
7085	int i;
7086
7087	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7088		if (ioa_cfg->ipr_cmnd_list[i])
7089			pci_pool_free(ioa_cfg->ipr_cmd_pool,
7090				      ioa_cfg->ipr_cmnd_list[i],
7091				      ioa_cfg->ipr_cmnd_list_dma[i]);
7092
7093		ioa_cfg->ipr_cmnd_list[i] = NULL;
7094	}
7095
7096	if (ioa_cfg->ipr_cmd_pool)
7097		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
7098
7099	ioa_cfg->ipr_cmd_pool = NULL;
7100}
7101
7102/**
7103 * ipr_free_mem - Frees memory allocated for an adapter
7104 * @ioa_cfg:	ioa cfg struct
7105 *
7106 * Return value:
7107 * 	nothing
7108 **/
7109static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7110{
7111	int i;
7112
7113	kfree(ioa_cfg->res_entries);
7114	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7115			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7116	ipr_free_cmd_blks(ioa_cfg);
7117	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7118			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7119	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7120			    ioa_cfg->cfg_table,
7121			    ioa_cfg->cfg_table_dma);
7122
7123	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7124		pci_free_consistent(ioa_cfg->pdev,
7125				    sizeof(struct ipr_hostrcb),
7126				    ioa_cfg->hostrcb[i],
7127				    ioa_cfg->hostrcb_dma[i]);
7128	}
7129
7130	ipr_free_dump(ioa_cfg);
7131	kfree(ioa_cfg->trace);
7132}
7133
7134/**
7135 * ipr_free_all_resources - Free all allocated resources for an adapter.
7136 * @ioa_cfg:	ioa config struct
7137 *
7138 * This function frees all allocated resources for the
7139 * specified adapter.
7140 *
7141 * Return value:
7142 * 	none
7143 **/
7144static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7145{
7146	struct pci_dev *pdev = ioa_cfg->pdev;
7147
7148	ENTER;
7149	free_irq(pdev->irq, ioa_cfg);
7150	iounmap(ioa_cfg->hdw_dma_regs);
7151	pci_release_regions(pdev);
7152	ipr_free_mem(ioa_cfg);
7153	scsi_host_put(ioa_cfg->host);
7154	pci_disable_device(pdev);
7155	LEAVE;
7156}
7157
7158/**
7159 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7160 * @ioa_cfg:	ioa config struct
7161 *
7162 * Return value:
7163 * 	0 on success / -ENOMEM on allocation failure
7164 **/
7165static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7166{
7167	struct ipr_cmnd *ipr_cmd;
7168	struct ipr_ioarcb *ioarcb;
7169	dma_addr_t dma_addr;
7170	int i;
7171
7172	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
7173						 sizeof(struct ipr_cmnd), 8, 0);
7174
7175	if (!ioa_cfg->ipr_cmd_pool)
7176		return -ENOMEM;
7177
7178	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7179		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7180
7181		if (!ipr_cmd) {
7182			ipr_free_cmd_blks(ioa_cfg);
7183			return -ENOMEM;
7184		}
7185
7186		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7187		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7188		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7189
7190		ioarcb = &ipr_cmd->ioarcb;
7191		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
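		/*
		 * The command index is shifted up two bits: the low bits of
		 * each host RRQ entry appear to be reserved for the
		 * response-valid and toggle flags (see the IPR_HRRQ_* bit
		 * definitions in ipr.h), so the handle must not occupy them.
		 */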
7192		ioarcb->host_response_handle = cpu_to_be32(i << 2);
7193		ioarcb->write_ioadl_addr =
7194			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
7195		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7196		ioarcb->ioasa_host_pci_addr =
7197			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7198		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7199		ipr_cmd->cmd_index = i;
7200		ipr_cmd->ioa_cfg = ioa_cfg;
7201		ipr_cmd->sense_buffer_dma = dma_addr +
7202			offsetof(struct ipr_cmnd, sense_buffer);
7203
7204		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7205	}
7206
7207	return 0;
7208}
7209
7210/**
7211 * ipr_alloc_mem - Allocate memory for an adapter
7212 * @ioa_cfg:	ioa config struct
7213 *
7214 * Return value:
7215 * 	0 on success / non-zero for error
7216 **/
7217static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7218{
7219	struct pci_dev *pdev = ioa_cfg->pdev;
7220	int i, rc = -ENOMEM;
7221
7222	ENTER;
7223	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7224				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7225
7226	if (!ioa_cfg->res_entries)
7227		goto out;
7228
7229	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7230		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7231
7232	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7233						sizeof(struct ipr_misc_cbs),
7234						&ioa_cfg->vpd_cbs_dma);
7235
7236	if (!ioa_cfg->vpd_cbs)
7237		goto out_free_res_entries;
7238
7239	if (ipr_alloc_cmd_blks(ioa_cfg))
7240		goto out_free_vpd_cbs;
7241
7242	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7243						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7244						 &ioa_cfg->host_rrq_dma);
7245
7246	if (!ioa_cfg->host_rrq)
7247		goto out_ipr_free_cmd_blocks;
7248
7249	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7250						  sizeof(struct ipr_config_table),
7251						  &ioa_cfg->cfg_table_dma);
7252
7253	if (!ioa_cfg->cfg_table)
7254		goto out_free_host_rrq;
7255
7256	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7257		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7258							   sizeof(struct ipr_hostrcb),
7259							   &ioa_cfg->hostrcb_dma[i]);
7260
7261		if (!ioa_cfg->hostrcb[i])
7262			goto out_free_hostrcb_dma;
7263
7264		ioa_cfg->hostrcb[i]->hostrcb_dma =
7265			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7266		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7267		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7268	}
7269
7270	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7271				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7272
7273	if (!ioa_cfg->trace)
7274		goto out_free_hostrcb_dma;
7275
7276	rc = 0;
7277out:
7278	LEAVE;
7279	return rc;
7280
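/*
 * Error unwind: each label below frees what was allocated before the
 * corresponding failure point, in reverse allocation order. Note that
 * out_free_hostrcb_dma relies on i still holding the number of HCAM
 * buffers successfully allocated so far.
 */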
7281out_free_hostrcb_dma:
7282	while (i-- > 0) {
7283		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7284				    ioa_cfg->hostrcb[i],
7285				    ioa_cfg->hostrcb_dma[i]);
7286	}
7287	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7288			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7289out_free_host_rrq:
7290	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7291			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7292out_ipr_free_cmd_blocks:
7293	ipr_free_cmd_blks(ioa_cfg);
7294out_free_vpd_cbs:
7295	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7296			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7297out_free_res_entries:
7298	kfree(ioa_cfg->res_entries);
7299	goto out;
7300}
7301
7302/**
7303 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7304 * @ioa_cfg:	ioa config struct
7305 *
7306 * Return value:
7307 * 	none
7308 **/
7309static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7310{
7311	int i;
7312
7313	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7314		ioa_cfg->bus_attr[i].bus = i;
7315		ioa_cfg->bus_attr[i].qas_enabled = 0;
7316		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
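		/* Validate the ipr_max_speed module parameter; fall back to U160 if out of range */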
7317		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7318			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7319		else
7320			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7321	}
7322}
7323
7324/**
7325 * ipr_init_ioa_cfg - Initialize IOA config struct
7326 * @ioa_cfg:	ioa config struct
7327 * @host:		scsi host struct
7328 * @pdev:		PCI dev struct
7329 *
7330 * Return value:
7331 * 	none
7332 **/
7333static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7334				       struct Scsi_Host *host, struct pci_dev *pdev)
7335{
7336	const struct ipr_interrupt_offsets *p;
7337	struct ipr_interrupts *t;
7338	void __iomem *base;
7339
7340	ioa_cfg->host = host;
7341	ioa_cfg->pdev = pdev;
7342	ioa_cfg->log_level = ipr_log_level;
7343	ioa_cfg->doorbell = IPR_DOORBELL;
7344	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7345	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7346	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7347	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7348	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7349	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7350	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7351	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7352
7353	INIT_LIST_HEAD(&ioa_cfg->free_q);
7354	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7355	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7356	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7357	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7358	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7359	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7360	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7361	ioa_cfg->sdt_state = INACTIVE;
7362	if (ipr_enable_cache)
7363		ioa_cfg->cache_state = CACHE_ENABLED;
7364	else
7365		ioa_cfg->cache_state = CACHE_DISABLED;
7366
7367	ipr_initialize_bus_attr(ioa_cfg);
7368
7369	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7370	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7371	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7372	host->unique_id = host->host_no;
7373	host->max_cmd_len = IPR_MAX_CDB_LEN;
7374	pci_set_drvdata(pdev, ioa_cfg);
7375
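	/*
	 * Translate the chip-specific register offsets into ioremapped
	 * virtual addresses so the rest of the driver can readl/writel
	 * them directly.
	 */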
7376	p = &ioa_cfg->chip_cfg->regs;
7377	t = &ioa_cfg->regs;
7378	base = ioa_cfg->hdw_dma_regs;
7379
7380	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7381	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7382	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7383	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7384	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7385	t->ioarrin_reg = base + p->ioarrin_reg;
7386	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7387	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7388	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7389}
7390
7391/**
7392 * ipr_get_chip_cfg - Find adapter chip configuration
7393 * @dev_id:		PCI device id struct
7394 *
7395 * Return value:
7396 * 	ptr to chip config on success / NULL on failure
7397 **/
7398static const struct ipr_chip_cfg_t * __devinit
7399ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7400{
7401	int i;
7402
7403	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7404		if (ipr_chip[i].vendor == dev_id->vendor &&
7405		    ipr_chip[i].device == dev_id->device)
7406			return ipr_chip[i].cfg;
7407	return NULL;
7408}
7409
7410/**
7411 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7412 * @pdev:		PCI device struct
7413 * @dev_id:		PCI device id struct
7414 *
7415 * Return value:
7416 * 	0 on success / non-zero on failure
7417 **/
7418static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7419				   const struct pci_device_id *dev_id)
7420{
7421	struct ipr_ioa_cfg *ioa_cfg;
7422	struct Scsi_Host *host;
7423	unsigned long ipr_regs_pci;
7424	void __iomem *ipr_regs;
7425	int rc = PCIBIOS_SUCCESSFUL;
7426	volatile u32 mask, uproc, interrupts;
7427
7428	ENTER;
7429
7430	if ((rc = pci_enable_device(pdev))) {
7431		dev_err(&pdev->dev, "Cannot enable adapter\n");
7432		goto out;
7433	}
7434
7435	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7436
7437	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7438
7439	if (!host) {
7440		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7441		rc = -ENOMEM;
7442		goto out_disable;
7443	}
7444
7445	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7446	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7447	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7448		      sata_port_info.flags, &ipr_sata_ops);
7449
7450	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7451
7452	if (!ioa_cfg->chip_cfg) {
7453		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7454			dev_id->vendor, dev_id->device);
7455		goto out_scsi_host_put;
7456	}
7457
7458	if (ipr_transop_timeout)
7459		ioa_cfg->transop_timeout = ipr_transop_timeout;
7460	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7461		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7462	else
7463		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7464
7465	ioa_cfg->revid = pdev->revision;
7466
7467	ipr_regs_pci = pci_resource_start(pdev, 0);
7468
7469	rc = pci_request_regions(pdev, IPR_NAME);
7470	if (rc < 0) {
7471		dev_err(&pdev->dev,
7472			"Couldn't register memory range of registers\n");
7473		goto out_scsi_host_put;
7474	}
7475
7476	ipr_regs = pci_ioremap_bar(pdev, 0);
7477
7478	if (!ipr_regs) {
7479		dev_err(&pdev->dev,
7480			"Couldn't map memory range of registers\n");
7481		rc = -ENOMEM;
7482		goto out_release_regions;
7483	}
7484
7485	ioa_cfg->hdw_dma_regs = ipr_regs;
7486	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7487	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7488
7489	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7490
7491	pci_set_master(pdev);
7492
7493	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7494	if (rc < 0) {
7495		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7496		goto cleanup_nomem;
7497	}
7498
7499	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7500				   ioa_cfg->chip_cfg->cache_line_size);
7501
7502	if (rc != PCIBIOS_SUCCESSFUL) {
7503		dev_err(&pdev->dev, "Write of cache line size failed\n");
7504		rc = -EIO;
7505		goto cleanup_nomem;
7506	}
7507
7508	/* Save away PCI config space for use following IOA reset */
7509	rc = pci_save_state(pdev);
7510
7511	if (rc != PCIBIOS_SUCCESSFUL) {
7512		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7513		rc = -EIO;
7514		goto cleanup_nomem;
7515	}
7516
7517	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7518		goto cleanup_nomem;
7519
7520	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7521		goto cleanup_nomem;
7522
7523	rc = ipr_alloc_mem(ioa_cfg);
7524	if (rc < 0) {
7525		dev_err(&pdev->dev,
7526			"Couldn't allocate enough memory for device driver!\n");
7527		goto cleanup_nomem;
7528	}
7529
7530	/*
7531	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7532	 * the card is in an unknown state and needs a hard reset
7533	 */
7534	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7535	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
7536	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7537	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7538		ioa_cfg->needs_hard_reset = 1;
7539	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7540		ioa_cfg->needs_hard_reset = 1;
7541	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7542		ioa_cfg->ioa_unit_checked = 1;
7543
7544	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7545	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7546
7547	if (rc) {
7548		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7549			pdev->irq, rc);
7550		goto cleanup_nolog;
7551	}
7552
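	/*
	 * Adapters flagged with IPR_USE_PCI_WARM_RESET (and revision 0
	 * Obsidian-E cards) are reset via a PCI slot reset rather than BIST.
	 */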
7553	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7554	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7555		ioa_cfg->needs_warm_reset = 1;
7556		ioa_cfg->reset = ipr_reset_slot_reset;
7557	} else
7558		ioa_cfg->reset = ipr_reset_start_bist;
7559
7560	spin_lock(&ipr_driver_lock);
7561	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7562	spin_unlock(&ipr_driver_lock);
7563
7564	LEAVE;
7565out:
7566	return rc;
7567
7568cleanup_nolog:
7569	ipr_free_mem(ioa_cfg);
7570cleanup_nomem:
7571	iounmap(ipr_regs);
7572out_release_regions:
7573	pci_release_regions(pdev);
7574out_scsi_host_put:
7575	scsi_host_put(host);
7576out_disable:
7577	pci_disable_device(pdev);
7578	goto out;
7579}
7580
7581/**
7582 * ipr_scan_vsets - Scans for VSET devices
7583 * @ioa_cfg:	ioa config struct
7584 *
7585 * Description: Since the VSET resources do not follow SAM in that we can have
7586 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7587 *
7588 * Return value:
7589 * 	none
7590 **/
7591static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7592{
7593	int target, lun;
7594
7595	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7596		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7597			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7598}
7599
7600/**
7601 * ipr_initiate_ioa_bringdown - Bring down an adapter
7602 * @ioa_cfg:		ioa config struct
7603 * @shutdown_type:	shutdown type
7604 *
7605 * Description: This function will initiate bringing down the adapter.
7606 * This consists of issuing an IOA shutdown to the adapter
7607 * to flush the cache, and running BIST.
7608 * If the caller needs to wait on the completion of the reset,
7609 * the caller must sleep on the reset_wait_q.
7610 *
7611 * Return value:
7612 * 	none
7613 **/
7614static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7615				       enum ipr_shutdown_type shutdown_type)
7616{
7617	ENTER;
7618	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7619		ioa_cfg->sdt_state = ABORT_DUMP;
7620	ioa_cfg->reset_retries = 0;
7621	ioa_cfg->in_ioa_bringdown = 1;
7622	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7623	LEAVE;
7624}
7625
7626/**
7627 * __ipr_remove - Remove a single adapter
7628 * @pdev:	pci device struct
7629 *
7630 * Adapter hot plug remove entry point.
7631 *
7632 * Return value:
7633 * 	none
7634 **/
7635static void __ipr_remove(struct pci_dev *pdev)
7636{
7637	unsigned long host_lock_flags = 0;
7638	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7639	ENTER;
7640
7641	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
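	/*
	 * Wait out any in-flight reset/reload before starting the bringdown.
	 * The lock is dropped while sleeping and re-taken so the flag is
	 * always re-checked under the host lock.
	 */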
7642	while (ioa_cfg->in_reset_reload) {
7643		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7644		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7645		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7646	}
7647
7648	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7649
7650	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7651	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7652	flush_scheduled_work();
7653	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7654
7655	spin_lock(&ipr_driver_lock);
7656	list_del(&ioa_cfg->queue);
7657	spin_unlock(&ipr_driver_lock);
7658
7659	if (ioa_cfg->sdt_state == ABORT_DUMP)
7660		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7661	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7662
7663	ipr_free_all_resources(ioa_cfg);
7664
7665	LEAVE;
7666}
7667
7668/**
7669 * ipr_remove - IOA hot plug remove entry point
7670 * @pdev:	pci device struct
7671 *
7672 * Adapter hot plug remove entry point.
7673 *
7674 * Return value:
7675 * 	none
7676 **/
7677static void ipr_remove(struct pci_dev *pdev)
7678{
7679	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7680
7681	ENTER;
7682
7683	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
7684			      &ipr_trace_attr);
7685	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
7686			     &ipr_dump_attr);
7687	scsi_remove_host(ioa_cfg->host);
7688
7689	__ipr_remove(pdev);
7690
7691	LEAVE;
7692}
7693
7694/**
7695 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
7696 *
7697 * Return value:
7698 * 	0 on success / non-zero on failure
7699 **/
7700static int __devinit ipr_probe(struct pci_dev *pdev,
7701			       const struct pci_device_id *dev_id)
7702{
7703	struct ipr_ioa_cfg *ioa_cfg;
7704	int rc;
7705
7706	rc = ipr_probe_ioa(pdev, dev_id);
7707
7708	if (rc)
7709		return rc;
7710
7711	ioa_cfg = pci_get_drvdata(pdev);
7712	rc = ipr_probe_ioa_part2(ioa_cfg);
7713
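	/*
	 * Each failure path below unwinds everything set up so far;
	 * __ipr_remove() undoes the work done by ipr_probe_ioa().
	 */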
7714	if (rc) {
7715		__ipr_remove(pdev);
7716		return rc;
7717	}
7718
7719	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7720
7721	if (rc) {
7722		__ipr_remove(pdev);
7723		return rc;
7724	}
7725
7726	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
7727				   &ipr_trace_attr);
7728
7729	if (rc) {
7730		scsi_remove_host(ioa_cfg->host);
7731		__ipr_remove(pdev);
7732		return rc;
7733	}
7734
7735	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
7736				   &ipr_dump_attr);
7737
7738	if (rc) {
7739		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
7740				      &ipr_trace_attr);
7741		scsi_remove_host(ioa_cfg->host);
7742		__ipr_remove(pdev);
7743		return rc;
7744	}
7745
7746	scsi_scan_host(ioa_cfg->host);
7747	ipr_scan_vsets(ioa_cfg);
7748	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7749	ioa_cfg->allow_ml_add_del = 1;
7750	ioa_cfg->host->max_channel = IPR_VSET_BUS;
7751	schedule_work(&ioa_cfg->work_q);
7752	return 0;
7753}
7754
7755/**
7756 * ipr_shutdown - Shutdown handler.
7757 * @pdev:	pci device struct
7758 *
7759 * This function is invoked upon system shutdown/reboot. It issues
7760 * a shutdown to the adapter to flush the write cache.
7761 *
7762 * Return value:
7763 * 	none
7764 **/
7765static void ipr_shutdown(struct pci_dev *pdev)
7766{
7767	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7768	unsigned long lock_flags = 0;
7769
7770	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
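	/* As in __ipr_remove(): let any in-flight reset/reload finish first */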
7771	while (ioa_cfg->in_reset_reload) {
7772		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7773		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7774		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7775	}
7776
7777	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7778	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7779	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7780}
7781
7782static struct pci_device_id ipr_pci_table[] __devinitdata = {
7783	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7784		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7785	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7786		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7787	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7788		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7789	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7790		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7791	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7792		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7793	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7794		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7795	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7796		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7797	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7798		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7799		IPR_USE_LONG_TRANSOP_TIMEOUT },
7800	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7801	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7802	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7803	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7804	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7805	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7806	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7807	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7808	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7809	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7810	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7811	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7812	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7813	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7814	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7815	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7816	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7817	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7818	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7819	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7820	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7821	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7822	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7823	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7824	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7825	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7826	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
7827	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7828		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7829	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7830		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7831	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7832		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7833		IPR_USE_LONG_TRANSOP_TIMEOUT },
7834	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7835		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7836		IPR_USE_LONG_TRANSOP_TIMEOUT },
7837	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7838		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7839		IPR_USE_LONG_TRANSOP_TIMEOUT },
7840	{ }
7841};
7842MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7843
7844static struct pci_error_handlers ipr_err_handler = {
7845	.error_detected = ipr_pci_error_detected,
7846	.slot_reset = ipr_pci_slot_reset,
7847};
7848
7849static struct pci_driver ipr_driver = {
7850	.name = IPR_NAME,
7851	.id_table = ipr_pci_table,
7852	.probe = ipr_probe,
7853	.remove = ipr_remove,
7854	.shutdown = ipr_shutdown,
7855	.err_handler = &ipr_err_handler,
7856};
7857
7858/**
7859 * ipr_init - Module entry point
7860 *
7861 * Return value:
7862 * 	0 on success / negative value on failure
7863 **/
7864static int __init ipr_init(void)
7865{
7866	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7867		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7868
7869	return pci_register_driver(&ipr_driver);
7870}
7871
7872/**
7873 * ipr_exit - Module unload
7874 *
7875 * Module unload entry point.
7876 *
7877 * Return value:
7878 * 	none
7879 **/
7880static void __exit ipr_exit(void)
7881{
7882	pci_unregister_driver(&ipr_driver);
7883}
7884
7885module_init(ipr_init);
7886module_exit(ipr_exit);
7887