ipr.c revision 22d2e402d1f671ca66919a36e04a650b735f4f0d
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <linux/libata.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
82#include "ipr.h"
83
84/*
85 *   Global Data
86 */
87static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0;
91static unsigned int ipr_fastfail = 0;
92static unsigned int ipr_transop_timeout = 0;
93static unsigned int ipr_enable_cache = 1;
94static unsigned int ipr_debug = 0;
95static DEFINE_SPINLOCK(ipr_driver_lock);
96
97/* This table describes the differences between DMA controller chips */
98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
99	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
100		.mailbox = 0x0042C,
101		.cache_line_size = 0x20,
102		{
103			.set_interrupt_mask_reg = 0x0022C,
104			.clr_interrupt_mask_reg = 0x00230,
105			.sense_interrupt_mask_reg = 0x0022C,
106			.clr_interrupt_reg = 0x00228,
107			.sense_interrupt_reg = 0x00224,
108			.ioarrin_reg = 0x00404,
109			.sense_uproc_interrupt_reg = 0x00214,
110			.set_uproc_interrupt_reg = 0x00214,
111			.clr_uproc_interrupt_reg = 0x00218
112		}
113	},
114	{ /* Snipe and Scamp */
115		.mailbox = 0x0052C,
116		.cache_line_size = 0x20,
117		{
118			.set_interrupt_mask_reg = 0x00288,
119			.clr_interrupt_mask_reg = 0x0028C,
120			.sense_interrupt_mask_reg = 0x00288,
121			.clr_interrupt_reg = 0x00284,
122			.sense_interrupt_reg = 0x00280,
123			.ioarrin_reg = 0x00504,
124			.sense_uproc_interrupt_reg = 0x00290,
125			.set_uproc_interrupt_reg = 0x00290,
126			.clr_uproc_interrupt_reg = 0x00294
127		}
128	},
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
134	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
135	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
136	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
137	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
138	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
139};
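/*
 * Illustrative sketch, not part of this revision: probe code is expected
 * to pick the register layout by matching PCI IDs against ipr_chip[]
 * (field names here are assumed from the positional initializers above):
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == pdev->vendor &&
 *		    ipr_chip[i].device == pdev->device)
 *			return ipr_chip[i].cfg;
 */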
140
141static int ipr_max_bus_speeds [] = {
142	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
143};
144
145MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
146MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
147module_param_named(max_speed, ipr_max_speed, uint, 0);
148MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
149module_param_named(log_level, ipr_log_level, uint, 0);
150MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
151module_param_named(testmode, ipr_testmode, int, 0);
152MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
153module_param_named(fastfail, ipr_fastfail, int, 0);
154MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
155module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
156MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to become operational (default: 300)");
157module_param_named(enable_cache, ipr_enable_cache, int, 0);
158MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
159module_param_named(debug, ipr_debug, int, 0);
160MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
161MODULE_LICENSE("GPL");
162MODULE_VERSION(IPR_DRIVER_VERSION);
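/*
 * Example usage (illustrative): the parameters above are load-time only
 * (permissions are 0, so they are not writable via sysfs), e.g.
 * "modprobe ipr max_speed=2 log_level=2 fastfail=1", or
 * "ipr.max_speed=2" on the kernel command line when the driver is built in.
 */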
163
164/*  A constant array of IOASCs/URCs/Error Messages */
165static const
166struct ipr_error_table_t ipr_error_table[] = {
167	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
168	"8155: An unknown error was received"},
169	{0x00330000, 0, 0,
170	"Soft underlength error"},
171	{0x005A0000, 0, 0,
172	"Command to be cancelled not found"},
173	{0x00808000, 0, 0,
174	"Qualified success"},
175	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
176	"FFFE: Soft device bus error recovered by the IOA"},
177	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
178	"4101: Soft device bus fabric error"},
179	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
180	"FFF9: Device sector reassign successful"},
181	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
182	"FFF7: Media error recovered by device rewrite procedures"},
183	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
184	"7001: IOA sector reassignment successful"},
185	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
186	"FFF9: Soft media error. Sector reassignment recommended"},
187	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
188	"FFF7: Media error recovered by IOA rewrite procedures"},
189	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
190	"FF3D: Soft PCI bus error recovered by the IOA"},
191	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
192	"FFF6: Device hardware error recovered by the IOA"},
193	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
194	"FFF6: Device hardware error recovered by the device"},
195	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
196	"FF3D: Soft IOA error recovered by the IOA"},
197	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
198	"FFFA: Undefined device response recovered by the IOA"},
199	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
200	"FFF6: Device bus error, message or command phase"},
201	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
202	"FFFE: Task Management Function failed"},
203	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
204	"FFF6: Failure prediction threshold exceeded"},
205	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
206	"8009: Impending cache battery pack failure"},
207	{0x02040400, 0, 0,
208	"34FF: Disk device format in progress"},
209	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
210	"9070: IOA requested reset"},
211	{0x023F0000, 0, 0,
212	"Synchronization required"},
213	{0x024E0000, 0, 0,
214	"Not ready, IOA shutdown"},
215	{0x025A0000, 0, 0,
216	"Not ready, IOA has been shut down"},
217	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
218	"3020: Storage subsystem configuration error"},
219	{0x03110B00, 0, 0,
220	"FFF5: Medium error, data unreadable, recommend reassign"},
221	{0x03110C00, 0, 0,
222	"7000: Medium error, data unreadable, do not reassign"},
223	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
224	"FFF3: Disk media format bad"},
225	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
226	"3002: Addressed device failed to respond to selection"},
227	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
228	"3100: Device bus error"},
229	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
230	"3109: IOA timed out a device command"},
231	{0x04088000, 0, 0,
232	"3120: SCSI bus is not operational"},
233	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
234	"4100: Hard device bus fabric error"},
235	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
236	"9000: IOA reserved area data check"},
237	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
238	"9001: IOA reserved area invalid data pattern"},
239	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
240	"9002: IOA reserved area LRC error"},
241	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
242	"102E: Out of alternate sectors for disk storage"},
243	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
244	"FFF4: Data transfer underlength error"},
245	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
246	"FFF4: Data transfer overlength error"},
247	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
248	"3400: Logical unit failure"},
249	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
250	"FFF4: Device microcode is corrupt"},
251	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
252	"8150: PCI bus error"},
253	{0x04430000, 1, 0,
254	"Unsupported device bus message received"},
255	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
256	"FFF4: Disk device problem"},
257	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
258	"8150: Permanent IOA failure"},
259	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
260	"3010: Disk device returned wrong response to IOA"},
261	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
262	"8151: IOA microcode error"},
263	{0x04448500, 0, 0,
264	"Device bus status error"},
265	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
266	"8157: IOA error requiring IOA reset to recover"},
267	{0x04448700, 0, 0,
268	"ATA device status error"},
269	{0x04490000, 0, 0,
270	"Message reject received from the device"},
271	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
272	"8008: A permanent cache battery pack failure occurred"},
273	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
274	"9090: Disk unit has been modified after the last known status"},
275	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
276	"9081: IOA detected device error"},
277	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
278	"9082: IOA detected device error"},
279	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
280	"3110: Device bus error, message or command phase"},
281	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
282	"3110: SAS Command / Task Management Function failed"},
283	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
284	"9091: Incorrect hardware configuration change has been detected"},
285	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
286	"9073: Invalid multi-adapter configuration"},
287	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
288	"4010: Incorrect connection between cascaded expanders"},
289	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
290	"4020: Connections exceed IOA design limits"},
291	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
292	"4030: Incorrect multipath connection"},
293	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
294	"4110: Unsupported enclosure function"},
295	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
296	"FFF4: Command to logical unit failed"},
297	{0x05240000, 1, 0,
298	"Illegal request, invalid request type or request packet"},
299	{0x05250000, 0, 0,
300	"Illegal request, invalid resource handle"},
301	{0x05258000, 0, 0,
302	"Illegal request, commands not allowed to this device"},
303	{0x05258100, 0, 0,
304	"Illegal request, command not allowed to a secondary adapter"},
305	{0x05260000, 0, 0,
306	"Illegal request, invalid field in parameter list"},
307	{0x05260100, 0, 0,
308	"Illegal request, parameter not supported"},
309	{0x05260200, 0, 0,
310	"Illegal request, parameter value invalid"},
311	{0x052C0000, 0, 0,
312	"Illegal request, command sequence error"},
313	{0x052C8000, 1, 0,
314	"Illegal request, dual adapter support not enabled"},
315	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
316	"9031: Array protection temporarily suspended, protection resuming"},
317	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
318	"9040: Array protection temporarily suspended, protection resuming"},
319	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
320	"3140: Device bus not ready to ready transition"},
321	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
322	"FFFB: SCSI bus was reset"},
323	{0x06290500, 0, 0,
324	"FFFE: SCSI bus transition to single ended"},
325	{0x06290600, 0, 0,
326	"FFFE: SCSI bus transition to LVD"},
327	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
328	"FFFB: SCSI bus was reset by another initiator"},
329	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
330	"3029: A device replacement has occurred"},
331	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
332	"9051: IOA cache data exists for a missing or failed device"},
333	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
334	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
335	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
336	"9025: Disk unit is not supported at its physical location"},
337	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
338	"3020: IOA detected a SCSI bus configuration error"},
339	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
340	"3150: SCSI bus configuration error"},
341	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
342	"9074: Asymmetric advanced function disk configuration"},
343	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
344	"4040: Incomplete multipath connection between IOA and enclosure"},
345	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
346	"4041: Incomplete multipath connection between enclosure and device"},
347	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
348	"9075: Incomplete multipath connection between IOA and remote IOA"},
349	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
350	"9076: Configuration error, missing remote IOA"},
351	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
352	"4050: Enclosure does not support a required multipath function"},
353	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
354	"9041: Array protection temporarily suspended"},
355	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
356	"9042: Corrupt array parity detected on specified device"},
357	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
358	"9030: Array no longer protected due to missing or failed disk unit"},
359	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
360	"9071: Link operational transition"},
361	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
362	"9072: Link not operational transition"},
363	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
364	"9032: Array exposed but still protected"},
365	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
366	"70DD: Device forced failed by disrupt device command"},
367	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
368	"4061: Multipath redundancy level got better"},
369	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
370	"4060: Multipath redundancy level got worse"},
371	{0x07270000, 0, 0,
372	"Failure due to other device"},
373	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
374	"9008: IOA does not support functions expected by devices"},
375	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
376	"9010: Cache data associated with attached devices cannot be found"},
377	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
378	"9011: Cache data belongs to devices other than those attached"},
379	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
380	"9020: Array missing 2 or more devices with only 1 device present"},
381	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
382	"9021: Array missing 2 or more devices with 2 or more devices present"},
383	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
384	"9022: Exposed array is missing a required device"},
385	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
386	"9023: Array member(s) not at required physical locations"},
387	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
388	"9024: Array not functional due to present hardware configuration"},
389	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
390	"9026: Array not functional due to present hardware configuration"},
391	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
392	"9027: Array is missing a device and parity is out of sync"},
393	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
394	"9028: Maximum number of arrays already exist"},
395	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
396	"9050: Required cache data cannot be located for a disk unit"},
397	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
398	"9052: Cache data exists for a device that has been modified"},
399	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
400	"9054: IOA resources not available due to previous problems"},
401	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
402	"9092: Disk unit requires initialization before use"},
403	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
404	"9029: Incorrect hardware configuration change has been detected"},
405	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
406	"9060: One or more disk pairs are missing from an array"},
407	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
408	"9061: One or more disks are missing from an array"},
409	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
410	"9062: One or more disks are missing from an array"},
411	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
412	"9063: Maximum number of functional arrays has been exceeded"},
413	{0x0B260000, 0, 0,
414	"Aborted command, invalid descriptor"},
415	{0x0B5A0000, 0, 0,
416	"Command terminated by host"}
417};
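/*
 * Each entry appears to pair an IOASC with a flag controlling sense data
 * logging, the log level at which the HCAM is logged (0 suppresses it),
 * and the message text. ipr_get_error() matches on
 * (ioasc & IPR_IOASC_IOASC_MASK) and falls back to entry 0 for unknown
 * errors.
 */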
418
419static const struct ipr_ses_table_entry ipr_ses_table[] = {
420	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
421	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
422	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
423	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
424	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
425	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
426	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
427	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
428	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
429	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
430	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
431	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
432	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
433};
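/*
 * Each entry appears to pair an enclosure product ID with a per-byte
 * compare mask ('X' bytes are compared, others such as '*' are ignored)
 * and the maximum SCSI bus speed, in MB/s, allowed with that enclosure.
 */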
434
435/*
436 *  Function Prototypes
437 */
438static int ipr_reset_alert(struct ipr_cmnd *);
439static void ipr_process_ccn(struct ipr_cmnd *);
440static void ipr_process_error(struct ipr_cmnd *);
441static void ipr_reset_ioa_job(struct ipr_cmnd *);
442static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
443				   enum ipr_shutdown_type);
444
445#ifdef CONFIG_SCSI_IPR_TRACE
446/**
447 * ipr_trc_hook - Add a trace entry to the driver trace
448 * @ipr_cmd:	ipr command struct
449 * @type:		trace type
450 * @add_data:	additional data
451 *
452 * Return value:
453 * 	none
454 **/
455static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
456			 u8 type, u32 add_data)
457{
458	struct ipr_trace_entry *trace_entry;
459	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
460
461	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
462	trace_entry->time = jiffies;
463	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
464	trace_entry->type = type;
465	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
466	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
467	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
468	trace_entry->u.add_data = add_data;
469}
470#else
471#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
472#endif
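/*
 * When CONFIG_SCSI_IPR_TRACE is not set, the hook compiles away to a
 * no-op, so command tracing adds no runtime overhead.
 */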
473
474/**
475 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
476 * @ipr_cmd:	ipr command struct
477 *
478 * Return value:
479 * 	none
480 **/
481static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
482{
483	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
484	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
485	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
486
487	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
488	ioarcb->write_data_transfer_length = 0;
489	ioarcb->read_data_transfer_length = 0;
490	ioarcb->write_ioadl_len = 0;
491	ioarcb->read_ioadl_len = 0;
492	ioarcb->write_ioadl_addr =
493		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
494	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
495	ioasa->ioasc = 0;
496	ioasa->residual_data_len = 0;
497	ioasa->u.gata.status = 0;
498
499	ipr_cmd->scsi_cmd = NULL;
500	ipr_cmd->qc = NULL;
501	ipr_cmd->sense_buffer[0] = 0;
502	ipr_cmd->dma_use_sg = 0;
503}
504
505/**
506 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
507 * @ipr_cmd:	ipr command struct
508 *
509 * Return value:
510 * 	none
511 **/
512static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
513{
514	ipr_reinit_ipr_cmnd(ipr_cmd);
515	ipr_cmd->u.scratch = 0;
516	ipr_cmd->sibling = NULL;
517	init_timer(&ipr_cmd->timer);
518}
519
520/**
521 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
522 * @ioa_cfg:	ioa config struct
523 *
524 * Return value:
525 * 	pointer to ipr command struct
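 *
 * 	Note: the free queue is assumed to be non-empty here and the caller
 * 	is expected to hold the host lock.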
526 **/
527static
528struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
529{
530	struct ipr_cmnd *ipr_cmd;
531
532	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
533	list_del(&ipr_cmd->queue);
534	ipr_init_ipr_cmnd(ipr_cmd);
535
536	return ipr_cmd;
537}
538
539/**
540 * ipr_unmap_sglist - Unmap scatterlist if mapped
541 * @ioa_cfg:	ioa config struct
542 * @ipr_cmd:	ipr command struct
543 *
544 * Return value:
545 * 	nothing
546 **/
547static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
548			     struct ipr_cmnd *ipr_cmd)
549{
550	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
551
552	if (ipr_cmd->dma_use_sg) {
553		if (scsi_cmd->use_sg > 0) {
554			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
555				     scsi_cmd->use_sg,
556				     scsi_cmd->sc_data_direction);
557		} else {
558			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
559					 scsi_cmd->request_bufflen,
560					 scsi_cmd->sc_data_direction);
561		}
562	}
563}
564
565/**
566 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
567 * @ioa_cfg:	ioa config struct
568 * @clr_ints:     interrupts to clear
569 *
570 * This function masks all interrupts on the adapter, then clears the
571 * interrupts specified in the mask
572 *
573 * Return value:
574 * 	none
575 **/
576static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
577					  u32 clr_ints)
578{
579	volatile u32 int_reg;
580
581	/* Stop new interrupts */
582	ioa_cfg->allow_interrupts = 0;
583
584	/* Set interrupt mask to stop all new interrupts */
585	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
586
587	/* Clear any pending interrupts */
588	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
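	/* Read back to ensure the mask/clear writes are posted to the adapter */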
589	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
590}
591
592/**
593 * ipr_save_pcix_cmd_reg - Save PCI-X command register
594 * @ioa_cfg:	ioa config struct
595 *
596 * Return value:
597 * 	0 on success / -EIO on failure
598 **/
599static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
600{
601	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
602
603	if (pcix_cmd_reg == 0)
604		return 0;
605
606	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
607				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
608		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
609		return -EIO;
610	}
611
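	/* Keep data parity error recovery and relaxed ordering enabled in the
	   saved value that ipr_set_pcix_cmd_reg() later restores */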
612	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
613	return 0;
614}
615
616/**
617 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
618 * @ioa_cfg:	ioa config struct
619 *
620 * Return value:
621 * 	0 on success / -EIO on failure
622 **/
623static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
624{
625	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
626
627	if (pcix_cmd_reg) {
628		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
629					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
630			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
631			return -EIO;
632		}
633	}
634
635	return 0;
636}
637
638/**
639 * ipr_sata_eh_done - done function for aborted SATA commands
640 * @ipr_cmd:	ipr command struct
641 *
642 * This function is invoked for ops generated to SATA
643 * devices which are being aborted.
644 *
645 * Return value:
646 * 	none
647 **/
648static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
649{
650	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
651	struct ata_queued_cmd *qc = ipr_cmd->qc;
652	struct ipr_sata_port *sata_port = qc->ap->private_data;
653
654	qc->err_mask |= AC_ERR_OTHER;
655	sata_port->ioasa.status |= ATA_BUSY;
656	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
657	ata_qc_complete(qc);
658}
659
660/**
661 * ipr_scsi_eh_done - mid-layer done function for aborted ops
662 * @ipr_cmd:	ipr command struct
663 *
664 * This function is invoked by the interrupt handler for
665 * ops generated by the SCSI mid-layer which are being aborted.
666 *
667 * Return value:
668 * 	none
669 **/
670static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
671{
672	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
673	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
674
675	scsi_cmd->result |= (DID_ERROR << 16);
676
677	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
678	scsi_cmd->scsi_done(scsi_cmd);
679	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
680}
681
682/**
683 * ipr_fail_all_ops - Fails all outstanding ops.
684 * @ioa_cfg:	ioa config struct
685 *
686 * This function fails all outstanding ops.
687 *
688 * Return value:
689 * 	none
690 **/
691static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
692{
693	struct ipr_cmnd *ipr_cmd, *temp;
694
695	ENTER;
696	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
697		list_del(&ipr_cmd->queue);
698
699		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
700		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
701
702		if (ipr_cmd->scsi_cmd)
703			ipr_cmd->done = ipr_scsi_eh_done;
704		else if (ipr_cmd->qc)
705			ipr_cmd->done = ipr_sata_eh_done;
706
707		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
708		del_timer(&ipr_cmd->timer);
709		ipr_cmd->done(ipr_cmd);
710	}
711
712	LEAVE;
713}
714
715/**
716 * ipr_do_req -  Send driver initiated requests.
717 * @ipr_cmd:		ipr command struct
718 * @done:			done function
719 * @timeout_func:	timeout function
720 * @timeout:		timeout value
721 *
722 * This function sends the specified command to the adapter with the
723 * timeout given. The done function is invoked on command completion.
724 *
725 * Return value:
726 * 	none
727 **/
728static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
729		       void (*done) (struct ipr_cmnd *),
730		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
731{
732	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
733
734	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
735
736	ipr_cmd->done = done;
737
738	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
739	ipr_cmd->timer.expires = jiffies + timeout;
740	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
741
742	add_timer(&ipr_cmd->timer);
743
744	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
745
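	/* Order the IOARCB memory updates before the IOARRIN doorbell write
	   that tells the adapter to fetch the command */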
746	mb();
747	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
748	       ioa_cfg->regs.ioarrin_reg);
749}
750
751/**
752 * ipr_internal_cmd_done - Op done function for an internally generated op.
753 * @ipr_cmd:	ipr command struct
754 *
755 * This function is the op done function for an internally generated,
756 * blocking op. It simply wakes the sleeping thread.
757 *
758 * Return value:
759 * 	none
760 **/
761static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
762{
763	if (ipr_cmd->sibling)
764		ipr_cmd->sibling = NULL;
765	else
766		complete(&ipr_cmd->completion);
767}
768
769/**
770 * ipr_send_blocking_cmd - Send command and sleep on its completion.
771 * @ipr_cmd:	ipr command struct
772 * @timeout_func:	function to invoke if command times out
773 * @timeout:	timeout
774 *
775 * Return value:
776 * 	none
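 *
 * 	Note: assumes the host lock is held on entry; it is dropped while
 * 	waiting for completion and re-acquired before returning.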
777 **/
778static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
779				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
780				  u32 timeout)
781{
782	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
783
784	init_completion(&ipr_cmd->completion);
785	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
786
787	spin_unlock_irq(ioa_cfg->host->host_lock);
788	wait_for_completion(&ipr_cmd->completion);
789	spin_lock_irq(ioa_cfg->host->host_lock);
790}
791
792/**
793 * ipr_send_hcam - Send an HCAM to the adapter.
794 * @ioa_cfg:	ioa config struct
795 * @type:		HCAM type
796 * @hostrcb:	hostrcb struct
797 *
798 * This function will send a Host Controlled Async command to the adapter.
799 * If HCAMs are currently not allowed to be issued to the adapter, it will
800 * place the hostrcb on the free queue.
801 *
802 * Return value:
803 * 	none
804 **/
805static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
806			  struct ipr_hostrcb *hostrcb)
807{
808	struct ipr_cmnd *ipr_cmd;
809	struct ipr_ioarcb *ioarcb;
810
811	if (ioa_cfg->allow_cmds) {
812		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
813		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
814		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
815
816		ipr_cmd->u.hostrcb = hostrcb;
817		ioarcb = &ipr_cmd->ioarcb;
818
819		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
820		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
821		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
822		ioarcb->cmd_pkt.cdb[1] = type;
823		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
824		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
825
826		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
827		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
828		ipr_cmd->ioadl[0].flags_and_data_len =
829			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
830		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
831
832		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
833			ipr_cmd->done = ipr_process_ccn;
834		else
835			ipr_cmd->done = ipr_process_error;
836
837		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
838
839		mb();
840		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
841		       ioa_cfg->regs.ioarrin_reg);
842	} else {
843		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
844	}
845}
846
847/**
848 * ipr_init_res_entry - Initialize a resource entry struct.
849 * @res:	resource entry struct
850 *
851 * Return value:
852 * 	none
853 **/
854static void ipr_init_res_entry(struct ipr_resource_entry *res)
855{
856	res->needs_sync_complete = 0;
857	res->in_erp = 0;
858	res->add_to_ml = 0;
859	res->del_from_ml = 0;
860	res->resetting_device = 0;
861	res->sdev = NULL;
862	res->sata_port = NULL;
863}
864
865/**
866 * ipr_handle_config_change - Handle a config change from the adapter
867 * @ioa_cfg:	ioa config struct
868 * @hostrcb:	hostrcb
869 *
870 * Return value:
871 * 	none
872 **/
873static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
874			      struct ipr_hostrcb *hostrcb)
875{
876	struct ipr_resource_entry *res = NULL;
877	struct ipr_config_table_entry *cfgte;
878	u32 is_ndn = 1;
879
880	cfgte = &hostrcb->hcam.u.ccn.cfgte;
881
882	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
883		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
884			    sizeof(cfgte->res_addr))) {
885			is_ndn = 0;
886			break;
887		}
888	}
889
890	if (is_ndn) {
891		if (list_empty(&ioa_cfg->free_res_q)) {
892			ipr_send_hcam(ioa_cfg,
893				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
894				      hostrcb);
895			return;
896		}
897
898		res = list_entry(ioa_cfg->free_res_q.next,
899				 struct ipr_resource_entry, queue);
900
901		list_del(&res->queue);
902		ipr_init_res_entry(res);
903		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
904	}
905
906	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
907
908	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
909		if (res->sdev) {
910			res->del_from_ml = 1;
911			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
912			if (ioa_cfg->allow_ml_add_del)
913				schedule_work(&ioa_cfg->work_q);
914		} else
915			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
916	} else if (!res->sdev) {
917		res->add_to_ml = 1;
918		if (ioa_cfg->allow_ml_add_del)
919			schedule_work(&ioa_cfg->work_q);
920	}
921
922	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
923}
924
925/**
926 * ipr_process_ccn - Op done function for a CCN.
927 * @ipr_cmd:	ipr command struct
928 *
929 * This function is the op done function for a configuration
930 * change notification host controlled async from the adapter.
931 *
932 * Return value:
933 * 	none
934 **/
935static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
936{
937	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
938	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
939	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
940
941	list_del(&hostrcb->queue);
942	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
943
944	if (ioasc) {
945		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
946			dev_err(&ioa_cfg->pdev->dev,
947				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
948
949		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
950	} else {
951		ipr_handle_config_change(ioa_cfg, hostrcb);
952	}
953}
954
955/**
956 * ipr_log_vpd - Log the passed VPD to the error log.
957 * @vpd:		vendor/product id/sn struct
958 *
959 * Return value:
960 * 	none
961 **/
962static void ipr_log_vpd(struct ipr_vpd *vpd)
963{
964	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
965		    + IPR_SERIAL_NUM_LEN];
966
967	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
968	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
969	       IPR_PROD_ID_LEN);
970	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
971	ipr_err("Vendor/Product ID: %s\n", buffer);
972
973	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
974	buffer[IPR_SERIAL_NUM_LEN] = '\0';
975	ipr_err("    Serial Number: %s\n", buffer);
976}
977
978/**
979 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
980 * @vpd:		vendor/product id/sn/wwn struct
981 *
982 * Return value:
983 * 	none
984 **/
985static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
986{
987	ipr_log_vpd(&vpd->vpd);
988	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
989		be32_to_cpu(vpd->wwid[1]));
990}
991
992/**
993 * ipr_log_enhanced_cache_error - Log a cache error.
994 * @ioa_cfg:	ioa config struct
995 * @hostrcb:	hostrcb struct
996 *
997 * Return value:
998 * 	none
999 **/
1000static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1001					 struct ipr_hostrcb *hostrcb)
1002{
1003	struct ipr_hostrcb_type_12_error *error =
1004		&hostrcb->hcam.u.error.u.type_12_error;
1005
1006	ipr_err("-----Current Configuration-----\n");
1007	ipr_err("Cache Directory Card Information:\n");
1008	ipr_log_ext_vpd(&error->ioa_vpd);
1009	ipr_err("Adapter Card Information:\n");
1010	ipr_log_ext_vpd(&error->cfc_vpd);
1011
1012	ipr_err("-----Expected Configuration-----\n");
1013	ipr_err("Cache Directory Card Information:\n");
1014	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1015	ipr_err("Adapter Card Information:\n");
1016	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1017
1018	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1019		     be32_to_cpu(error->ioa_data[0]),
1020		     be32_to_cpu(error->ioa_data[1]),
1021		     be32_to_cpu(error->ioa_data[2]));
1022}
1023
1024/**
1025 * ipr_log_cache_error - Log a cache error.
1026 * @ioa_cfg:	ioa config struct
1027 * @hostrcb:	hostrcb struct
1028 *
1029 * Return value:
1030 * 	none
1031 **/
1032static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1033				struct ipr_hostrcb *hostrcb)
1034{
1035	struct ipr_hostrcb_type_02_error *error =
1036		&hostrcb->hcam.u.error.u.type_02_error;
1037
1038	ipr_err("-----Current Configuration-----\n");
1039	ipr_err("Cache Directory Card Information:\n");
1040	ipr_log_vpd(&error->ioa_vpd);
1041	ipr_err("Adapter Card Information:\n");
1042	ipr_log_vpd(&error->cfc_vpd);
1043
1044	ipr_err("-----Expected Configuration-----\n");
1045	ipr_err("Cache Directory Card Information:\n");
1046	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1047	ipr_err("Adapter Card Information:\n");
1048	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1049
1050	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1051		     be32_to_cpu(error->ioa_data[0]),
1052		     be32_to_cpu(error->ioa_data[1]),
1053		     be32_to_cpu(error->ioa_data[2]));
1054}
1055
1056/**
1057 * ipr_log_enhanced_config_error - Log a configuration error.
1058 * @ioa_cfg:	ioa config struct
1059 * @hostrcb:	hostrcb struct
1060 *
1061 * Return value:
1062 * 	none
1063 **/
1064static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1065					  struct ipr_hostrcb *hostrcb)
1066{
1067	int errors_logged, i;
1068	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1069	struct ipr_hostrcb_type_13_error *error;
1070
1071	error = &hostrcb->hcam.u.error.u.type_13_error;
1072	errors_logged = be32_to_cpu(error->errors_logged);
1073
1074	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1075		be32_to_cpu(error->errors_detected), errors_logged);
1076
1077	dev_entry = error->dev;
1078
1079	for (i = 0; i < errors_logged; i++, dev_entry++) {
1080		ipr_err_separator;
1081
1082		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1083		ipr_log_ext_vpd(&dev_entry->vpd);
1084
1085		ipr_err("-----New Device Information-----\n");
1086		ipr_log_ext_vpd(&dev_entry->new_vpd);
1087
1088		ipr_err("Cache Directory Card Information:\n");
1089		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1090
1091		ipr_err("Adapter Card Information:\n");
1092		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1093	}
1094}
1095
1096/**
1097 * ipr_log_config_error - Log a configuration error.
1098 * @ioa_cfg:	ioa config struct
1099 * @hostrcb:	hostrcb struct
1100 *
1101 * Return value:
1102 * 	none
1103 **/
1104static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1105				 struct ipr_hostrcb *hostrcb)
1106{
1107	int errors_logged, i;
1108	struct ipr_hostrcb_device_data_entry *dev_entry;
1109	struct ipr_hostrcb_type_03_error *error;
1110
1111	error = &hostrcb->hcam.u.error.u.type_03_error;
1112	errors_logged = be32_to_cpu(error->errors_logged);
1113
1114	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1115		be32_to_cpu(error->errors_detected), errors_logged);
1116
1117	dev_entry = error->dev;
1118
1119	for (i = 0; i < errors_logged; i++, dev_entry++) {
1120		ipr_err_separator;
1121
1122		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1123		ipr_log_vpd(&dev_entry->vpd);
1124
1125		ipr_err("-----New Device Information-----\n");
1126		ipr_log_vpd(&dev_entry->new_vpd);
1127
1128		ipr_err("Cache Directory Card Information:\n");
1129		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1130
1131		ipr_err("Adapter Card Information:\n");
1132		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1133
1134		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1135			be32_to_cpu(dev_entry->ioa_data[0]),
1136			be32_to_cpu(dev_entry->ioa_data[1]),
1137			be32_to_cpu(dev_entry->ioa_data[2]),
1138			be32_to_cpu(dev_entry->ioa_data[3]),
1139			be32_to_cpu(dev_entry->ioa_data[4]));
1140	}
1141}
1142
1143/**
1144 * ipr_log_enhanced_array_error - Log an array configuration error.
1145 * @ioa_cfg:	ioa config struct
1146 * @hostrcb:	hostrcb struct
1147 *
1148 * Return value:
1149 * 	none
1150 **/
1151static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1152					 struct ipr_hostrcb *hostrcb)
1153{
1154	int i, num_entries;
1155	struct ipr_hostrcb_type_14_error *error;
1156	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1157	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1158
1159	error = &hostrcb->hcam.u.error.u.type_14_error;
1160
1161	ipr_err_separator;
1162
1163	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1164		error->protection_level,
1165		ioa_cfg->host->host_no,
1166		error->last_func_vset_res_addr.bus,
1167		error->last_func_vset_res_addr.target,
1168		error->last_func_vset_res_addr.lun);
1169
1170	ipr_err_separator;
1171
1172	array_entry = error->array_member;
1173	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1174			    sizeof(error->array_member));
1175
1176	for (i = 0; i < num_entries; i++, array_entry++) {
1177		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1178			continue;
1179
1180		if (be32_to_cpu(error->exposed_mode_adn) == i)
1181			ipr_err("Exposed Array Member %d:\n", i);
1182		else
1183			ipr_err("Array Member %d:\n", i);
1184
1185		ipr_log_ext_vpd(&array_entry->vpd);
1186		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1187		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1188				 "Expected Location");
1189
1190		ipr_err_separator;
1191	}
1192}
1193
1194/**
1195 * ipr_log_array_error - Log an array configuration error.
1196 * @ioa_cfg:	ioa config struct
1197 * @hostrcb:	hostrcb struct
1198 *
1199 * Return value:
1200 * 	none
1201 **/
1202static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1203				struct ipr_hostrcb *hostrcb)
1204{
1205	int i;
1206	struct ipr_hostrcb_type_04_error *error;
1207	struct ipr_hostrcb_array_data_entry *array_entry;
1208	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1209
1210	error = &hostrcb->hcam.u.error.u.type_04_error;
1211
1212	ipr_err_separator;
1213
1214	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1215		error->protection_level,
1216		ioa_cfg->host->host_no,
1217		error->last_func_vset_res_addr.bus,
1218		error->last_func_vset_res_addr.target,
1219		error->last_func_vset_res_addr.lun);
1220
1221	ipr_err_separator;
1222
1223	array_entry = error->array_member;
1224
1225	for (i = 0; i < 18; i++) {
1226		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1227			continue;
1228
1229		if (be32_to_cpu(error->exposed_mode_adn) == i)
1230			ipr_err("Exposed Array Member %d:\n", i);
1231		else
1232			ipr_err("Array Member %d:\n", i);
1233
1234		ipr_log_vpd(&array_entry->vpd);
1235
1236		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1237		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1238				 "Expected Location");
1239
1240		ipr_err_separator;
1241
1242		if (i == 9)
1243			array_entry = error->array_member2;
1244		else
1245			array_entry++;
1246	}
1247}
1248
1249/**
1250 * ipr_log_hex_data - Log additional hex IOA error data.
1251 * @ioa_cfg:	ioa config struct
1252 * @data:		IOA error data
1253 * @len:		data length
1254 *
1255 * Return value:
1256 * 	none
1257 **/
1258static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1259{
1260	int i;
1261
1262	if (len == 0)
1263		return;
1264
1265	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1266		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1267
1268	for (i = 0; i < len / 4; i += 4) {
1269		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1270			be32_to_cpu(data[i]),
1271			be32_to_cpu(data[i+1]),
1272			be32_to_cpu(data[i+2]),
1273			be32_to_cpu(data[i+3]));
1274	}
1275}
1276
1277/**
1278 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1279 * @ioa_cfg:	ioa config struct
1280 * @hostrcb:	hostrcb struct
1281 *
1282 * Return value:
1283 * 	none
1284 **/
1285static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1286					    struct ipr_hostrcb *hostrcb)
1287{
1288	struct ipr_hostrcb_type_17_error *error;
1289
1290	error = &hostrcb->hcam.u.error.u.type_17_error;
1291	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1292
1293	ipr_err("%s\n", error->failure_reason);
1294	ipr_err("Remote Adapter VPD:\n");
1295	ipr_log_ext_vpd(&error->vpd);
1296	ipr_log_hex_data(ioa_cfg, error->data,
1297			 be32_to_cpu(hostrcb->hcam.length) -
1298			 (offsetof(struct ipr_hostrcb_error, u) +
1299			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1300}
1301
1302/**
1303 * ipr_log_dual_ioa_error - Log a dual adapter error.
1304 * @ioa_cfg:	ioa config struct
1305 * @hostrcb:	hostrcb struct
1306 *
1307 * Return value:
1308 * 	none
1309 **/
1310static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1311				   struct ipr_hostrcb *hostrcb)
1312{
1313	struct ipr_hostrcb_type_07_error *error;
1314
1315	error = &hostrcb->hcam.u.error.u.type_07_error;
1316	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1317
1318	ipr_err("%s\n", error->failure_reason);
1319	ipr_err("Remote Adapter VPD:\n");
1320	ipr_log_vpd(&error->vpd);
1321	ipr_log_hex_data(ioa_cfg, error->data,
1322			 be32_to_cpu(hostrcb->hcam.length) -
1323			 (offsetof(struct ipr_hostrcb_error, u) +
1324			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1325}
1326
1327static const struct {
1328	u8 active;
1329	char *desc;
1330} path_active_desc[] = {
1331	{ IPR_PATH_NO_INFO, "Path" },
1332	{ IPR_PATH_ACTIVE, "Active path" },
1333	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1334};
1335
1336static const struct {
1337	u8 state;
1338	char *desc;
1339} path_state_desc[] = {
1340	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1341	{ IPR_PATH_HEALTHY, "is healthy" },
1342	{ IPR_PATH_DEGRADED, "is degraded" },
1343	{ IPR_PATH_FAILED, "is failed" }
1344};
1345
1346/**
1347 * ipr_log_fabric_path - Log a fabric path error
1348 * @hostrcb:	hostrcb struct
1349 * @fabric:		fabric descriptor
1350 *
1351 * Return value:
1352 * 	none
1353 **/
1354static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1355				struct ipr_hostrcb_fabric_desc *fabric)
1356{
1357	int i, j;
1358	u8 path_state = fabric->path_state;
1359	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1360	u8 state = path_state & IPR_PATH_STATE_MASK;
1361
1362	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1363		if (path_active_desc[i].active != active)
1364			continue;
1365
1366		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1367			if (path_state_desc[j].state != state)
1368				continue;
1369
1370			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1371				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1372					     path_active_desc[i].desc, path_state_desc[j].desc,
1373					     fabric->ioa_port);
1374			} else if (fabric->cascaded_expander == 0xff) {
1375				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1376					     path_active_desc[i].desc, path_state_desc[j].desc,
1377					     fabric->ioa_port, fabric->phy);
1378			} else if (fabric->phy == 0xff) {
1379				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1380					     path_active_desc[i].desc, path_state_desc[j].desc,
1381					     fabric->ioa_port, fabric->cascaded_expander);
1382			} else {
1383				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1384					     path_active_desc[i].desc, path_state_desc[j].desc,
1385					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1386			}
1387			return;
1388		}
1389	}
1390
1391	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1392		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1393}
1394
1395static const struct {
1396	u8 type;
1397	char *desc;
1398} path_type_desc[] = {
1399	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1400	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1401	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1402	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1403};
1404
1405static const struct {
1406	u8 status;
1407	char *desc;
1408} path_status_desc[] = {
1409	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1410	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1411	{ IPR_PATH_CFG_FAILED, "Failed" },
1412	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1413	{ IPR_PATH_NOT_DETECTED, "Missing" },
1414	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1415};
1416
1417static const char *link_rate[] = {
1418	"unknown",
1419	"disabled",
1420	"phy reset problem",
1421	"spinup hold",
1422	"port selector",
1423	"unknown",
1424	"unknown",
1425	"unknown",
1426	"1.5Gbps",
1427	"3.0Gbps",
1428	"unknown",
1429	"unknown",
1430	"unknown",
1431	"unknown",
1432	"unknown",
1433	"unknown"
1434};
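/*
 * Indexed by the link rate value reported in the path element (masked
 * with IPR_PHY_LINK_RATE_MASK); entries 8 and 9 correspond to the SAS
 * 1.5 Gbps and 3.0 Gbps rates.
 */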
1435
1436/**
1437 * ipr_log_path_elem - Log a fabric path element.
1438 * @hostrcb:	hostrcb struct
1439 * @cfg:		fabric path element struct
1440 *
1441 * Return value:
1442 * 	none
1443 **/
1444static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1445			      struct ipr_hostrcb_config_element *cfg)
1446{
1447	int i, j;
1448	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1449	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1450
1451	if (type == IPR_PATH_CFG_NOT_EXIST)
1452		return;
1453
1454	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1455		if (path_type_desc[i].type != type)
1456			continue;
1457
1458		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1459			if (path_status_desc[j].status != status)
1460				continue;
1461
1462			if (type == IPR_PATH_CFG_IOA_PORT) {
1463				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1464					     path_status_desc[j].desc, path_type_desc[i].desc,
1465					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1466					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1467			} else {
1468				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1469					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1470						     path_status_desc[j].desc, path_type_desc[i].desc,
1471						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1472						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1473				} else if (cfg->cascaded_expander == 0xff) {
1474					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1475						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1476						     path_type_desc[i].desc, cfg->phy,
1477						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1478						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1479				} else if (cfg->phy == 0xff) {
1480					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1481						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1482						     path_type_desc[i].desc, cfg->cascaded_expander,
1483						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1484						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1485				} else {
1486					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1487						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1488						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1489						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1490						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1491				}
1492			}
1493			return;
1494		}
1495	}
1496
1497	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1498		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1499		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1500		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1501}
1502
1503/**
1504 * ipr_log_fabric_error - Log a fabric error.
1505 * @ioa_cfg:	ioa config struct
1506 * @hostrcb:	hostrcb struct
1507 *
1508 * Return value:
1509 * 	none
1510 **/
1511static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1512				 struct ipr_hostrcb *hostrcb)
1513{
1514	struct ipr_hostrcb_type_20_error *error;
1515	struct ipr_hostrcb_fabric_desc *fabric;
1516	struct ipr_hostrcb_config_element *cfg;
1517	int i, add_len;
1518
1519	error = &hostrcb->hcam.u.error.u.type_20_error;
1520	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1521	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1522
1523	add_len = be32_to_cpu(hostrcb->hcam.length) -
1524		(offsetof(struct ipr_hostrcb_error, u) +
1525		 offsetof(struct ipr_hostrcb_type_20_error, desc));
1526
1527	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1528		ipr_log_fabric_path(hostrcb, fabric);
1529		for_each_fabric_cfg(fabric, cfg)
1530			ipr_log_path_elem(hostrcb, cfg);
1531
1532		add_len -= be16_to_cpu(fabric->length);
1533		fabric = (struct ipr_hostrcb_fabric_desc *)
1534			((unsigned long)fabric + be16_to_cpu(fabric->length));
1535	}
1536
1537	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1538}
1539
1540/**
1541 * ipr_log_generic_error - Log an adapter error.
1542 * @ioa_cfg:	ioa config struct
1543 * @hostrcb:	hostrcb struct
1544 *
1545 * Return value:
1546 * 	none
1547 **/
1548static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1549				  struct ipr_hostrcb *hostrcb)
1550{
1551	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1552			 be32_to_cpu(hostrcb->hcam.length));
1553}
1554
1555/**
1556 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1557 * @ioasc:	IOASC
1558 *
1559 * This function will return the index into the ipr_error_table
1560 * for the specified IOASC. If the IOASC is not in the table,
1561 * 0 will be returned, which points to the entry used for unknown errors.
1562 *
1563 * Return value:
1564 * 	index into the ipr_error_table
1565 **/
1566static u32 ipr_get_error(u32 ioasc)
1567{
1568	int i;
1569
1570	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1571		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1572			return i;
1573
1574	return 0;
1575}
1576
1577/**
1578 * ipr_handle_log_data - Log an adapter error.
1579 * @ioa_cfg:	ioa config struct
1580 * @hostrcb:	hostrcb struct
1581 *
1582 * This function logs an adapter error to the system.
1583 *
1584 * Return value:
1585 * 	none
1586 **/
1587static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1588				struct ipr_hostrcb *hostrcb)
1589{
1590	u32 ioasc;
1591	int error_index;
1592
1593	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1594		return;
1595
1596	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1597		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1598
1599	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1600
1601	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1602	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1603		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1604		scsi_report_bus_reset(ioa_cfg->host,
1605				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1606	}
1607
1608	error_index = ipr_get_error(ioasc);
1609
1610	if (!ipr_error_table[error_index].log_hcam)
1611		return;
1612
1613	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1614
1615	/* Set indication we have logged an error */
1616	ioa_cfg->errors_logged++;
1617
1618	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1619		return;
1620	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1621		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1622
1623	switch (hostrcb->hcam.overlay_id) {
1624	case IPR_HOST_RCB_OVERLAY_ID_2:
1625		ipr_log_cache_error(ioa_cfg, hostrcb);
1626		break;
1627	case IPR_HOST_RCB_OVERLAY_ID_3:
1628		ipr_log_config_error(ioa_cfg, hostrcb);
1629		break;
1630	case IPR_HOST_RCB_OVERLAY_ID_4:
1631	case IPR_HOST_RCB_OVERLAY_ID_6:
1632		ipr_log_array_error(ioa_cfg, hostrcb);
1633		break;
1634	case IPR_HOST_RCB_OVERLAY_ID_7:
1635		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1636		break;
1637	case IPR_HOST_RCB_OVERLAY_ID_12:
1638		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1639		break;
1640	case IPR_HOST_RCB_OVERLAY_ID_13:
1641		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1642		break;
1643	case IPR_HOST_RCB_OVERLAY_ID_14:
1644	case IPR_HOST_RCB_OVERLAY_ID_16:
1645		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1646		break;
1647	case IPR_HOST_RCB_OVERLAY_ID_17:
1648		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1649		break;
1650	case IPR_HOST_RCB_OVERLAY_ID_20:
1651		ipr_log_fabric_error(ioa_cfg, hostrcb);
1652		break;
1653	case IPR_HOST_RCB_OVERLAY_ID_1:
1654	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1655	default:
1656		ipr_log_generic_error(ioa_cfg, hostrcb);
1657		break;
1658	}
1659}
1660
1661/**
1662 * ipr_process_error - Op done function for an adapter error log.
1663 * @ipr_cmd:	ipr command struct
1664 *
1665 * This function is the op done function for an error log host
1666 * controlled async from the adapter. It will log the error and
1667 * send the HCAM back to the adapter.
1668 *
1669 * Return value:
1670 * 	none
1671 **/
1672static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1673{
1674	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1675	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1676	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1677	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1678
1679	list_del(&hostrcb->queue);
1680	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1681
1682	if (!ioasc) {
1683		ipr_handle_log_data(ioa_cfg, hostrcb);
1684		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1685			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1686	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1687		dev_err(&ioa_cfg->pdev->dev,
1688			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1689	}
1690
1691	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1692}
1693
1694/**
1695 * ipr_timeout -  An internally generated op has timed out.
1696 * @ipr_cmd:	ipr command struct
1697 *
1698 * This function blocks host requests and initiates an
1699 * adapter reset.
1700 *
1701 * Return value:
1702 * 	none
1703 **/
1704static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1705{
1706	unsigned long lock_flags = 0;
1707	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1708
1709	ENTER;
1710	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1711
1712	ioa_cfg->errors_logged++;
1713	dev_err(&ioa_cfg->pdev->dev,
1714		"Adapter being reset due to command timeout.\n");
1715
1716	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1717		ioa_cfg->sdt_state = GET_DUMP;
1718
1719	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1720		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1721
1722	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1723	LEAVE;
1724}
1725
1726/**
1727 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1728 * @ipr_cmd:	ipr command struct
1729 *
1730 * This function blocks host requests and initiates an
1731 * adapter reset.
1732 *
1733 * Return value:
1734 * 	none
1735 **/
1736static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1737{
1738	unsigned long lock_flags = 0;
1739	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1740
1741	ENTER;
1742	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1743
1744	ioa_cfg->errors_logged++;
1745	dev_err(&ioa_cfg->pdev->dev,
1746		"Adapter timed out transitioning to operational.\n");
1747
1748	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1749		ioa_cfg->sdt_state = GET_DUMP;
1750
1751	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1752		if (ipr_fastfail)
1753			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1754		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1755	}
1756
1757	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1758	LEAVE;
1759}
1760
1761/**
1762 * ipr_reset_reload - Reset/Reload the IOA
1763 * @ioa_cfg:		ioa config struct
1764 * @shutdown_type:	shutdown type
1765 *
1766 * This function resets the adapter and re-initializes it.
1767 * This function assumes that all new host commands have been stopped.
1768 * Return value:
1769 * 	SUCCESS / FAILED
1770 **/
1771static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1772			    enum ipr_shutdown_type shutdown_type)
1773{
1774	if (!ioa_cfg->in_reset_reload)
1775		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1776
1777	spin_unlock_irq(ioa_cfg->host->host_lock);
1778	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1779	spin_lock_irq(ioa_cfg->host->host_lock);
1780
1781	/* If we got hit with a host reset while we were already resetting
1782	 the adapter for some reason, and that reset failed, report failure. */
1783	if (ioa_cfg->ioa_is_dead) {
1784		ipr_trace;
1785		return FAILED;
1786	}
1787
1788	return SUCCESS;
1789}
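
/*
 * Note: callers of ipr_reset_reload() must hold the host lock.  The function
 * releases the lock while waiting for the reset/reload to complete and
 * re-acquires it before returning.
 */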
1790
1791/**
1792 * ipr_find_ses_entry - Find matching SES in SES table
1793 * @res:	resource entry struct of SES
1794 *
1795 * Return value:
1796 * 	pointer to SES table entry / NULL on failure
1797 **/
1798static const struct ipr_ses_table_entry *
1799ipr_find_ses_entry(struct ipr_resource_entry *res)
1800{
1801	int i, j, matches;
1802	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1803
1804	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1805		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1806			if (ste->compare_product_id_byte[j] == 'X') {
1807				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1808					matches++;
1809				else
1810					break;
1811			} else
1812				matches++;
1813		}
1814
1815		if (matches == IPR_PROD_ID_LEN)
1816			return ste;
1817	}
1818
1819	return NULL;
1820}
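
/*
 * In the ipr_ses_table entries scanned above, an 'X' in
 * compare_product_id_byte marks a product ID byte that must match exactly;
 * any other character causes that byte to be ignored during the comparison.
 */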
1821
1822/**
1823 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1824 * @ioa_cfg:	ioa config struct
1825 * @bus:		SCSI bus
1826 * @bus_width:	bus width
1827 *
1828 * Return value:
1829 *	SCSI bus speed in units of 100KHz (e.g. 1600 means 160 MHz).
1830 *	For a 2-byte wide SCSI bus, the maximum transfer rate is
1831 *	twice that of a narrow bus (e.g. a wide-enabled bus running
1832 *	at 160 MHz can transfer up to 320 MB/sec).
1833 **/
1834static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1835{
1836	struct ipr_resource_entry *res;
1837	const struct ipr_ses_table_entry *ste;
1838	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1839
1840	/* Loop through each config table entry in the config table buffer */
1841	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1842		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1843			continue;
1844
1845		if (bus != res->cfgte.res_addr.bus)
1846			continue;
1847
1848		if (!(ste = ipr_find_ses_entry(res)))
1849			continue;
1850
1851		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1852	}
1853
1854	return max_xfer_rate;
1855}
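
/*
 * Worked example (illustrative; assumes ste->max_bus_speed_limit is in
 * MB/sec): an 80 MB/sec SES limit on a 16-bit (2-byte wide) bus gives
 * (80 * 10) / (16 / 8) = 400, i.e. 40 MHz in the 100KHz units described
 * above; on a wide bus this corresponds to 80 MB/sec.
 */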
1856
1857/**
1858 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1859 * @ioa_cfg:		ioa config struct
1860 * @max_delay:		max delay in micro-seconds to wait
1861 *
1862 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1863 *
1864 * Return value:
1865 * 	0 on success / other on failure
1866 **/
1867static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1868{
1869	volatile u32 pcii_reg;
1870	int delay = 1;
1871
1872	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1873	while (delay < max_delay) {
1874		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1875
1876		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1877			return 0;
1878
1879		/* udelay cannot be used if delay is more than a few milliseconds */
1880		if ((delay / 1000) > MAX_UDELAY_MS)
1881			mdelay(delay / 1000);
1882		else
1883			udelay(delay);
1884
1885		delay += delay;
1886	}
1887	return -EIO;
1888}
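
/*
 * The polling delay above doubles on each pass (1, 2, 4, ... microseconds),
 * so the loop backs off exponentially and busy-waits for at most roughly
 * twice max_delay before giving up with -EIO.
 */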
1889
1890/**
1891 * ipr_get_ldump_data_section - Dump IOA memory
1892 * @ioa_cfg:			ioa config struct
1893 * @start_addr:			adapter address to dump
1894 * @dest:				destination kernel buffer
1895 * @length_in_words:	length to dump in 4 byte words
1896 *
1897 * Return value:
1898 * 	0 on success / -EIO on failure
1899 **/
1900static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1901				      u32 start_addr,
1902				      __be32 *dest, u32 length_in_words)
1903{
1904	volatile u32 temp_pcii_reg;
1905	int i, delay = 0;
1906
1907	/* Write IOA interrupt reg starting LDUMP state  */
1908	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1909	       ioa_cfg->regs.set_uproc_interrupt_reg);
1910
1911	/* Wait for IO debug acknowledge */
1912	if (ipr_wait_iodbg_ack(ioa_cfg,
1913			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1914		dev_err(&ioa_cfg->pdev->dev,
1915			"IOA dump long data transfer timeout\n");
1916		return -EIO;
1917	}
1918
1919	/* Signal LDUMP interlocked - clear IO debug ack */
1920	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1921	       ioa_cfg->regs.clr_interrupt_reg);
1922
1923	/* Write Mailbox with starting address */
1924	writel(start_addr, ioa_cfg->ioa_mailbox);
1925
1926	/* Signal address valid - clear IOA Reset alert */
1927	writel(IPR_UPROCI_RESET_ALERT,
1928	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1929
1930	for (i = 0; i < length_in_words; i++) {
1931		/* Wait for IO debug acknowledge */
1932		if (ipr_wait_iodbg_ack(ioa_cfg,
1933				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1934			dev_err(&ioa_cfg->pdev->dev,
1935				"IOA dump short data transfer timeout\n");
1936			return -EIO;
1937		}
1938
1939		/* Read data from mailbox and increment destination pointer */
1940		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1941		dest++;
1942
1943		/* For all but the last word of data, signal data received */
1944		if (i < (length_in_words - 1)) {
1945			/* Signal dump data received - Clear IO debug Ack */
1946			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1947			       ioa_cfg->regs.clr_interrupt_reg);
1948		}
1949	}
1950
1951	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1952	writel(IPR_UPROCI_RESET_ALERT,
1953	       ioa_cfg->regs.set_uproc_interrupt_reg);
1954
1955	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1956	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1957
1958	/* Signal dump data received - Clear IO debug Ack */
1959	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1960	       ioa_cfg->regs.clr_interrupt_reg);
1961
1962	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1963	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1964		temp_pcii_reg =
1965		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1966
1967		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1968			return 0;
1969
1970		udelay(10);
1971		delay += 10;
1972	}
1973
1974	return 0;
1975}
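
/*
 * Outline of the LDUMP handshake implemented above: raise RESET_ALERT and
 * IO_DEBUG_ALERT, wait for the IOA's IO debug acknowledge, clear the
 * acknowledge, write the start address to the mailbox and clear RESET_ALERT
 * to mark the address valid.  Each data word is then fetched by waiting for
 * an acknowledge, reading the mailbox and clearing the acknowledge.  The
 * block ends by re-raising RESET_ALERT, clearing IO_DEBUG_ALERT and the
 * final acknowledge, then waiting for the IOA to drop RESET_ALERT.
 */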
1976
1977#ifdef CONFIG_SCSI_IPR_DUMP
1978/**
1979 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1980 * @ioa_cfg:		ioa config struct
1981 * @pci_address:	adapter address
1982 * @length:			length of data to copy
1983 *
1984 * Copy data from PCI adapter to kernel buffer.
1985 * Note: length MUST be a 4 byte multiple
1986 * Return value:
1987 * 	0 on success / other on failure
1988 **/
1989static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1990			unsigned long pci_address, u32 length)
1991{
1992	int bytes_copied = 0;
1993	int cur_len, rc, rem_len, rem_page_len;
1994	__be32 *page;
1995	unsigned long lock_flags = 0;
1996	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1997
1998	while (bytes_copied < length &&
1999	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2000		if (ioa_dump->page_offset >= PAGE_SIZE ||
2001		    ioa_dump->page_offset == 0) {
2002			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2003
2004			if (!page) {
2005				ipr_trace;
2006				return bytes_copied;
2007			}
2008
2009			ioa_dump->page_offset = 0;
2010			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2011			ioa_dump->next_page_index++;
2012		} else
2013			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2014
2015		rem_len = length - bytes_copied;
2016		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2017		cur_len = min(rem_len, rem_page_len);
2018
2019		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2020		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2021			rc = -EIO;
2022		} else {
2023			rc = ipr_get_ldump_data_section(ioa_cfg,
2024							pci_address + bytes_copied,
2025							&page[ioa_dump->page_offset / 4],
2026							(cur_len / sizeof(u32)));
2027		}
2028		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2029
2030		if (!rc) {
2031			ioa_dump->page_offset += cur_len;
2032			bytes_copied += cur_len;
2033		} else {
2034			ipr_trace;
2035			break;
2036		}
2037		schedule();
2038	}
2039
2040	return bytes_copied;
2041}
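
/*
 * ipr_sdt_copy() buffers the dump one page at a time (allocated with
 * GFP_ATOMIC), takes the host lock only around each call to
 * ipr_get_ldump_data_section(), and calls schedule() between sections so
 * copying a large dump does not monopolize the CPU.
 */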
2042
2043/**
2044 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2045 * @hdr:	dump entry header struct
2046 *
2047 * Return value:
2048 * 	nothing
2049 **/
2050static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2051{
2052	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2053	hdr->num_elems = 1;
2054	hdr->offset = sizeof(*hdr);
2055	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2056}
2057
2058/**
2059 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2060 * @ioa_cfg:	ioa config struct
2061 * @driver_dump:	driver dump struct
2062 *
2063 * Return value:
2064 * 	nothing
2065 **/
2066static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2067				   struct ipr_driver_dump *driver_dump)
2068{
2069	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2070
2071	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2072	driver_dump->ioa_type_entry.hdr.len =
2073		sizeof(struct ipr_dump_ioa_type_entry) -
2074		sizeof(struct ipr_dump_entry_header);
2075	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2076	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2077	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2078	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2079		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2080		ucode_vpd->minor_release[1];
2081	driver_dump->hdr.num_entries++;
2082}
2083
2084/**
2085 * ipr_dump_version_data - Fill in the driver version in the dump.
2086 * @ioa_cfg:	ioa config struct
2087 * @driver_dump:	driver dump struct
2088 *
2089 * Return value:
2090 * 	nothing
2091 **/
2092static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2093				  struct ipr_driver_dump *driver_dump)
2094{
2095	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2096	driver_dump->version_entry.hdr.len =
2097		sizeof(struct ipr_dump_version_entry) -
2098		sizeof(struct ipr_dump_entry_header);
2099	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2100	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2101	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2102	driver_dump->hdr.num_entries++;
2103}
2104
2105/**
2106 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2107 * @ioa_cfg:	ioa config struct
2108 * @driver_dump:	driver dump struct
2109 *
2110 * Return value:
2111 * 	nothing
2112 **/
2113static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2114				   struct ipr_driver_dump *driver_dump)
2115{
2116	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2117	driver_dump->trace_entry.hdr.len =
2118		sizeof(struct ipr_dump_trace_entry) -
2119		sizeof(struct ipr_dump_entry_header);
2120	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2121	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2122	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2123	driver_dump->hdr.num_entries++;
2124}
2125
2126/**
2127 * ipr_dump_location_data - Fill in the IOA location in the dump.
2128 * @ioa_cfg:	ioa config struct
2129 * @driver_dump:	driver dump struct
2130 *
2131 * Return value:
2132 * 	nothing
2133 **/
2134static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2135				   struct ipr_driver_dump *driver_dump)
2136{
2137	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2138	driver_dump->location_entry.hdr.len =
2139		sizeof(struct ipr_dump_location_entry) -
2140		sizeof(struct ipr_dump_entry_header);
2141	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2142	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2143	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2144	driver_dump->hdr.num_entries++;
2145}
2146
2147/**
2148 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2149 * @ioa_cfg:	ioa config struct
2150 * @dump:		dump struct
2151 *
2152 * Return value:
2153 * 	nothing
2154 **/
2155static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2156{
2157	unsigned long start_addr, sdt_word;
2158	unsigned long lock_flags = 0;
2159	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2160	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2161	u32 num_entries, start_off, end_off;
2162	u32 bytes_to_copy, bytes_copied, rc;
2163	struct ipr_sdt *sdt;
2164	int i;
2165
2166	ENTER;
2167
2168	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2169
2170	if (ioa_cfg->sdt_state != GET_DUMP) {
2171		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2172		return;
2173	}
2174
2175	start_addr = readl(ioa_cfg->ioa_mailbox);
2176
2177	if (!ipr_sdt_is_fmt2(start_addr)) {
2178		dev_err(&ioa_cfg->pdev->dev,
2179			"Invalid dump table format: %lx\n", start_addr);
2180		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2181		return;
2182	}
2183
2184	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2185
2186	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2187
2188	/* Initialize the overall dump header */
2189	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2190	driver_dump->hdr.num_entries = 1;
2191	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2192	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2193	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2194	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2195
2196	ipr_dump_version_data(ioa_cfg, driver_dump);
2197	ipr_dump_location_data(ioa_cfg, driver_dump);
2198	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2199	ipr_dump_trace_data(ioa_cfg, driver_dump);
2200
2201	/* Update dump_header */
2202	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2203
2204	/* IOA Dump entry */
2205	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2206	ioa_dump->format = IPR_SDT_FMT2;
2207	ioa_dump->hdr.len = 0;
2208	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2209	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2210
2211	/* The first entries in the sdt are actually a list of dump addresses
2212	 and lengths used to gather the real dump data.  sdt points to the
2213	 IOA-generated dump table.  Dump data will be extracted based on
2214	 entries in this table. */
2215	sdt = &ioa_dump->sdt;
2216
2217	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2218					sizeof(struct ipr_sdt) / sizeof(__be32));
2219
2220	/* Smart Dump table is ready to use and the first entry is valid */
2221	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2222		dev_err(&ioa_cfg->pdev->dev,
2223			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2224			rc, be32_to_cpu(sdt->hdr.state));
2225		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2226		ioa_cfg->sdt_state = DUMP_OBTAINED;
2227		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2228		return;
2229	}
2230
2231	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2232
2233	if (num_entries > IPR_NUM_SDT_ENTRIES)
2234		num_entries = IPR_NUM_SDT_ENTRIES;
2235
2236	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2237
2238	for (i = 0; i < num_entries; i++) {
2239		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2240			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2241			break;
2242		}
2243
2244		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2245			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2246			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2247			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2248
2249			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2250				bytes_to_copy = end_off - start_off;
2251				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2252					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2253					continue;
2254				}
2255
2256				/* Copy data from adapter to driver buffers */
2257				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2258							    bytes_to_copy);
2259
2260				ioa_dump->hdr.len += bytes_copied;
2261
2262				if (bytes_copied != bytes_to_copy) {
2263					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2264					break;
2265				}
2266			}
2267		}
2268	}
2269
2270	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2271
2272	/* Update dump_header */
2273	driver_dump->hdr.len += ioa_dump->hdr.len;
2274	wmb();
2275	ioa_cfg->sdt_state = DUMP_OBTAINED;
2276	LEAVE;
2277}
2278
2279#else
2280#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2281#endif
2282
2283/**
2284 * ipr_release_dump - Free adapter dump memory
2285 * @kref:	kref struct
2286 *
2287 * Return value:
2288 *	nothing
2289 **/
2290static void ipr_release_dump(struct kref *kref)
2291{
2292	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2293	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2294	unsigned long lock_flags = 0;
2295	int i;
2296
2297	ENTER;
2298	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2299	ioa_cfg->dump = NULL;
2300	ioa_cfg->sdt_state = INACTIVE;
2301	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2302
2303	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2304		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2305
2306	kfree(dump);
2307	LEAVE;
2308}
2309
2310/**
2311 * ipr_worker_thread - Worker thread
2312 * @work:		work struct
2313 *
2314 * Called at task level from a work thread. This function takes care
2315 * of adding and removing devices from the mid-layer as configuration
2316 * changes are detected by the adapter.
2317 *
2318 * Return value:
2319 * 	nothing
2320 **/
2321static void ipr_worker_thread(struct work_struct *work)
2322{
2323	unsigned long lock_flags;
2324	struct ipr_resource_entry *res;
2325	struct scsi_device *sdev;
2326	struct ipr_dump *dump;
2327	struct ipr_ioa_cfg *ioa_cfg =
2328		container_of(work, struct ipr_ioa_cfg, work_q);
2329	u8 bus, target, lun;
2330	int did_work;
2331
2332	ENTER;
2333	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2334
2335	if (ioa_cfg->sdt_state == GET_DUMP) {
2336		dump = ioa_cfg->dump;
2337		if (!dump) {
2338			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2339			return;
2340		}
2341		kref_get(&dump->kref);
2342		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2343		ipr_get_ioa_dump(ioa_cfg, dump);
2344		kref_put(&dump->kref, ipr_release_dump);
2345
2346		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2347		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2348			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2349		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2350		return;
2351	}
2352
2353restart:
2354	do {
2355		did_work = 0;
2356		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2357			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2358			return;
2359		}
2360
2361		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2362			if (res->del_from_ml && res->sdev) {
2363				did_work = 1;
2364				sdev = res->sdev;
2365				if (!scsi_device_get(sdev)) {
2366					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2367					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2368					scsi_remove_device(sdev);
2369					scsi_device_put(sdev);
2370					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2371				}
2372				break;
2373			}
2374		}
2375	} while(did_work);
2376
2377	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2378		if (res->add_to_ml) {
2379			bus = res->cfgte.res_addr.bus;
2380			target = res->cfgte.res_addr.target;
2381			lun = res->cfgte.res_addr.lun;
2382			res->add_to_ml = 0;
2383			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2384			scsi_add_device(ioa_cfg->host, bus, target, lun);
2385			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2386			goto restart;
2387		}
2388	}
2389
2390	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2391	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2392	LEAVE;
2393}
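
/*
 * Note on the loops above: the host lock must be dropped around
 * scsi_remove_device()/scsi_add_device(), so the resource list may change
 * underneath us.  That is why removals are retried while (did_work) and why
 * each addition jumps back to the restart label to rescan the list.
 */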
2394
2395#ifdef CONFIG_SCSI_IPR_TRACE
2396/**
2397 * ipr_read_trace - Dump the adapter trace
2398 * @kobj:		kobject struct
2399 * @buf:		buffer
2400 * @off:		offset
2401 * @count:		buffer size
2402 *
2403 * Return value:
2404 *	number of bytes printed to buffer
2405 **/
2406static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2407			      loff_t off, size_t count)
2408{
2409	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2410	struct Scsi_Host *shost = class_to_shost(cdev);
2411	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2412	unsigned long lock_flags = 0;
2413	int size = IPR_TRACE_SIZE;
2414	char *src = (char *)ioa_cfg->trace;
2415
2416	if (off > size)
2417		return 0;
2418	if (off + count > size) {
2419		size -= off;
2420		count = size;
2421	}
2422
2423	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2424	memcpy(buf, &src[off], count);
2425	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2426	return count;
2427}
2428
2429static struct bin_attribute ipr_trace_attr = {
2430	.attr =	{
2431		.name = "trace",
2432		.mode = S_IRUGO,
2433	},
2434	.size = 0,
2435	.read = ipr_read_trace,
2436};
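
/*
 * Illustrative usage (not part of the original source): the "trace" binary
 * attribute is attached to the Scsi_Host class device, so it would typically
 * be readable as /sys/class/scsi_host/host<N>/trace, e.g.:
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=ipr_trace.bin
 *
 * The exact sysfs path is an assumption based on the usual shost layout.
 */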
2437#endif
2438
2439static const struct {
2440	enum ipr_cache_state state;
2441	char *name;
2442} cache_state [] = {
2443	{ CACHE_NONE, "none" },
2444	{ CACHE_DISABLED, "disabled" },
2445	{ CACHE_ENABLED, "enabled" }
2446};
2447
2448/**
2449 * ipr_show_write_caching - Show the write caching attribute
2450 * @class_dev:	class device struct
2451 * @buf:		buffer
2452 *
2453 * Return value:
2454 *	number of bytes printed to buffer
2455 **/
2456static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2457{
2458	struct Scsi_Host *shost = class_to_shost(class_dev);
2459	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2460	unsigned long lock_flags = 0;
2461	int i, len = 0;
2462
2463	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2464	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2465		if (cache_state[i].state == ioa_cfg->cache_state) {
2466			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2467			break;
2468		}
2469	}
2470	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2471	return len;
2472}
2473
2474
2475/**
2476 * ipr_store_write_caching - Enable/disable adapter write cache
2477 * @class_dev:	class_device struct
2478 * @buf:		buffer
2479 * @count:		buffer size
2480 *
2481 * This function will enable/disable adapter write cache.
2482 *
2483 * Return value:
2484 * 	count on success / other on failure
2485 **/
2486static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2487					const char *buf, size_t count)
2488{
2489	struct Scsi_Host *shost = class_to_shost(class_dev);
2490	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2491	unsigned long lock_flags = 0;
2492	enum ipr_cache_state new_state = CACHE_INVALID;
2493	int i;
2494
2495	if (!capable(CAP_SYS_ADMIN))
2496		return -EACCES;
2497	if (ioa_cfg->cache_state == CACHE_NONE)
2498		return -EINVAL;
2499
2500	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2501		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2502			new_state = cache_state[i].state;
2503			break;
2504		}
2505	}
2506
2507	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2508		return -EINVAL;
2509
2510	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2511	if (ioa_cfg->cache_state == new_state) {
2512		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2513		return count;
2514	}
2515
2516	ioa_cfg->cache_state = new_state;
2517	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2518		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2519	if (!ioa_cfg->in_reset_reload)
2520		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2521	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2522	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2523
2524	return count;
2525}
2526
2527static struct class_device_attribute ipr_ioa_cache_attr = {
2528	.attr = {
2529		.name =		"write_cache",
2530		.mode =		S_IRUGO | S_IWUSR,
2531	},
2532	.show = ipr_show_write_caching,
2533	.store = ipr_store_write_caching
2534};
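
/*
 * Illustrative usage (assumed sysfs path for shost class-device attributes):
 *
 *	cat /sys/class/scsi_host/host<N>/write_cache   (none|disabled|enabled)
 *	echo disabled > /sys/class/scsi_host/host<N>/write_cache
 *
 * Changing the setting triggers a normal-shutdown adapter reset, and the
 * write blocks until that reset/reload completes.
 */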
2535
2536/**
2537 * ipr_show_fw_version - Show the firmware version
2538 * @class_dev:	class device struct
2539 * @buf:		buffer
2540 *
2541 * Return value:
2542 *	number of bytes printed to buffer
2543 **/
2544static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2545{
2546	struct Scsi_Host *shost = class_to_shost(class_dev);
2547	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2548	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2549	unsigned long lock_flags = 0;
2550	int len;
2551
2552	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2553	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2554		       ucode_vpd->major_release, ucode_vpd->card_type,
2555		       ucode_vpd->minor_release[0],
2556		       ucode_vpd->minor_release[1]);
2557	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2558	return len;
2559}
2560
2561static struct class_device_attribute ipr_fw_version_attr = {
2562	.attr = {
2563		.name =		"fw_version",
2564		.mode =		S_IRUGO,
2565	},
2566	.show = ipr_show_fw_version,
2567};
2568
2569/**
2570 * ipr_show_log_level - Show the adapter's error logging level
2571 * @class_dev:	class device struct
2572 * @buf:		buffer
2573 *
2574 * Return value:
2575 * 	number of bytes printed to buffer
2576 **/
2577static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2578{
2579	struct Scsi_Host *shost = class_to_shost(class_dev);
2580	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2581	unsigned long lock_flags = 0;
2582	int len;
2583
2584	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2585	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2586	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2587	return len;
2588}
2589
2590/**
2591 * ipr_store_log_level - Change the adapter's error logging level
2592 * @class_dev:	class device struct
2593 * @buf:		buffer
2594 *
2595 * Return value:
2596 * 	number of bytes consumed from the buffer
2597 **/
2598static ssize_t ipr_store_log_level(struct class_device *class_dev,
2599				   const char *buf, size_t count)
2600{
2601	struct Scsi_Host *shost = class_to_shost(class_dev);
2602	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2603	unsigned long lock_flags = 0;
2604
2605	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2606	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2607	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2608	return strlen(buf);
2609}
2610
2611static struct class_device_attribute ipr_log_level_attr = {
2612	.attr = {
2613		.name =		"log_level",
2614		.mode =		S_IRUGO | S_IWUSR,
2615	},
2616	.show = ipr_show_log_level,
2617	.store = ipr_store_log_level
2618};
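
/*
 * Illustrative usage (assumed path): reading
 * /sys/class/scsi_host/host<N>/log_level shows the current level and writing
 * a decimal value changes it.  The level is compared against each error
 * table entry's log_hcam value in ipr_handle_log_data() to decide whether
 * detailed error data is logged.
 */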
2619
2620/**
2621 * ipr_store_diagnostics - IOA Diagnostics interface
2622 * @class_dev:	class_device struct
2623 * @buf:		buffer
2624 * @count:		buffer size
2625 *
2626 * This function will reset the adapter and wait a reasonable
2627 * amount of time for any errors that the adapter might log.
2628 *
2629 * Return value:
2630 * 	count on success / other on failure
2631 **/
2632static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2633				     const char *buf, size_t count)
2634{
2635	struct Scsi_Host *shost = class_to_shost(class_dev);
2636	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2637	unsigned long lock_flags = 0;
2638	int rc = count;
2639
2640	if (!capable(CAP_SYS_ADMIN))
2641		return -EACCES;
2642
2643	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2644	while(ioa_cfg->in_reset_reload) {
2645		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2646		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2647		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2648	}
2649
2650	ioa_cfg->errors_logged = 0;
2651	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2652
2653	if (ioa_cfg->in_reset_reload) {
2654		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2655		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2656
2657		/* Wait for a second for any errors to be logged */
2658		msleep(1000);
2659	} else {
2660		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2661		return -EIO;
2662	}
2663
2664	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2665	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2666		rc = -EIO;
2667	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2668
2669	return rc;
2670}
2671
2672static struct class_device_attribute ipr_diagnostics_attr = {
2673	.attr = {
2674		.name =		"run_diagnostics",
2675		.mode =		S_IWUSR,
2676	},
2677	.store = ipr_store_diagnostics
2678};
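
/*
 * Illustrative usage (assumed path):
 * "echo 1 > /sys/class/scsi_host/host<N>/run_diagnostics" resets the
 * adapter, waits about a second for any errors to be logged, and returns
 * -EIO if the adapter logged any errors during the test.
 */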
2679
2680/**
2681 * ipr_show_adapter_state - Show the adapter's state
2682 * @class_dev:	class device struct
2683 * @buf:		buffer
2684 *
2685 * Return value:
2686 * 	number of bytes printed to buffer
2687 **/
2688static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2689{
2690	struct Scsi_Host *shost = class_to_shost(class_dev);
2691	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2692	unsigned long lock_flags = 0;
2693	int len;
2694
2695	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2696	if (ioa_cfg->ioa_is_dead)
2697		len = snprintf(buf, PAGE_SIZE, "offline\n");
2698	else
2699		len = snprintf(buf, PAGE_SIZE, "online\n");
2700	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2701	return len;
2702}
2703
2704/**
2705 * ipr_store_adapter_state - Change adapter state
2706 * @class_dev:	class_device struct
2707 * @buf:		buffer
2708 * @count:		buffer size
2709 *
2710 * This function will change the adapter's state.
2711 *
2712 * Return value:
2713 * 	count on success / other on failure
2714 **/
2715static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2716				       const char *buf, size_t count)
2717{
2718	struct Scsi_Host *shost = class_to_shost(class_dev);
2719	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2720	unsigned long lock_flags;
2721	int result = count;
2722
2723	if (!capable(CAP_SYS_ADMIN))
2724		return -EACCES;
2725
2726	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2727	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2728		ioa_cfg->ioa_is_dead = 0;
2729		ioa_cfg->reset_retries = 0;
2730		ioa_cfg->in_ioa_bringdown = 0;
2731		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2732	}
2733	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2735
2736	return result;
2737}
2738
2739static struct class_device_attribute ipr_ioa_state_attr = {
2740	.attr = {
2741		.name =		"state",
2742		.mode =		S_IRUGO | S_IWUSR,
2743	},
2744	.show = ipr_show_adapter_state,
2745	.store = ipr_store_adapter_state
2746};
2747
2748/**
2749 * ipr_store_reset_adapter - Reset the adapter
2750 * @class_dev:	class_device struct
2751 * @buf:		buffer
2752 * @count:		buffer size
2753 *
2754 * This function will reset the adapter.
2755 *
2756 * Return value:
2757 * 	count on success / other on failure
2758 **/
2759static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2760				       const char *buf, size_t count)
2761{
2762	struct Scsi_Host *shost = class_to_shost(class_dev);
2763	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2764	unsigned long lock_flags;
2765	int result = count;
2766
2767	if (!capable(CAP_SYS_ADMIN))
2768		return -EACCES;
2769
2770	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2771	if (!ioa_cfg->in_reset_reload)
2772		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2773	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2774	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2775
2776	return result;
2777}
2778
2779static struct class_device_attribute ipr_ioa_reset_attr = {
2780	.attr = {
2781		.name =		"reset_host",
2782		.mode =		S_IWUSR,
2783	},
2784	.store = ipr_store_reset_adapter
2785};
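
/*
 * Illustrative usage (assumed path): any write to
 * /sys/class/scsi_host/host<N>/reset_host, e.g. "echo 1 > .../reset_host",
 * initiates a normal-shutdown adapter reset; the buffer contents are not
 * inspected.  The write returns once the reset/reload has completed.
 */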
2786
2787/**
2788 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2789 * @buf_len:		buffer length
2790 *
2791 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2792 * list to use for microcode download
2793 *
2794 * Return value:
2795 * 	pointer to sglist / NULL on failure
2796 **/
2797static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2798{
2799	int sg_size, order, bsize_elem, num_elem, i, j;
2800	struct ipr_sglist *sglist;
2801	struct scatterlist *scatterlist;
2802	struct page *page;
2803
2804	/* Get the minimum size per scatter/gather element */
2805	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2806
2807	/* Get the actual size per element */
2808	order = get_order(sg_size);
2809
2810	/* Determine the actual number of bytes per element */
2811	bsize_elem = PAGE_SIZE * (1 << order);
2812
2813	/* Determine the actual number of sg entries needed */
2814	if (buf_len % bsize_elem)
2815		num_elem = (buf_len / bsize_elem) + 1;
2816	else
2817		num_elem = buf_len / bsize_elem;
2818
2819	/* Allocate a scatter/gather list for the DMA */
2820	sglist = kzalloc(sizeof(struct ipr_sglist) +
2821			 (sizeof(struct scatterlist) * (num_elem - 1)),
2822			 GFP_KERNEL);
2823
2824	if (sglist == NULL) {
2825		ipr_trace;
2826		return NULL;
2827	}
2828
2829	scatterlist = sglist->scatterlist;
2830
2831	sglist->order = order;
2832	sglist->num_sg = num_elem;
2833
2834	/* Allocate a bunch of sg elements */
2835	for (i = 0; i < num_elem; i++) {
2836		page = alloc_pages(GFP_KERNEL, order);
2837		if (!page) {
2838			ipr_trace;
2839
2840			/* Free up what we already allocated */
2841			for (j = i - 1; j >= 0; j--)
2842				__free_pages(scatterlist[j].page, order);
2843			kfree(sglist);
2844			return NULL;
2845		}
2846
2847		scatterlist[i].page = page;
2848	}
2849
2850	return sglist;
2851}
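
/*
 * Sizing example (illustrative, assuming 4 KB pages and an IPR_MAX_SGLIST of
 * 64): for a 1 MB image, sg_size = 1048576 / 63 ~= 16644 bytes, get_order()
 * rounds that up to order 3 (32 KB per element), so bsize_elem = 32768 and
 * num_elem = 32 scatter/gather entries are allocated.
 */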
2852
2853/**
2854 * ipr_free_ucode_buffer - Frees a microcode download buffer
2855 * @sglist:		scatter/gather list pointer
2856 *
2857 * Free a DMA'able ucode download buffer previously allocated with
2858 * ipr_alloc_ucode_buffer
2859 *
2860 * Return value:
2861 * 	nothing
2862 **/
2863static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2864{
2865	int i;
2866
2867	for (i = 0; i < sglist->num_sg; i++)
2868		__free_pages(sglist->scatterlist[i].page, sglist->order);
2869
2870	kfree(sglist);
2871}
2872
2873/**
2874 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2875 * @sglist:		scatter/gather list pointer
2876 * @buffer:		buffer pointer
2877 * @len:		buffer length
2878 *
2879 * Copy a microcode image from a user buffer into a buffer allocated by
2880 * ipr_alloc_ucode_buffer
2881 *
2882 * Return value:
2883 * 	0 on success / other on failure
2884 **/
2885static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2886				 u8 *buffer, u32 len)
2887{
2888	int bsize_elem, i, result = 0;
2889	struct scatterlist *scatterlist;
2890	void *kaddr;
2891
2892	/* Determine the actual number of bytes per element */
2893	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2894
2895	scatterlist = sglist->scatterlist;
2896
2897	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2898		kaddr = kmap(scatterlist[i].page);
2899		memcpy(kaddr, buffer, bsize_elem);
2900		kunmap(scatterlist[i].page);
2901
2902		scatterlist[i].length = bsize_elem;
2903
2904		if (result != 0) {
2905			ipr_trace;
2906			return result;
2907		}
2908	}
2909
2910	if (len % bsize_elem) {
2911		kaddr = kmap(scatterlist[i].page);
2912		memcpy(kaddr, buffer, len % bsize_elem);
2913		kunmap(scatterlist[i].page);
2914
2915		scatterlist[i].length = len % bsize_elem;
2916	}
2917
2918	sglist->buffer_len = len;
2919	return result;
2920}
2921
2922/**
2923 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2924 * @ipr_cmd:	ipr command struct
2925 * @sglist:		scatter/gather list
2926 *
2927 * Builds a microcode download IOA data list (IOADL).
2928 *
2929 **/
2930static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2931				  struct ipr_sglist *sglist)
2932{
2933	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2934	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2935	struct scatterlist *scatterlist = sglist->scatterlist;
2936	int i;
2937
2938	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2939	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2940	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2941	ioarcb->write_ioadl_len =
2942		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2943
2944	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2945		ioadl[i].flags_and_data_len =
2946			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2947		ioadl[i].address =
2948			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2949	}
2950
2951	ioadl[i-1].flags_and_data_len |=
2952		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2953}
2954
2955/**
2956 * ipr_update_ioa_ucode - Update IOA's microcode
2957 * @ioa_cfg:	ioa config struct
2958 * @sglist:		scatter/gather list
2959 *
2960 * Initiate an adapter reset to update the IOA's microcode
2961 *
2962 * Return value:
2963 * 	0 on success / -EIO on failure
2964 **/
2965static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2966				struct ipr_sglist *sglist)
2967{
2968	unsigned long lock_flags;
2969
2970	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2971	while(ioa_cfg->in_reset_reload) {
2972		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2973		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2974		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2975	}
2976
2977	if (ioa_cfg->ucode_sglist) {
2978		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2979		dev_err(&ioa_cfg->pdev->dev,
2980			"Microcode download already in progress\n");
2981		return -EIO;
2982	}
2983
2984	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2985					sglist->num_sg, DMA_TO_DEVICE);
2986
2987	if (!sglist->num_dma_sg) {
2988		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2989		dev_err(&ioa_cfg->pdev->dev,
2990			"Failed to map microcode download buffer!\n");
2991		return -EIO;
2992	}
2993
2994	ioa_cfg->ucode_sglist = sglist;
2995	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2996	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2997	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2998
2999	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3000	ioa_cfg->ucode_sglist = NULL;
3001	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3002	return 0;
3003}
3004
3005/**
3006 * ipr_store_update_fw - Update the firmware on the adapter
3007 * @class_dev:	class_device struct
3008 * @buf:		buffer
3009 * @count:		buffer size
3010 *
3011 * This function will update the firmware on the adapter.
3012 *
3013 * Return value:
3014 * 	count on success / other on failure
3015 **/
3016static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3017				       const char *buf, size_t count)
3018{
3019	struct Scsi_Host *shost = class_to_shost(class_dev);
3020	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3021	struct ipr_ucode_image_header *image_hdr;
3022	const struct firmware *fw_entry;
3023	struct ipr_sglist *sglist;
3024	char fname[100];
3025	char *src;
3026	int len, result, dnld_size;
3027
3028	if (!capable(CAP_SYS_ADMIN))
3029		return -EACCES;
3030
3031	len = snprintf(fname, 99, "%s", buf);
3032	fname[len-1] = '\0';
3033
3034	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3035		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3036		return -EIO;
3037	}
3038
3039	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3040
3041	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3042	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3043	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3044		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3045		release_firmware(fw_entry);
3046		return -EINVAL;
3047	}
3048
3049	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3050	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3051	sglist = ipr_alloc_ucode_buffer(dnld_size);
3052
3053	if (!sglist) {
3054		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3055		release_firmware(fw_entry);
3056		return -ENOMEM;
3057	}
3058
3059	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3060
3061	if (result) {
3062		dev_err(&ioa_cfg->pdev->dev,
3063			"Microcode buffer copy to DMA buffer failed\n");
3064		goto out;
3065	}
3066
3067	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3068
3069	if (!result)
3070		result = count;
3071out:
3072	ipr_free_ucode_buffer(sglist);
3073	release_firmware(fw_entry);
3074	return result;
3075}
3076
3077static struct class_device_attribute ipr_update_fw_attr = {
3078	.attr = {
3079		.name =		"update_fw",
3080		.mode =		S_IWUSR,
3081	},
3082	.store = ipr_store_update_fw
3083};
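
/*
 * Illustrative usage (assumed paths): writing a firmware file name, e.g.
 *
 *	echo ibm-ucode.bin > /sys/class/scsi_host/host<N>/update_fw
 *
 * causes the named image to be fetched via request_firmware() (normally from
 * the system firmware directory such as /lib/firmware), copied into a DMA
 * buffer and downloaded to the adapter through a normal-shutdown reset.
 * The file name "ibm-ucode.bin" is only a placeholder.
 */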
3084
3085static struct class_device_attribute *ipr_ioa_attrs[] = {
3086	&ipr_fw_version_attr,
3087	&ipr_log_level_attr,
3088	&ipr_diagnostics_attr,
3089	&ipr_ioa_state_attr,
3090	&ipr_ioa_reset_attr,
3091	&ipr_update_fw_attr,
3092	&ipr_ioa_cache_attr,
3093	NULL,
3094};
3095
3096#ifdef CONFIG_SCSI_IPR_DUMP
3097/**
3098 * ipr_read_dump - Dump the adapter
3099 * @kobj:		kobject struct
3100 * @buf:		buffer
3101 * @off:		offset
3102 * @count:		buffer size
3103 *
3104 * Return value:
3105 *	number of bytes printed to buffer
3106 **/
3107static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3108			      loff_t off, size_t count)
3109{
3110	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3111	struct Scsi_Host *shost = class_to_shost(cdev);
3112	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3113	struct ipr_dump *dump;
3114	unsigned long lock_flags = 0;
3115	char *src;
3116	int len;
3117	size_t rc = count;
3118
3119	if (!capable(CAP_SYS_ADMIN))
3120		return -EACCES;
3121
3122	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3123	dump = ioa_cfg->dump;
3124
3125	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3126		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3127		return 0;
3128	}
3129	kref_get(&dump->kref);
3130	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3131
3132	if (off > dump->driver_dump.hdr.len) {
3133		kref_put(&dump->kref, ipr_release_dump);
3134		return 0;
3135	}
3136
3137	if (off + count > dump->driver_dump.hdr.len) {
3138		count = dump->driver_dump.hdr.len - off;
3139		rc = count;
3140	}
3141
3142	if (count && off < sizeof(dump->driver_dump)) {
3143		if (off + count > sizeof(dump->driver_dump))
3144			len = sizeof(dump->driver_dump) - off;
3145		else
3146			len = count;
3147		src = (u8 *)&dump->driver_dump + off;
3148		memcpy(buf, src, len);
3149		buf += len;
3150		off += len;
3151		count -= len;
3152	}
3153
3154	off -= sizeof(dump->driver_dump);
3155
3156	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3157		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3158			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3159		else
3160			len = count;
3161		src = (u8 *)&dump->ioa_dump + off;
3162		memcpy(buf, src, len);
3163		buf += len;
3164		off += len;
3165		count -= len;
3166	}
3167
3168	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3169
3170	while (count) {
3171		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3172			len = PAGE_ALIGN(off) - off;
3173		else
3174			len = count;
3175		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3176		src += off & ~PAGE_MASK;
3177		memcpy(buf, src, len);
3178		buf += len;
3179		off += len;
3180		count -= len;
3181	}
3182
3183	kref_put(&dump->kref, ipr_release_dump);
3184	return rc;
3185}
3186
3187/**
3188 * ipr_alloc_dump - Prepare for adapter dump
3189 * @ioa_cfg:	ioa config struct
3190 *
3191 * Return value:
3192 *	0 on success / other on failure
3193 **/
3194static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3195{
3196	struct ipr_dump *dump;
3197	unsigned long lock_flags = 0;
3198
3199	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3200
3201	if (!dump) {
3202		ipr_err("Dump memory allocation failed\n");
3203		return -ENOMEM;
3204	}
3205
3206	kref_init(&dump->kref);
3207	dump->ioa_cfg = ioa_cfg;
3208
3209	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3210
3211	if (INACTIVE != ioa_cfg->sdt_state) {
3212		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3213		kfree(dump);
3214		return 0;
3215	}
3216
3217	ioa_cfg->dump = dump;
3218	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3219	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3220		ioa_cfg->dump_taken = 1;
3221		schedule_work(&ioa_cfg->work_q);
3222	}
3223	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3224
3225	return 0;
3226}
3227
3228/**
3229 * ipr_free_dump - Free adapter dump memory
3230 * @ioa_cfg:	ioa config struct
3231 *
3232 * Return value:
3233 *	0 on success / other on failure
3234 **/
3235static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3236{
3237	struct ipr_dump *dump;
3238	unsigned long lock_flags = 0;
3239
3240	ENTER;
3241
3242	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3243	dump = ioa_cfg->dump;
3244	if (!dump) {
3245		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3246		return 0;
3247	}
3248
3249	ioa_cfg->dump = NULL;
3250	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3251
3252	kref_put(&dump->kref, ipr_release_dump);
3253
3254	LEAVE;
3255	return 0;
3256}
3257
3258/**
3259 * ipr_write_dump - Setup dump state of adapter
3260 * @kobj:		kobject struct
3261 * @buf:		buffer
3262 * @off:		offset
3263 * @count:		buffer size
3264 *
3265 * Return value:
3266 *	count on success / other on failure
3267 **/
3268static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3269			      loff_t off, size_t count)
3270{
3271	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3272	struct Scsi_Host *shost = class_to_shost(cdev);
3273	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3274	int rc;
3275
3276	if (!capable(CAP_SYS_ADMIN))
3277		return -EACCES;
3278
3279	if (buf[0] == '1')
3280		rc = ipr_alloc_dump(ioa_cfg);
3281	else if (buf[0] == '0')
3282		rc = ipr_free_dump(ioa_cfg);
3283	else
3284		return -EINVAL;
3285
3286	if (rc)
3287		return rc;
3288	else
3289		return count;
3290}
3291
3292static struct bin_attribute ipr_dump_attr = {
3293	.attr =	{
3294		.name = "dump",
3295		.mode = S_IRUSR | S_IWUSR,
3296	},
3297	.size = 0,
3298	.read = ipr_read_dump,
3299	.write = ipr_write_dump
3300};
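
/*
 * Illustrative usage (assumed path): writing '1' to
 * /sys/class/scsi_host/host<N>/dump allocates dump memory and arms dump
 * collection, writing '0' frees it, and reading the file returns the
 * driver/IOA dump data once it has been obtained.
 */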
3301#else
3302static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3303#endif
3304
3305/**
3306 * ipr_change_queue_depth - Change the device's queue depth
3307 * @sdev:	scsi device struct
3308 * @qdepth:	depth to set
3309 *
3310 * Return value:
3311 * 	actual depth set
3312 **/
3313static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3314{
3315	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3316	struct ipr_resource_entry *res;
3317	unsigned long lock_flags = 0;
3318
3319	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3320	res = (struct ipr_resource_entry *)sdev->hostdata;
3321
3322	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3323		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3324	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3325
3326	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3327	return sdev->queue_depth;
3328}
3329
3330/**
3331 * ipr_change_queue_type - Change the device's queue type
3332 * @sdev:		scsi device struct
3333 * @tag_type:	type of tags to use
3334 *
3335 * Return value:
3336 * 	actual queue type set
3337 **/
3338static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3339{
3340	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3341	struct ipr_resource_entry *res;
3342	unsigned long lock_flags = 0;
3343
3344	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3345	res = (struct ipr_resource_entry *)sdev->hostdata;
3346
3347	if (res) {
3348		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3349			/*
3350			 * We don't bother quiescing the device here since the
3351			 * adapter firmware does it for us.
3352			 */
3353			scsi_set_tag_type(sdev, tag_type);
3354
3355			if (tag_type)
3356				scsi_activate_tcq(sdev, sdev->queue_depth);
3357			else
3358				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3359		} else
3360			tag_type = 0;
3361	} else
3362		tag_type = 0;
3363
3364	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3365	return tag_type;
3366}
3367
3368/**
3369 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3370 * @dev:	device struct
3371 * @buf:	buffer
3372 *
3373 * Return value:
3374 * 	number of bytes printed to buffer
3375 **/
3376static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3377{
3378	struct scsi_device *sdev = to_scsi_device(dev);
3379	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3380	struct ipr_resource_entry *res;
3381	unsigned long lock_flags = 0;
3382	ssize_t len = -ENXIO;
3383
3384	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3385	res = (struct ipr_resource_entry *)sdev->hostdata;
3386	if (res)
3387		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3388	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389	return len;
3390}
3391
3392static struct device_attribute ipr_adapter_handle_attr = {
3393	.attr = {
3394		.name = 	"adapter_handle",
3395		.mode =		S_IRUSR,
3396	},
3397	.show = ipr_show_adapter_handle
3398};
3399
3400static struct device_attribute *ipr_dev_attrs[] = {
3401	&ipr_adapter_handle_attr,
3402	NULL,
3403};
3404
3405/**
3406 * ipr_biosparam - Return the HSC mapping
3407 * @sdev:			scsi device struct
3408 * @block_device:	block device pointer
3409 * @capacity:		capacity of the device
3410 * @parm:			Array containing returned HSC values.
3411 *
3412 * This function generates the HSC parms that fdisk uses.
3413 * We want to make sure we return something that places partitions
3414 * on 4k boundaries for best performance with the IOA.
3415 *
3416 * Return value:
3417 * 	0 on success
3418 **/
3419static int ipr_biosparam(struct scsi_device *sdev,
3420			 struct block_device *block_device,
3421			 sector_t capacity, int *parm)
3422{
3423	int heads, sectors;
3424	sector_t cylinders;
3425
3426	heads = 128;
3427	sectors = 32;
3428
3429	cylinders = capacity;
3430	sector_div(cylinders, (128 * 32));
3431
3432	/* return result */
3433	parm[0] = heads;
3434	parm[1] = sectors;
3435	parm[2] = cylinders;
3436
3437	return 0;
3438}
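
/*
 * With 128 heads and 32 sectors per track, each cylinder is 128 * 32 = 4096
 * sectors (2 MB with 512-byte sectors), so cylinder-aligned partitions
 * created by fdisk start on multiples of 4096 sectors and easily satisfy the
 * 4k alignment goal mentioned above.
 */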
3439
3440/**
3441 * ipr_find_starget - Find target based on bus/target.
3442 * @starget:	scsi target struct
3443 *
3444 * Return value:
3445 * 	resource entry pointer if found / NULL if not found
3446 **/
3447static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3448{
3449	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3450	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3451	struct ipr_resource_entry *res;
3452
3453	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3454		if ((res->cfgte.res_addr.bus == starget->channel) &&
3455		    (res->cfgte.res_addr.target == starget->id) &&
3456		    (res->cfgte.res_addr.lun == 0)) {
3457			return res;
3458		}
3459	}
3460
3461	return NULL;
3462}
3463
3464static struct ata_port_info sata_port_info;
3465
3466/**
3467 * ipr_target_alloc - Prepare for commands to a SCSI target
3468 * @starget:	scsi target struct
3469 *
3470 * If the device is a SATA device, this function allocates an
3471 * ATA port with libata, else it does nothing.
3472 *
3473 * Return value:
3474 * 	0 on success / non-0 on failure
3475 **/
3476static int ipr_target_alloc(struct scsi_target *starget)
3477{
3478	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3479	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3480	struct ipr_sata_port *sata_port;
3481	struct ata_port *ap;
3482	struct ipr_resource_entry *res;
3483	unsigned long lock_flags;
3484
3485	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3486	res = ipr_find_starget(starget);
3487	starget->hostdata = NULL;
3488
3489	if (res && ipr_is_gata(res)) {
3490		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3491		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3492		if (!sata_port)
3493			return -ENOMEM;
3494
3495		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3496		if (ap) {
3497			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3498			sata_port->ioa_cfg = ioa_cfg;
3499			sata_port->ap = ap;
3500			sata_port->res = res;
3501
3502			res->sata_port = sata_port;
3503			ap->private_data = sata_port;
3504			starget->hostdata = sata_port;
3505		} else {
3506			kfree(sata_port);
3507			return -ENOMEM;
3508		}
3509	}
3510	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3511
3512	return 0;
3513}
3514
3515/**
3516 * ipr_target_destroy - Destroy a SCSI target
3517 * @starget:	scsi target struct
3518 *
3519 * If the device was a SATA device, this function frees the libata
3520 * ATA port, else it does nothing.
3521 *
3522 **/
3523static void ipr_target_destroy(struct scsi_target *starget)
3524{
3525	struct ipr_sata_port *sata_port = starget->hostdata;
3526
3527	if (sata_port) {
3528		starget->hostdata = NULL;
3529		ata_sas_port_destroy(sata_port->ap);
3530		kfree(sata_port);
3531	}
3532}
3533
3534/**
3535 * ipr_find_sdev - Find device based on bus/target/lun.
3536 * @sdev:	scsi device struct
3537 *
3538 * Return value:
3539 * 	resource entry pointer if found / NULL if not found
3540 **/
3541static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3542{
3543	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3544	struct ipr_resource_entry *res;
3545
3546	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3547		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3548		    (res->cfgte.res_addr.target == sdev->id) &&
3549		    (res->cfgte.res_addr.lun == sdev->lun))
3550			return res;
3551	}
3552
3553	return NULL;
3554}
3555
3556/**
3557 * ipr_slave_destroy - Unconfigure a SCSI device
3558 * @sdev:	scsi device struct
3559 *
3560 * Return value:
3561 * 	nothing
3562 **/
3563static void ipr_slave_destroy(struct scsi_device *sdev)
3564{
3565	struct ipr_resource_entry *res;
3566	struct ipr_ioa_cfg *ioa_cfg;
3567	unsigned long lock_flags = 0;
3568
3569	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3570
3571	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3572	res = (struct ipr_resource_entry *) sdev->hostdata;
3573	if (res) {
3574		if (res->sata_port)
3575			ata_port_disable(res->sata_port->ap);
3576		sdev->hostdata = NULL;
3577		res->sdev = NULL;
3578		res->sata_port = NULL;
3579	}
3580	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3581}
3582
3583/**
3584 * ipr_slave_configure - Configure a SCSI device
3585 * @sdev:	scsi device struct
3586 *
3587 * This function configures the specified scsi device.
3588 *
3589 * Return value:
3590 * 	0 on success
3591 **/
3592static int ipr_slave_configure(struct scsi_device *sdev)
3593{
3594	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3595	struct ipr_resource_entry *res;
3596	unsigned long lock_flags = 0;
3597
3598	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3599	res = sdev->hostdata;
3600	if (res) {
3601		if (ipr_is_af_dasd_device(res))
3602			sdev->type = TYPE_RAID;
3603		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3604			sdev->scsi_level = 4;
3605			sdev->no_uld_attach = 1;
3606		}
3607		if (ipr_is_vset_device(res)) {
3608			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3609			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3610		}
3611		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3612			sdev->allow_restart = 1;
3613		if (ipr_is_gata(res) && res->sata_port) {
3614			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3615			ata_sas_slave_configure(sdev, res->sata_port->ap);
3616		} else {
3617			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3618		}
3619	}
3620	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621	return 0;
3622}
3623
3624/**
3625 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3626 * @sdev:	scsi device struct
3627 *
3628 * This function initializes an ATA port so that future commands
3629 * sent through queuecommand will work.
3630 *
3631 * Return value:
3632 * 	0 on success
3633 **/
3634static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3635{
3636	struct ipr_sata_port *sata_port = NULL;
3637	int rc = -ENXIO;
3638
3639	ENTER;
3640	if (sdev->sdev_target)
3641		sata_port = sdev->sdev_target->hostdata;
3642	if (sata_port)
3643		rc = ata_sas_port_init(sata_port->ap);
3644	if (rc)
3645		ipr_slave_destroy(sdev);
3646
3647	LEAVE;
3648	return rc;
3649}
3650
3651/**
3652 * ipr_slave_alloc - Prepare for commands to a device.
3653 * @sdev:	scsi device struct
3654 *
3655 * This function saves a pointer to the resource entry
3656 * in the scsi device struct if the device exists. We
3657 * can then use this pointer in ipr_queuecommand when
3658 * handling new commands.
3659 *
3660 * Return value:
3661 * 	0 on success / -ENXIO if device does not exist
3662 **/
3663static int ipr_slave_alloc(struct scsi_device *sdev)
3664{
3665	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3666	struct ipr_resource_entry *res;
3667	unsigned long lock_flags;
3668	int rc = -ENXIO;
3669
3670	sdev->hostdata = NULL;
3671
3672	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3673
3674	res = ipr_find_sdev(sdev);
3675	if (res) {
3676		res->sdev = sdev;
3677		res->add_to_ml = 0;
3678		res->in_erp = 0;
3679		sdev->hostdata = res;
3680		if (!ipr_is_naca_model(res))
3681			res->needs_sync_complete = 1;
3682		rc = 0;
3683		if (ipr_is_gata(res)) {
3684			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3685			return ipr_ata_slave_alloc(sdev);
3686		}
3687	}
3688
3689	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3690
3691	return rc;
3692}
3693
3694/**
3695 * __ipr_eh_host_reset - Reset the host adapter
3696 * @scsi_cmd:	scsi command struct
3697 *
3698 * Return value:
3699 * 	SUCCESS / FAILED
3700 **/
3701static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3702{
3703	struct ipr_ioa_cfg *ioa_cfg;
3704	int rc;
3705
3706	ENTER;
3707	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3708
3709	dev_err(&ioa_cfg->pdev->dev,
3710		"Adapter being reset as a result of error recovery.\n");
3711
3712	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3713		ioa_cfg->sdt_state = GET_DUMP;
3714
3715	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3716
3717	LEAVE;
3718	return rc;
3719}
3720
3721static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3722{
3723	int rc;
3724
3725	spin_lock_irq(cmd->device->host->host_lock);
3726	rc = __ipr_eh_host_reset(cmd);
3727	spin_unlock_irq(cmd->device->host->host_lock);
3728
3729	return rc;
3730}
3731
3732/**
3733 * ipr_device_reset - Reset the device
3734 * @ioa_cfg:	ioa config struct
3735 * @res:		resource entry struct
3736 *
3737 * This function issues a device reset to the affected device.
3738 * If the device is a SCSI device, a LUN reset will be sent
3739 * to the device first. If that does not work, a target reset
3740 * will be sent. If the device is a SATA device, a PHY reset will
3741 * be sent.
3742 *
3743 * Return value:
3744 *	0 on success / non-zero on failure
3745 **/
3746static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3747			    struct ipr_resource_entry *res)
3748{
3749	struct ipr_cmnd *ipr_cmd;
3750	struct ipr_ioarcb *ioarcb;
3751	struct ipr_cmd_pkt *cmd_pkt;
3752	struct ipr_ioarcb_ata_regs *regs;
3753	u32 ioasc;
3754
3755	ENTER;
3756	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3757	ioarcb = &ipr_cmd->ioarcb;
3758	cmd_pkt = &ioarcb->cmd_pkt;
3759	regs = &ioarcb->add_data.u.regs;
3760
3761	ioarcb->res_handle = res->cfgte.res_handle;
3762	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3763	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3764	if (ipr_is_gata(res)) {
3765		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3766		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3767		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3768	}
3769
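	/*
	 * The reset is issued synchronously. For SATA devices the
	 * returned ATA register image is saved off for libata EH, except
	 * when the IOASC indicates the IOA itself was reset.
	 */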
3770	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3771	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3772	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3773	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3774		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3775		       sizeof(struct ipr_ioasa_gata));
3776
3777	LEAVE;
3778	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3779}
3780
3781/**
3782 * ipr_sata_reset - Reset the SATA port
3783 * @ap:		SATA port to reset
3784 * @classes:	class of the attached device
 * @deadline:	unused
3785 *
3786 * This function issues a SATA phy reset to the affected ATA port.
3787 *
3788 * Return value:
3789 *	0 on success / non-zero on failure
3790 **/
3791static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
3792				unsigned long deadline)
3793{
3794	struct ipr_sata_port *sata_port = ap->private_data;
3795	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3796	struct ipr_resource_entry *res;
3797	unsigned long lock_flags = 0;
3798	int rc = -ENXIO;
3799
3800	ENTER;
3801	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3802	while (ioa_cfg->in_reset_reload) {
3803		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3804		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3805		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3806	}
3807
3808	res = sata_port->res;
3809	if (res) {
3810		rc = ipr_device_reset(ioa_cfg, res);
3811		switch (res->cfgte.proto) {
3812		case IPR_PROTO_SATA:
3813		case IPR_PROTO_SAS_STP:
3814			*classes = ATA_DEV_ATA;
3815			break;
3816		case IPR_PROTO_SATA_ATAPI:
3817		case IPR_PROTO_SAS_STP_ATAPI:
3818			*classes = ATA_DEV_ATAPI;
3819			break;
3820		default:
3821			*classes = ATA_DEV_UNKNOWN;
3822			break;
3823		}
3824	}
3825
3826	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3827	LEAVE;
3828	return rc;
3829}
3830
3831/**
3832 * __ipr_eh_dev_reset - Reset the device
3833 * @scsi_cmd:	scsi command struct
3834 *
3835 * This function issues a device reset to the affected device.
3836 * A LUN reset will be sent to the device first. If that does
3837 * not work, a target reset will be sent.
3838 *
3839 * Return value:
3840 *	SUCCESS / FAILED
3841 **/
3842static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3843{
3844	struct ipr_cmnd *ipr_cmd;
3845	struct ipr_ioa_cfg *ioa_cfg;
3846	struct ipr_resource_entry *res;
3847	struct ata_port *ap;
3848	int rc = 0;
3849
3850	ENTER;
3851	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3852	res = scsi_cmd->device->hostdata;
3853
3854	if (!res)
3855		return FAILED;
3856
3857	/*
3858	 * If we are currently going through reset/reload, return failed. This will force the
3859	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3860	 * reset to complete
3861	 */
3862	if (ioa_cfg->in_reset_reload)
3863		return FAILED;
3864	if (ioa_cfg->ioa_is_dead)
3865		return FAILED;
3866
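	/*
	 * Retarget the completion handlers of any ops still outstanding
	 * to this device so that, once the reset flushes them back, they
	 * complete through the EH done paths; ATA qcs are also marked
	 * failed/timed out for libata error handling.
	 */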
3867	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3868		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3869			if (ipr_cmd->scsi_cmd)
3870				ipr_cmd->done = ipr_scsi_eh_done;
3871			if (ipr_cmd->qc)
3872				ipr_cmd->done = ipr_sata_eh_done;
3873			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3874				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3875				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3876			}
3877		}
3878	}
3879
3880	res->resetting_device = 1;
3881	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3882
3883	if (ipr_is_gata(res) && res->sata_port) {
3884		ap = res->sata_port->ap;
3885		spin_unlock_irq(scsi_cmd->device->host->host_lock);
3886		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3887		spin_lock_irq(scsi_cmd->device->host->host_lock);
3888	} else
3889		rc = ipr_device_reset(ioa_cfg, res);
3890	res->resetting_device = 0;
3891
3892	LEAVE;
3893	return (rc ? FAILED : SUCCESS);
3894}
3895
3896static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3897{
3898	int rc;
3899
3900	spin_lock_irq(cmd->device->host->host_lock);
3901	rc = __ipr_eh_dev_reset(cmd);
3902	spin_unlock_irq(cmd->device->host->host_lock);
3903
3904	return rc;
3905}
3906
3907/**
3908 * ipr_bus_reset_done - Op done function for bus reset.
3909 * @ipr_cmd:	ipr command struct
3910 *
3911 * This function is the op done function for a bus reset
3912 *
3913 * Return value:
3914 * 	none
3915 **/
3916static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3917{
3918	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3919	struct ipr_resource_entry *res;
3920
3921	ENTER;
3922	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3923		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3924			    sizeof(res->cfgte.res_handle))) {
3925			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3926			break;
3927		}
3928	}
3929
3930	/*
3931	 * If abort has not completed, indicate the reset has, else call the
3932	 * abort's done function to wake the sleeping eh thread
3933	 */
3934	if (ipr_cmd->sibling->sibling)
3935		ipr_cmd->sibling->sibling = NULL;
3936	else
3937		ipr_cmd->sibling->done(ipr_cmd->sibling);
3938
3939	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3940	LEAVE;
3941}
3942
3943/**
3944 * ipr_abort_timeout - An abort task has timed out
3945 * @ipr_cmd:	ipr command struct
3946 *
3947 * This function handles when an abort task times out. If this
3948 * happens we issue a bus reset since we have resources tied
3949 * up that must be freed before returning to the midlayer.
3950 *
3951 * Return value:
3952 *	none
3953 **/
3954static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3955{
3956	struct ipr_cmnd *reset_cmd;
3957	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3958	struct ipr_cmd_pkt *cmd_pkt;
3959	unsigned long lock_flags = 0;
3960
3961	ENTER;
3962	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3963	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3964		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965		return;
3966	}
3967
3968	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
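	/*
	 * Cross-link the abort and the bus reset through ->sibling so
	 * ipr_bus_reset_done() can tell whether the abort has already
	 * completed and wake the sleeping EH thread exactly once.
	 */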
3969	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3970	ipr_cmd->sibling = reset_cmd;
3971	reset_cmd->sibling = ipr_cmd;
3972	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3973	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3974	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3975	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3976	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3977
3978	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3979	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3980	LEAVE;
3981}
3982
3983/**
3984 * ipr_cancel_op - Cancel specified op
3985 * @scsi_cmd:	scsi command struct
3986 *
3987 * This function cancels specified op.
3988 *
3989 * Return value:
3990 *	SUCCESS / FAILED
3991 **/
3992static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3993{
3994	struct ipr_cmnd *ipr_cmd;
3995	struct ipr_ioa_cfg *ioa_cfg;
3996	struct ipr_resource_entry *res;
3997	struct ipr_cmd_pkt *cmd_pkt;
3998	u32 ioasc;
3999	int op_found = 0;
4000
4001	ENTER;
4002	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4003	res = scsi_cmd->device->hostdata;
4004
4005	/* If we are currently going through reset/reload, return failed.
4006	 * This will force the mid-layer to call ipr_eh_host_reset,
4007	 * which will then go to sleep and wait for the reset to complete
4008	 */
4009	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4010		return FAILED;
4011	if (!res || !ipr_is_gscsi(res))
4012		return FAILED;
4013
4014	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4015		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4016			ipr_cmd->done = ipr_scsi_eh_done;
4017			op_found = 1;
4018			break;
4019		}
4020	}
4021
4022	if (!op_found)
4023		return SUCCESS;
4024
4025	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4026	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4027	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4028	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4029	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4030	ipr_cmd->u.sdev = scsi_cmd->device;
4031
4032	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4033		    scsi_cmd->cmnd[0]);
4034	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4035	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4036
4037	/*
4038	 * If the abort task timed out and we sent a bus reset, we will get
4039	 * one of the following responses to the abort
4040	 */
4041	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4042		ioasc = 0;
4043		ipr_trace;
4044	}
4045
4046	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4047	if (!ipr_is_naca_model(res))
4048		res->needs_sync_complete = 1;
4049
4050	LEAVE;
4051	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4052}
4053
4054/**
4055 * ipr_eh_abort - Abort a single op
4056 * @scsi_cmd:	scsi command struct
4057 *
4058 * Return value:
4059 * 	SUCCESS / FAILED
4060 **/
4061static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4062{
4063	unsigned long flags;
4064	int rc;
4065
4066	ENTER;
4067
4068	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4069	rc = ipr_cancel_op(scsi_cmd);
4070	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4071
4072	LEAVE;
4073	return rc;
4074}
4075
4076/**
4077 * ipr_handle_other_interrupt - Handle "other" interrupts
4078 * @ioa_cfg:	ioa config struct
4079 * @int_reg:	interrupt register
4080 *
4081 * Return value:
4082 * 	IRQ_NONE / IRQ_HANDLED
4083 **/
4084static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4085					      volatile u32 int_reg)
4086{
4087	irqreturn_t rc = IRQ_HANDLED;
4088
4089	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4090		/* Mask the interrupt */
4091		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4092
4093		/* Clear the interrupt */
4094		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4095		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4096
4097		list_del(&ioa_cfg->reset_cmd->queue);
4098		del_timer(&ioa_cfg->reset_cmd->timer);
4099		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4100	} else {
4101		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4102			ioa_cfg->ioa_unit_checked = 1;
4103		else
4104			dev_err(&ioa_cfg->pdev->dev,
4105				"Permanent IOA failure. 0x%08X\n", int_reg);
4106
4107		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4108			ioa_cfg->sdt_state = GET_DUMP;
4109
4110		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4111		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4112	}
4113
4114	return rc;
4115}
4116
4117/**
4118 * ipr_isr - Interrupt service routine
4119 * @irq:	irq number
4120 * @devp:	pointer to ioa config struct
4121 *
4122 * Return value:
4123 * 	IRQ_NONE / IRQ_HANDLED
4124 **/
4125static irqreturn_t ipr_isr(int irq, void *devp)
4126{
4127	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4128	unsigned long lock_flags = 0;
4129	volatile u32 int_reg, int_mask_reg;
4130	u32 ioasc;
4131	u16 cmd_index;
4132	struct ipr_cmnd *ipr_cmd;
4133	irqreturn_t rc = IRQ_NONE;
4134
4135	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4136
4137	/* If interrupts are disabled, ignore the interrupt */
4138	if (!ioa_cfg->allow_interrupts) {
4139		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4140		return IRQ_NONE;
4141	}
4142
4143	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4144	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4145
4146	/* If no operational interrupts from the adapter are pending, ignore it */
4147	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4148		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4149		return IRQ_NONE;
4150	}
4151
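	/*
	 * Drain completed responses from the host request/response queue.
	 * Each HRRQ entry carries a command index plus a toggle bit that
	 * flips on every pass through the queue, distinguishing freshly
	 * written entries from stale ones after a wrap.
	 */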
4152	while (1) {
4153		ipr_cmd = NULL;
4154
4155		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4156		       ioa_cfg->toggle_bit) {
4157
4158			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4159				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4160
4161			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4162				ioa_cfg->errors_logged++;
4163				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4164
4165				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4166					ioa_cfg->sdt_state = GET_DUMP;
4167
4168				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4169				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4170				return IRQ_HANDLED;
4171			}
4172
4173			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4174
4175			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4176
4177			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4178
4179			list_del(&ipr_cmd->queue);
4180			del_timer(&ipr_cmd->timer);
4181			ipr_cmd->done(ipr_cmd);
4182
4183			rc = IRQ_HANDLED;
4184
4185			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4186				ioa_cfg->hrrq_curr++;
4187			} else {
4188				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4189				ioa_cfg->toggle_bit ^= 1u;
4190			}
4191		}
4192
4193		if (ipr_cmd != NULL) {
4194			/* Clear the PCI interrupt */
4195			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4196			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4197		} else
4198			break;
4199	}
4200
4201	if (unlikely(rc == IRQ_NONE))
4202		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4203
4204	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4205	return rc;
4206}
4207
4208/**
4209 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4210 * @ioa_cfg:	ioa config struct
4211 * @ipr_cmd:	ipr command struct
4212 *
4213 * Return value:
4214 * 	0 on success / -1 on failure
4215 **/
4216static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4217			   struct ipr_cmnd *ipr_cmd)
4218{
4219	int i;
4220	struct scatterlist *sglist;
4221	u32 length;
4222	u32 ioadl_flags = 0;
4223	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4224	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4225	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4226
4227	length = scsi_cmd->request_bufflen;
4228
4229	if (length == 0)
4230		return 0;
4231
4232	if (scsi_cmd->use_sg) {
4233		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4234						 scsi_cmd->request_buffer,
4235						 scsi_cmd->use_sg,
4236						 scsi_cmd->sc_data_direction);
4237
4238		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4239			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4240			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4241			ioarcb->write_data_transfer_length = cpu_to_be32(length);
4242			ioarcb->write_ioadl_len =
4243				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4244		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4245			ioadl_flags = IPR_IOADL_FLAGS_READ;
4246			ioarcb->read_data_transfer_length = cpu_to_be32(length);
4247			ioarcb->read_ioadl_len =
4248				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4249		}
4250
4251		sglist = scsi_cmd->request_buffer;
4252
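		/*
		 * If the S/G list is short enough, place the IOADL in the
		 * IOARCB's additional data area so it is fetched along
		 * with the command block instead of from a separate buffer.
		 */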
4253		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4254			ioadl = ioarcb->add_data.u.ioadl;
4255			ioarcb->write_ioadl_addr =
4256				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4257					    offsetof(struct ipr_ioarcb, add_data));
4258			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4259		}
4260
4261		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4262			ioadl[i].flags_and_data_len =
4263				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4264			ioadl[i].address =
4265				cpu_to_be32(sg_dma_address(&sglist[i]));
4266		}
4267
4268		if (likely(ipr_cmd->dma_use_sg)) {
4269			ioadl[i-1].flags_and_data_len |=
4270				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4271			return 0;
4272		} else
4273			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4274	} else {
4275		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4276			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4277			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4278			ioarcb->write_data_transfer_length = cpu_to_be32(length);
4279			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4280		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4281			ioadl_flags = IPR_IOADL_FLAGS_READ;
4282			ioarcb->read_data_transfer_length = cpu_to_be32(length);
4283			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4284		}
4285
4286		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4287						     scsi_cmd->request_buffer, length,
4288						     scsi_cmd->sc_data_direction);
4289
4290		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4291			ioadl = ioarcb->add_data.u.ioadl;
4292			ioarcb->write_ioadl_addr =
4293				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4294					    offsetof(struct ipr_ioarcb, add_data));
4295			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4296			ipr_cmd->dma_use_sg = 1;
4297			ioadl[0].flags_and_data_len =
4298				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4299			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4300			return 0;
4301		} else
4302			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4303	}
4304
4305	return -1;
4306}
4307
4308/**
4309 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4310 * @scsi_cmd:	scsi command struct
4311 *
4312 * Return value:
4313 * 	task attributes
4314 **/
4315static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4316{
4317	u8 tag[2];
4318	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4319
4320	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4321		switch (tag[0]) {
4322		case MSG_SIMPLE_TAG:
4323			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4324			break;
4325		case MSG_HEAD_TAG:
4326			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4327			break;
4328		case MSG_ORDERED_TAG:
4329			rc = IPR_FLAGS_LO_ORDERED_TASK;
4330			break;
4331		}
4332	}
4333
4334	return rc;
4335}
4336
4337/**
4338 * ipr_erp_done - Process completion of ERP for a device
4339 * @ipr_cmd:		ipr command struct
4340 *
4341 * This function copies the sense buffer into the scsi_cmd
4342 * struct and pushes the scsi_done function.
4343 *
4344 * Return value:
4345 * 	nothing
4346 **/
4347static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4348{
4349	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4350	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4351	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4352	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4353
4354	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4355		scsi_cmd->result |= (DID_ERROR << 16);
4356		scmd_printk(KERN_ERR, scsi_cmd,
4357			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4358	} else {
4359		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4360		       SCSI_SENSE_BUFFERSIZE);
4361	}
4362
4363	if (res) {
4364		if (!ipr_is_naca_model(res))
4365			res->needs_sync_complete = 1;
4366		res->in_erp = 0;
4367	}
4368	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4369	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4370	scsi_cmd->scsi_done(scsi_cmd);
4371}
4372
4373/**
4374 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4375 * @ipr_cmd:	ipr command struct
4376 *
4377 * Return value:
4378 * 	none
4379 **/
4380static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4381{
4382	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4383	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4384	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4385
4386	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4387	ioarcb->write_data_transfer_length = 0;
4388	ioarcb->read_data_transfer_length = 0;
4389	ioarcb->write_ioadl_len = 0;
4390	ioarcb->read_ioadl_len = 0;
4391	ioasa->ioasc = 0;
4392	ioasa->residual_data_len = 0;
4393	ioarcb->write_ioadl_addr =
4394		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4395	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4396}
4397
4398/**
4399 * ipr_erp_request_sense - Send request sense to a device
4400 * @ipr_cmd:	ipr command struct
4401 *
4402 * This function sends a request sense to a device as a result
4403 * of a check condition.
4404 *
4405 * Return value:
4406 * 	nothing
4407 **/
4408static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4409{
4410	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4411	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4412
4413	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4414		ipr_erp_done(ipr_cmd);
4415		return;
4416	}
4417
4418	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4419
4420	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4421	cmd_pkt->cdb[0] = REQUEST_SENSE;
4422	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4423	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4424	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4425	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4426
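	/*
	 * A single read IOADL entry points at the command block's
	 * preallocated DMA sense buffer, so the adapter deposits the
	 * sense data there directly.
	 */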
4427	ipr_cmd->ioadl[0].flags_and_data_len =
4428		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4429	ipr_cmd->ioadl[0].address =
4430		cpu_to_be32(ipr_cmd->sense_buffer_dma);
4431
4432	ipr_cmd->ioarcb.read_ioadl_len =
4433		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4434	ipr_cmd->ioarcb.read_data_transfer_length =
4435		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4436
4437	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4438		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4439}
4440
4441/**
4442 * ipr_erp_cancel_all - Send cancel all to a device
4443 * @ipr_cmd:	ipr command struct
4444 *
4445 * This function sends a cancel all to a device to clear the
4446 * queue. If we are running TCQ on the device, QERR is set to 1,
4447 * which means all outstanding ops have been dropped on the floor.
4448 * Cancel all will return them to us.
4449 *
4450 * Return value:
4451 * 	nothing
4452 **/
4453static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4454{
4455	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4456	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4457	struct ipr_cmd_pkt *cmd_pkt;
4458
4459	res->in_erp = 1;
4460
4461	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4462
4463	if (!scsi_get_tag_type(scsi_cmd->device)) {
4464		ipr_erp_request_sense(ipr_cmd);
4465		return;
4466	}
4467
4468	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4469	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4470	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4471
4472	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4473		   IPR_CANCEL_ALL_TIMEOUT);
4474}
4475
4476/**
4477 * ipr_dump_ioasa - Dump contents of IOASA
4478 * @ioa_cfg:	ioa config struct
4479 * @ipr_cmd:	ipr command struct
4480 * @res:		resource entry struct
4481 *
4482 * This function is invoked by the interrupt handler when ops
4483 * fail. It will log the IOASA if appropriate. Only called
4484 * for GPDD ops.
4485 *
4486 * Return value:
4487 * 	none
4488 **/
4489static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4490			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4491{
4492	int i;
4493	u16 data_len;
4494	u32 ioasc, fd_ioasc;
4495	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4496	__be32 *ioasa_data = (__be32 *)ioasa;
4497	int error_index;
4498
4499	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4500	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4501
4502	if (0 == ioasc)
4503		return;
4504
4505	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4506		return;
4507
4508	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4509		error_index = ipr_get_error(fd_ioasc);
4510	else
4511		error_index = ipr_get_error(ioasc);
4512
4513	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4514		/* Don't log an error if the IOA already logged one */
4515		if (ioasa->ilid != 0)
4516			return;
4517
4518		if (!ipr_is_gscsi(res))
4519			return;
4520
4521		if (ipr_error_table[error_index].log_ioasa == 0)
4522			return;
4523	}
4524
4525	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4526
4527	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4528		data_len = sizeof(struct ipr_ioasa);
4529	else
4530		data_len = be16_to_cpu(ioasa->ret_stat_len);
4531
4532	ipr_err("IOASA Dump:\n");
4533
4534	for (i = 0; i < data_len / 4; i += 4) {
4535		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4536			be32_to_cpu(ioasa_data[i]),
4537			be32_to_cpu(ioasa_data[i+1]),
4538			be32_to_cpu(ioasa_data[i+2]),
4539			be32_to_cpu(ioasa_data[i+3]));
4540	}
4541}
4542
4543/**
4544 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4545 * @ipr_cmd:	ipr command struct
4547 *
4548 * Return value:
4549 * 	none
4550 **/
4551static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4552{
4553	u32 failing_lba;
4554	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4555	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4556	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4557	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4558
4559	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4560
4561	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4562		return;
4563
4564	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4565
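	/*
	 * Volume sets with a failing LBA above 32 bits get descriptor
	 * format sense data (response code 0x72) so the full 64-bit LBA
	 * can be reported; all other cases use the traditional fixed
	 * format (0x70) below.
	 */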
4566	if (ipr_is_vset_device(res) &&
4567	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4568	    ioasa->u.vset.failing_lba_hi != 0) {
4569		sense_buf[0] = 0x72;
4570		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4571		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4572		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4573
4574		sense_buf[7] = 12;
4575		sense_buf[8] = 0;
4576		sense_buf[9] = 0x0A;
4577		sense_buf[10] = 0x80;
4578
4579		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4580
4581		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4582		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4583		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4584		sense_buf[15] = failing_lba & 0x000000ff;
4585
4586		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4587
4588		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4589		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4590		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4591		sense_buf[19] = failing_lba & 0x000000ff;
4592	} else {
4593		sense_buf[0] = 0x70;
4594		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4595		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4596		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4597
4598		/* Illegal request */
4599		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4600		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4601			sense_buf[7] = 10;	/* additional length */
4602
4603			/* IOARCB was in error */
4604			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4605				sense_buf[15] = 0xC0;
4606			else	/* Parameter data was invalid */
4607				sense_buf[15] = 0x80;
4608
4609			sense_buf[16] =
4610			    ((IPR_FIELD_POINTER_MASK &
4611			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4612			sense_buf[17] =
4613			    (IPR_FIELD_POINTER_MASK &
4614			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4615		} else {
4616			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4617				if (ipr_is_vset_device(res))
4618					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4619				else
4620					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4621
4622				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4623				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4624				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4625				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4626				sense_buf[6] = failing_lba & 0x000000ff;
4627			}
4628
4629			sense_buf[7] = 6;	/* additional length */
4630		}
4631	}
4632}
4633
4634/**
4635 * ipr_get_autosense - Copy autosense data to sense buffer
4636 * @ipr_cmd:	ipr command struct
4637 *
4638 * This function copies the autosense buffer to the buffer
4639 * in the scsi_cmd, if there is autosense available.
4640 *
4641 * Return value:
4642 *	1 if autosense was available / 0 if not
4643 **/
4644static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4645{
4646	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4647
4648	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4649		return 0;
4650
4651	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4652	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4653		   SCSI_SENSE_BUFFERSIZE));
4654	return 1;
4655}
4656
4657/**
4658 * ipr_erp_start - Process an error response for a SCSI op
4659 * @ioa_cfg:	ioa config struct
4660 * @ipr_cmd:	ipr command struct
4661 *
4662 * This function determines whether or not to initiate ERP
4663 * on the affected device.
4664 *
4665 * Return value:
4666 * 	nothing
4667 **/
4668static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4669			      struct ipr_cmnd *ipr_cmd)
4670{
4671	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4672	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4673	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4674	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4675
4676	if (!res) {
4677		ipr_scsi_eh_done(ipr_cmd);
4678		return;
4679	}
4680
4681	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4682		ipr_gen_sense(ipr_cmd);
4683
4684	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4685
4686	switch (masked_ioasc) {
4687	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4688		if (ipr_is_naca_model(res))
4689			scsi_cmd->result |= (DID_ABORT << 16);
4690		else
4691			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4692		break;
4693	case IPR_IOASC_IR_RESOURCE_HANDLE:
4694	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4695		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4696		break;
4697	case IPR_IOASC_HW_SEL_TIMEOUT:
4698		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4699		if (!ipr_is_naca_model(res))
4700			res->needs_sync_complete = 1;
4701		break;
4702	case IPR_IOASC_SYNC_REQUIRED:
4703		if (!res->in_erp)
4704			res->needs_sync_complete = 1;
4705		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4706		break;
4707	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4708	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4709		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4710		break;
4711	case IPR_IOASC_BUS_WAS_RESET:
4712	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4713		/*
4714		 * Report the bus reset and ask for a retry. The device
4715		 * will give CC/UA the next command.
4716		 */
4717		if (!res->resetting_device)
4718			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4719		scsi_cmd->result |= (DID_ERROR << 16);
4720		if (!ipr_is_naca_model(res))
4721			res->needs_sync_complete = 1;
4722		break;
4723	case IPR_IOASC_HW_DEV_BUS_STATUS:
4724		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4725		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4726			if (!ipr_get_autosense(ipr_cmd)) {
4727				if (!ipr_is_naca_model(res)) {
4728					ipr_erp_cancel_all(ipr_cmd);
4729					return;
4730				}
4731			}
4732		}
4733		if (!ipr_is_naca_model(res))
4734			res->needs_sync_complete = 1;
4735		break;
4736	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4737		break;
4738	default:
4739		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4740			scsi_cmd->result |= (DID_ERROR << 16);
4741		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4742			res->needs_sync_complete = 1;
4743		break;
4744	}
4745
4746	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4747	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4748	scsi_cmd->scsi_done(scsi_cmd);
4749}
4750
4751/**
4752 * ipr_scsi_done - mid-layer done function
4753 * @ipr_cmd:	ipr command struct
4754 *
4755 * This function is invoked by the interrupt handler for
4756 * ops generated by the SCSI mid-layer
4757 *
4758 * Return value:
4759 * 	none
4760 **/
4761static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4762{
4763	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4764	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4765	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4766
4767	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4768
4769	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4770		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4771		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4772		scsi_cmd->scsi_done(scsi_cmd);
4773	} else
4774		ipr_erp_start(ioa_cfg, ipr_cmd);
4775}
4776
4777/**
4778 * ipr_queuecommand - Queue a mid-layer request
4779 * @scsi_cmd:	scsi command struct
4780 * @done:		done function
4781 *
4782 * This function queues a request generated by the mid-layer.
4783 *
4784 * Return value:
4785 *	0 on success
4786 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4787 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4788 **/
4789static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4790			    void (*done) (struct scsi_cmnd *))
4791{
4792	struct ipr_ioa_cfg *ioa_cfg;
4793	struct ipr_resource_entry *res;
4794	struct ipr_ioarcb *ioarcb;
4795	struct ipr_cmnd *ipr_cmd;
4796	int rc = 0;
4797
4798	scsi_cmd->scsi_done = done;
4799	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4800	res = scsi_cmd->device->hostdata;
4801	scsi_cmd->result = (DID_OK << 16);
4802
4803	/*
4804	 * We are currently blocking all devices due to a host reset.
4805	 * We have told the host to stop giving us new requests, but
4806	 * ERP ops don't count. FIXME
4807	 */
4808	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4809		return SCSI_MLQUEUE_HOST_BUSY;
4810
4811	/*
4812	 * FIXME - Create scsi_set_host_offline interface
4813	 *  and the ioa_is_dead check can be removed
4814	 */
4815	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4816		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4817		scsi_cmd->result = (DID_NO_CONNECT << 16);
4818		scsi_cmd->scsi_done(scsi_cmd);
4819		return 0;
4820	}
4821
4822	if (ipr_is_gata(res) && res->sata_port)
4823		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4824
4825	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4826	ioarcb = &ipr_cmd->ioarcb;
4827	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4828
4829	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4830	ipr_cmd->scsi_cmd = scsi_cmd;
4831	ioarcb->res_handle = res->cfgte.res_handle;
4832	ipr_cmd->done = ipr_scsi_done;
4833	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4834
4835	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4836		if (scsi_cmd->underflow == 0)
4837			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4838
4839		if (res->needs_sync_complete) {
4840			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4841			res->needs_sync_complete = 0;
4842		}
4843
4844		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4845		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4846		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4847		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4848	}
4849
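	/*
	 * Vendor-unique CDBs (opcodes 0xC0 and above) to adapter-managed
	 * devices, and the query resource state op on any device, are
	 * handled by the IOA itself rather than passed through.
	 */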
4850	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4851	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4852		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4853
4854	if (likely(rc == 0))
4855		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4856
4857	if (likely(rc == 0)) {
4858		mb();
4859		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4860		       ioa_cfg->regs.ioarrin_reg);
4861	} else {
4862		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4863		return SCSI_MLQUEUE_HOST_BUSY;
4864	}
4865
4866	return 0;
4867}
4868
4869/**
4870 * ipr_ioctl - IOCTL handler
4871 * @sdev:	scsi device struct
4872 * @cmd:	IOCTL cmd
4873 * @arg:	IOCTL arg
4874 *
4875 * Return value:
4876 * 	0 on success / other on failure
4877 **/
4878static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4879{
4880	struct ipr_resource_entry *res;
4881
4882	res = (struct ipr_resource_entry *)sdev->hostdata;
4883	if (res && ipr_is_gata(res))
4884		return ata_scsi_ioctl(sdev, cmd, arg);
4885
4886	return -EINVAL;
4887}
4888
4889/**
4890 * ipr_ioa_info - Get information about the card/driver
4891 * @host:	scsi host struct
4892 *
4893 * Return value:
4894 * 	pointer to buffer with description string
4895 **/
4896static const char * ipr_ioa_info(struct Scsi_Host *host)
4897{
4898	static char buffer[512];
4899	struct ipr_ioa_cfg *ioa_cfg;
4900	unsigned long lock_flags = 0;
4901
4902	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4903
4904	spin_lock_irqsave(host->host_lock, lock_flags);
4905	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4906	spin_unlock_irqrestore(host->host_lock, lock_flags);
4907
4908	return buffer;
4909}
4910
4911static struct scsi_host_template driver_template = {
4912	.module = THIS_MODULE,
4913	.name = "IPR",
4914	.info = ipr_ioa_info,
4915	.ioctl = ipr_ioctl,
4916	.queuecommand = ipr_queuecommand,
4917	.eh_abort_handler = ipr_eh_abort,
4918	.eh_device_reset_handler = ipr_eh_dev_reset,
4919	.eh_host_reset_handler = ipr_eh_host_reset,
4920	.slave_alloc = ipr_slave_alloc,
4921	.slave_configure = ipr_slave_configure,
4922	.slave_destroy = ipr_slave_destroy,
4923	.target_alloc = ipr_target_alloc,
4924	.target_destroy = ipr_target_destroy,
4925	.change_queue_depth = ipr_change_queue_depth,
4926	.change_queue_type = ipr_change_queue_type,
4927	.bios_param = ipr_biosparam,
4928	.can_queue = IPR_MAX_COMMANDS,
4929	.this_id = -1,
4930	.sg_tablesize = IPR_MAX_SGLIST,
4931	.max_sectors = IPR_IOA_MAX_SECTORS,
4932	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4933	.use_clustering = ENABLE_CLUSTERING,
4934	.shost_attrs = ipr_ioa_attrs,
4935	.sdev_attrs = ipr_dev_attrs,
4936	.proc_name = IPR_NAME
4937};
4938
4939/**
4940 * ipr_ata_phy_reset - libata phy_reset handler
4941 * @ap:		ata port to reset
4942 *
4943 **/
4944static void ipr_ata_phy_reset(struct ata_port *ap)
4945{
4946	unsigned long flags;
4947	struct ipr_sata_port *sata_port = ap->private_data;
4948	struct ipr_resource_entry *res = sata_port->res;
4949	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4950	int rc;
4951
4952	ENTER;
4953	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4954	while (ioa_cfg->in_reset_reload) {
4955		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4956		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4957		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4958	}
4959
4960	if (!ioa_cfg->allow_cmds)
4961		goto out_unlock;
4962
4963	rc = ipr_device_reset(ioa_cfg, res);
4964
4965	if (rc) {
4966		ap->ops->port_disable(ap);
4967		goto out_unlock;
4968	}
4969
4970	switch (res->cfgte.proto) {
4971	case IPR_PROTO_SATA:
4972	case IPR_PROTO_SAS_STP:
4973		ap->device[0].class = ATA_DEV_ATA;
4974		break;
4975	case IPR_PROTO_SATA_ATAPI:
4976	case IPR_PROTO_SAS_STP_ATAPI:
4977		ap->device[0].class = ATA_DEV_ATAPI;
4978		break;
4979	default:
4980		ap->device[0].class = ATA_DEV_UNKNOWN;
4981		ap->ops->port_disable(ap);
4982		break;
4983	}
4984
4985out_unlock:
4986	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4987	LEAVE;
4988}
4989
4990/**
4991 * ipr_ata_post_internal - Cleanup after an internal command
4992 * @qc:	ATA queued command
4993 *
4994 * Return value:
4995 * 	none
4996 **/
4997static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4998{
4999	struct ipr_sata_port *sata_port = qc->ap->private_data;
5000	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5001	struct ipr_cmnd *ipr_cmd;
5002	unsigned long flags;
5003
5004	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5005	while (ioa_cfg->in_reset_reload) {
5006		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5007		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5008		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5009	}
5010
5011	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5012		if (ipr_cmd->qc == qc) {
5013			ipr_device_reset(ioa_cfg, sata_port->res);
5014			break;
5015		}
5016	}
5017	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5018}
5019
5020/**
5021 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5022 * @ap:	ATA port
5023 * @tf:	destination ATA taskfile
5024 *
5025 * Return value:
5026 * 	none
5027 **/
5028static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5029{
5030	struct ipr_sata_port *sata_port = ap->private_data;
5031	struct ipr_ioasa_gata *g = &sata_port->ioasa;
5032
5033	tf->feature = g->error;
5034	tf->nsect = g->nsect;
5035	tf->lbal = g->lbal;
5036	tf->lbam = g->lbam;
5037	tf->lbah = g->lbah;
5038	tf->device = g->device;
5039	tf->command = g->status;
5040	tf->hob_nsect = g->hob_nsect;
5041	tf->hob_lbal = g->hob_lbal;
5042	tf->hob_lbam = g->hob_lbam;
5043	tf->hob_lbah = g->hob_lbah;
5044	tf->ctl = g->alt_status;
5045}
5046
5047/**
5048 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5049 * @regs:	destination
5050 * @tf:	source ATA taskfile
5051 *
5052 * Return value:
5053 * 	none
5054 **/
5055static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5056			     struct ata_taskfile *tf)
5057{
5058	regs->feature = tf->feature;
5059	regs->nsect = tf->nsect;
5060	regs->lbal = tf->lbal;
5061	regs->lbam = tf->lbam;
5062	regs->lbah = tf->lbah;
5063	regs->device = tf->device;
5064	regs->command = tf->command;
5065	regs->hob_feature = tf->hob_feature;
5066	regs->hob_nsect = tf->hob_nsect;
5067	regs->hob_lbal = tf->hob_lbal;
5068	regs->hob_lbam = tf->hob_lbam;
5069	regs->hob_lbah = tf->hob_lbah;
5070	regs->ctl = tf->ctl;
5071}
5072
5073/**
5074 * ipr_sata_done - done function for SATA commands
5075 * @ipr_cmd:	ipr command struct
5076 *
5077 * This function is invoked by the interrupt handler for
5078 * ops generated by the SCSI mid-layer to SATA devices
5079 *
5080 * Return value:
5081 * 	none
5082 **/
5083static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5084{
5085	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5086	struct ata_queued_cmd *qc = ipr_cmd->qc;
5087	struct ipr_sata_port *sata_port = qc->ap->private_data;
5088	struct ipr_resource_entry *res = sata_port->res;
5089	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5090
5091	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5092	       sizeof(struct ipr_ioasa_gata));
5093	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5094
5095	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5096		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5097					 res->cfgte.res_addr.target);
5098
5099	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5100		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5101	else
5102		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5103	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5104	ata_qc_complete(qc);
5105}
5106
5107/**
5108 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5109 * @ipr_cmd:	ipr command struct
5110 * @qc:		ATA queued command
5111 *
5112 **/
5113static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5114				struct ata_queued_cmd *qc)
5115{
5116	u32 ioadl_flags = 0;
5117	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5118	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5119	int len = qc->nbytes + qc->pad_len;
5120	struct scatterlist *sg;
5121
5122	if (len == 0)
5123		return;
5124
5125	if (qc->dma_dir == DMA_TO_DEVICE) {
5126		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5127		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5128		ioarcb->write_data_transfer_length = cpu_to_be32(len);
5129		ioarcb->write_ioadl_len =
5130			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5131	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5132		ioadl_flags = IPR_IOADL_FLAGS_READ;
5133		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5134		ioarcb->read_ioadl_len =
5135			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5136	}
5137
5138	ata_for_each_sg(sg, qc) {
5139		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5140		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5141		if (ata_sg_is_last(sg, qc))
5142			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5143		else
5144			ioadl++;
5145	}
5146}
5147
5148/**
5149 * ipr_qc_issue - Issue a SATA qc to a device
5150 * @qc:	queued command
5151 *
5152 * Return value:
5153 * 	0 if success
5154 **/
5155static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5156{
5157	struct ata_port *ap = qc->ap;
5158	struct ipr_sata_port *sata_port = ap->private_data;
5159	struct ipr_resource_entry *res = sata_port->res;
5160	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5161	struct ipr_cmnd *ipr_cmd;
5162	struct ipr_ioarcb *ioarcb;
5163	struct ipr_ioarcb_ata_regs *regs;
5164
5165	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5166		return AC_ERR_SYSTEM;
5167
5168	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5169	ioarcb = &ipr_cmd->ioarcb;
5170	regs = &ioarcb->add_data.u.regs;
5171
5172	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5173	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5174
5175	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5176	ipr_cmd->qc = qc;
5177	ipr_cmd->done = ipr_sata_done;
5178	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5179	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5180	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5181	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5182	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5183
5184	ipr_build_ata_ioadl(ipr_cmd, qc);
5185	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5186	ipr_copy_sata_tf(regs, &qc->tf);
5187	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5188	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5189
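	/*
	 * Tell the IOA how data moves for this taskfile: DMA protocols
	 * set the DMA transfer-type flag, ATAPI protocols set the packet
	 * command flag, and PIO/non-data commands need no extra flags.
	 */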
5190	switch (qc->tf.protocol) {
5191	case ATA_PROT_NODATA:
5192	case ATA_PROT_PIO:
5193		break;
5194
5195	case ATA_PROT_DMA:
5196		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5197		break;
5198
5199	case ATA_PROT_ATAPI:
5200	case ATA_PROT_ATAPI_NODATA:
5201		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5202		break;
5203
5204	case ATA_PROT_ATAPI_DMA:
5205		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5206		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5207		break;
5208
5209	default:
5210		WARN_ON(1);
5211		return AC_ERR_INVALID;
5212	}
5213
5214	mb();
5215	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5216	       ioa_cfg->regs.ioarrin_reg);
5217	return 0;
5218}
5219
5220/**
5221 * ipr_ata_check_status - Return last ATA status
5222 * @ap:	ATA port
5223 *
5224 * Return value:
5225 * 	ATA status
5226 **/
5227static u8 ipr_ata_check_status(struct ata_port *ap)
5228{
5229	struct ipr_sata_port *sata_port = ap->private_data;
5230	return sata_port->ioasa.status;
5231}
5232
5233/**
5234 * ipr_ata_check_altstatus - Return last ATA altstatus
5235 * @ap:	ATA port
5236 *
5237 * Return value:
5238 * 	Alt ATA status
5239 **/
5240static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5241{
5242	struct ipr_sata_port *sata_port = ap->private_data;
5243	return sata_port->ioasa.alt_status;
5244}
5245
5246static struct ata_port_operations ipr_sata_ops = {
5247	.port_disable = ata_port_disable,
5248	.check_status = ipr_ata_check_status,
5249	.check_altstatus = ipr_ata_check_altstatus,
5250	.dev_select = ata_noop_dev_select,
5251	.phy_reset = ipr_ata_phy_reset,
5252	.post_internal_cmd = ipr_ata_post_internal,
5253	.tf_read = ipr_tf_read,
5254	.qc_prep = ata_noop_qc_prep,
5255	.qc_issue = ipr_qc_issue,
5256	.port_start = ata_sas_port_start,
5257	.port_stop = ata_sas_port_stop
5258};
5259
5260static struct ata_port_info sata_port_info = {
5261	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5262	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5263	.pio_mask	= 0x10, /* pio4 */
5264	.mwdma_mask = 0x07,
5265	.udma_mask	= 0x7f, /* udma0-6 */
5266	.port_ops	= &ipr_sata_ops
5267};
5268
5269#ifdef CONFIG_PPC_PSERIES
5270static const u16 ipr_blocked_processors[] = {
5271	PV_NORTHSTAR,
5272	PV_PULSAR,
5273	PV_POWER4,
5274	PV_ICESTAR,
5275	PV_SSTAR,
5276	PV_POWER4p,
5277	PV_630,
5278	PV_630p
5279};
5280
5281/**
5282 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5283 * @ioa_cfg:	ioa cfg struct
5284 *
5285 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5286 * certain pSeries hardware. This function determines if the given
5287 * adapter is in one of these configurations or not.
5288 *
5289 * Return value:
5290 * 	1 if adapter is not supported / 0 if adapter is supported
5291 **/
5292static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5293{
5294	u8 rev_id;
5295	int i;
5296
5297	if (ioa_cfg->type == 0x5702) {
5298		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5299					 &rev_id) == PCIBIOS_SUCCESSFUL) {
5300			if (rev_id < 4) {
5301				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
5302					if (__is_processor(ipr_blocked_processors[i]))
5303						return 1;
5304				}
5305			}
5306		}
5307	}
5308	return 0;
5309}
5310#else
5311#define ipr_invalid_adapter(ioa_cfg) 0
5312#endif
5313
5314/**
5315 * ipr_ioa_bringdown_done - IOA bring down completion.
5316 * @ipr_cmd:	ipr command struct
5317 *
5318 * This function processes the completion of an adapter bring down.
5319 * It wakes any reset sleepers.
5320 *
5321 * Return value:
5322 * 	IPR_RC_JOB_RETURN
5323 **/
5324static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5325{
5326	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5327
5328	ENTER;
5329	ioa_cfg->in_reset_reload = 0;
5330	ioa_cfg->reset_retries = 0;
5331	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5332	wake_up_all(&ioa_cfg->reset_wait_q);
5333
5334	spin_unlock_irq(ioa_cfg->host->host_lock);
5335	scsi_unblock_requests(ioa_cfg->host);
5336	spin_lock_irq(ioa_cfg->host->host_lock);
5337	LEAVE;
5338
5339	return IPR_RC_JOB_RETURN;
5340}
5341
5342/**
5343 * ipr_ioa_reset_done - IOA reset completion.
5344 * @ipr_cmd:	ipr command struct
5345 *
5346 * This function processes the completion of an adapter reset.
5347 * It schedules any necessary mid-layer add/removes and
5348 * wakes any reset sleepers.
5349 *
5350 * Return value:
5351 * 	IPR_RC_JOB_RETURN
5352 **/
5353static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5354{
5355	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5356	struct ipr_resource_entry *res;
5357	struct ipr_hostrcb *hostrcb, *temp;
5358	int i = 0;
5359
5360	ENTER;
5361	ioa_cfg->in_reset_reload = 0;
5362	ioa_cfg->allow_cmds = 1;
5363	ioa_cfg->reset_cmd = NULL;
5364	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5365
5366	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5367		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5368			ipr_trace;
5369			break;
5370		}
5371	}
5372	schedule_work(&ioa_cfg->work_q);
5373
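	/* Repost the free HCAM buffers to the IOA: the first IPR_NUM_LOG_HCAMS for error log data, the remainder for config change notifications */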
5374	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5375		list_del(&hostrcb->queue);
5376		if (i++ < IPR_NUM_LOG_HCAMS)
5377			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5378		else
5379			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5380	}
5381
5382	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5383	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5384
5385	ioa_cfg->reset_retries = 0;
5386	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5387	wake_up_all(&ioa_cfg->reset_wait_q);
5388
5389	spin_unlock_irq(ioa_cfg->host->host_lock);
5390	scsi_unblock_requests(ioa_cfg->host);
5391	spin_lock_irq(ioa_cfg->host->host_lock);
5392
5393	if (!ioa_cfg->allow_cmds)
5394		scsi_block_requests(ioa_cfg->host);
5395
5396	LEAVE;
5397	return IPR_RC_JOB_RETURN;
5398}
5399
5400/**
5401 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5402 * @supported_dev:	supported device struct
5403 * @vpids:			vendor product id struct
5404 *
5405 * Return value:
5406 * 	none
5407 **/
5408static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5409				 struct ipr_std_inq_vpids *vpids)
5410{
5411	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5412	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5413	supported_dev->num_records = 1;
5414	supported_dev->data_length =
5415		cpu_to_be16(sizeof(struct ipr_supported_device));
5416	supported_dev->reserved = 0;
5417}
5418
5419/**
5420 * ipr_set_supported_devs - Send Set Supported Devices for a device
5421 * @ipr_cmd:	ipr command struct
5422 *
5423 * This function sends a Set Supported Devices command to the adapter.
5424 *
5425 * Return value:
5426 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5427 **/
5428static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5429{
5430	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5431	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5432	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5433	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5434	struct ipr_resource_entry *res = ipr_cmd->u.res;
5435
5436	ipr_cmd->job_step = ipr_ioa_reset_done;
5437
5438	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5439		if (!ipr_is_scsi_disk(res))
5440			continue;
5441
5442		ipr_cmd->u.res = res;
5443		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5444
5445		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5446		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5447		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5448
5449		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5450		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5451		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5452
5453		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5454							sizeof(struct ipr_supported_device));
5455		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5456					     offsetof(struct ipr_misc_cbs, supp_dev));
5457		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5458		ioarcb->write_data_transfer_length =
5459			cpu_to_be32(sizeof(struct ipr_supported_device));
5460
5461		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5462			   IPR_SET_SUP_DEVICE_TIMEOUT);
5463
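		/* Come back to this job step for the next disk once this command completes */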
5464		ipr_cmd->job_step = ipr_set_supported_devs;
5465		return IPR_RC_JOB_RETURN;
5466	}
5467
5468	return IPR_RC_JOB_CONTINUE;
5469}
5470
5471/**
5472 * ipr_setup_write_cache - Disable write cache if needed
5473 * @ipr_cmd:	ipr command struct
5474 *
5475 * This function sets up the adapter's write cache to the desired setting.
5476 *
5477 * Return value:
5478 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5479 **/
5480static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5481{
5482	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5483
5484	ipr_cmd->job_step = ipr_set_supported_devs;
5485	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5486				    struct ipr_resource_entry, queue);
5487
5488	if (ioa_cfg->cache_state != CACHE_DISABLED)
5489		return IPR_RC_JOB_CONTINUE;
5490
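	/* Write cache is configured off: ask the IOA to prepare for a normal shutdown, which disables the cache */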
5491	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5492	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5493	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5494	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5495
5496	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5497
5498	return IPR_RC_JOB_RETURN;
5499}
5500
5501/**
5502 * ipr_get_mode_page - Locate specified mode page
5503 * @mode_pages:	mode page buffer
5504 * @page_code:	page code to find
5505 * @len:		minimum required length for mode page
5506 *
5507 * Return value:
5508 * 	pointer to mode page / NULL on failure
5509 **/
5510static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5511			       u32 page_code, u32 len)
5512{
5513	struct ipr_mode_page_hdr *mode_hdr;
5514	u32 page_length;
5515	u32 length;
5516
5517	if (!mode_pages || (mode_pages->hdr.length == 0))
5518		return NULL;
5519
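	/* hdr.length excludes itself; drop the rest of the 4-byte mode parameter header and any block descriptors to get the length of the mode page data */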
5520	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5521	mode_hdr = (struct ipr_mode_page_hdr *)
5522		(mode_pages->data + mode_pages->hdr.block_desc_len);
5523
5524	while (length) {
5525		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5526			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5527				return mode_hdr;
5528			break;
5529		} else {
5530			page_length = (sizeof(struct ipr_mode_page_hdr) +
5531				       mode_hdr->page_length);
5532			length -= page_length;
5533			mode_hdr = (struct ipr_mode_page_hdr *)
5534				((unsigned long)mode_hdr + page_length);
5535		}
5536	}
5537	return NULL;
5538}
5539
5540/**
5541 * ipr_check_term_power - Check for term power errors
5542 * @ioa_cfg:	ioa config struct
5543 * @mode_pages:	IOAFP mode pages buffer
5544 *
5545 * Check the IOAFP's mode page 28 for term power errors
5546 *
5547 * Return value:
5548 * 	nothing
5549 **/
5550static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5551				 struct ipr_mode_pages *mode_pages)
5552{
5553	int i;
5554	int entry_length;
5555	struct ipr_dev_bus_entry *bus;
5556	struct ipr_mode_page28 *mode_page;
5557
5558	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5559				      sizeof(struct ipr_mode_page28));
5560
5561	entry_length = mode_page->entry_length;
5562
5563	bus = mode_page->bus;
5564
5565	for (i = 0; i < mode_page->num_entries; i++) {
5566		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5567			dev_err(&ioa_cfg->pdev->dev,
5568				"Term power is absent on scsi bus %d\n",
5569				bus->res_addr.bus);
5570		}
5571
5572		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5573	}
5574}
5575
5576/**
5577 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5578 * @ioa_cfg:	ioa config struct
5579 *
5580 * Looks through the config table checking for SES devices. If
5581 * the SES device is in the SES table indicating a maximum SCSI
5582 * bus speed, the speed is limited for the bus.
5583 *
5584 * Return value:
5585 * 	none
5586 **/
5587static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5588{
5589	u32 max_xfer_rate;
5590	int i;
5591
5592	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5593		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5594						       ioa_cfg->bus_attr[i].bus_width);
5595
5596		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5597			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5598	}
5599}
5600
5601/**
5602 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5603 * @ioa_cfg:	ioa config struct
5604 * @mode_pages:	mode page 28 buffer
5605 *
5606 * Updates mode page 28 based on driver configuration
5607 *
5608 * Return value:
5609 * 	none
5610 **/
5611static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5612					  	struct ipr_mode_pages *mode_pages)
5613{
5614	int i, entry_length;
5615	struct ipr_dev_bus_entry *bus;
5616	struct ipr_bus_attributes *bus_attr;
5617	struct ipr_mode_page28 *mode_page;
5618
5619	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5620				      sizeof(struct ipr_mode_page28));
5621
5622	entry_length = mode_page->entry_length;
5623
5624	/* Loop for each device bus entry */
5625	for (i = 0, bus = mode_page->bus;
5626	     i < mode_page->num_entries;
5627	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5628		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5629			dev_err(&ioa_cfg->pdev->dev,
5630				"Invalid resource address reported: 0x%08X\n",
5631				IPR_GET_PHYS_LOC(bus->res_addr));
5632			continue;
5633		}
5634
5635		bus_attr = &ioa_cfg->bus_attr[i];
5636		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5637		bus->bus_width = bus_attr->bus_width;
5638		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5639		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5640		if (bus_attr->qas_enabled)
5641			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5642		else
5643			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5644	}
5645}
5646
5647/**
5648 * ipr_build_mode_select - Build a mode select command
5649 * @ipr_cmd:	ipr command struct
5650 * @res_handle:	resource handle to send command to
5651 * @parm:		Byte 1 of Mode Select command
5652 * @dma_addr:	DMA buffer address
5653 * @xfer_len:	data transfer length
5654 *
5655 * Return value:
5656 * 	none
5657 **/
5658static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5659				  __be32 res_handle, u8 parm, u32 dma_addr,
5660				  u8 xfer_len)
5661{
5662	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5663	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5664
5665	ioarcb->res_handle = res_handle;
5666	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5667	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5668	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5669	ioarcb->cmd_pkt.cdb[1] = parm;
5670	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5671
5672	ioadl->flags_and_data_len =
5673		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5674	ioadl->address = cpu_to_be32(dma_addr);
5675	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5676	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5677}
5678
5679/**
5680 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5681 * @ipr_cmd:	ipr command struct
5682 *
5683 * This function sets up the SCSI bus attributes and sends
5684 * a Mode Select for Page 28 to activate them.
5685 *
5686 * Return value:
5687 * 	IPR_RC_JOB_RETURN
5688 **/
5689static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5690{
5691	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5692	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5693	int length;
5694
5695	ENTER;
5696	ipr_scsi_bus_speed_limit(ioa_cfg);
5697	ipr_check_term_power(ioa_cfg, mode_pages);
5698	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5699	length = mode_pages->hdr.length + 1;
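	/* The mode data length field is reserved for Mode Select, so clear it before sending the data back */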
5700	mode_pages->hdr.length = 0;
5701
5702	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5703			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5704			      length);
5705
5706	ipr_cmd->job_step = ipr_setup_write_cache;
5707	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5708
5709	LEAVE;
5710	return IPR_RC_JOB_RETURN;
5711}
5712
5713/**
5714 * ipr_build_mode_sense - Builds a mode sense command
5715 * @ipr_cmd:	ipr command struct
5716 * @res_handle:	resource handle to send command to
5717 * @parm:		Byte 2 of mode sense command
5718 * @dma_addr:	DMA address of mode sense buffer
5719 * @xfer_len:	Size of DMA buffer
5720 *
5721 * Return value:
5722 * 	none
5723 **/
5724static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5725				 __be32 res_handle,
5726				 u8 parm, u32 dma_addr, u8 xfer_len)
5727{
5728	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5729	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5730
5731	ioarcb->res_handle = res_handle;
5732	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5733	ioarcb->cmd_pkt.cdb[2] = parm;
5734	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5735	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5736
5737	ioadl->flags_and_data_len =
5738		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5739	ioadl->address = cpu_to_be32(dma_addr);
5740	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5741	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5742}
5743
5744/**
5745 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5746 * @ipr_cmd:	ipr command struct
5747 *
5748 * This function handles the failure of an IOA bringup command.
5749 *
5750 * Return value:
5751 * 	IPR_RC_JOB_RETURN
5752 **/
5753static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5754{
5755	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5756	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5757
5758	dev_err(&ioa_cfg->pdev->dev,
5759		"0x%02X failed with IOASC: 0x%08X\n",
5760		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5761
5762	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5763	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5764	return IPR_RC_JOB_RETURN;
5765}
5766
5767/**
5768 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5769 * @ipr_cmd:	ipr command struct
5770 *
5771 * This function handles the failure of a Mode Sense to the IOAFP.
5772 * Some adapters do not handle all mode pages.
5773 *
5774 * Return value:
5775 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5776 **/
5777static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5778{
5779	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5780
5781	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5782		ipr_cmd->job_step = ipr_setup_write_cache;
5783		return IPR_RC_JOB_CONTINUE;
5784	}
5785
5786	return ipr_reset_cmd_failed(ipr_cmd);
5787}
5788
5789/**
5790 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5791 * @ipr_cmd:	ipr command struct
5792 *
5793 * This function sends a Page 28 mode sense to the IOA to
5794 * retrieve SCSI bus attributes.
5795 *
5796 * Return value:
5797 * 	IPR_RC_JOB_RETURN
5798 **/
5799static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5800{
5801	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5802
5803	ENTER;
5804	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5805			     0x28, ioa_cfg->vpd_cbs_dma +
5806			     offsetof(struct ipr_misc_cbs, mode_pages),
5807			     sizeof(struct ipr_mode_pages));
5808
5809	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5810	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5811
5812	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5813
5814	LEAVE;
5815	return IPR_RC_JOB_RETURN;
5816}
5817
5818/**
5819 * ipr_init_res_table - Initialize the resource table
5820 * @ipr_cmd:	ipr command struct
5821 *
5822 * This function looks through the existing resource table, comparing
5823 * it with the config table. This function will take care of old/new
5824 * devices and schedule adding/removing them from the mid-layer
5825 * as appropriate.
5826 *
5827 * Return value:
5828 * 	IPR_RC_JOB_CONTINUE
5829 **/
5830static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5831{
5832	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5833	struct ipr_resource_entry *res, *temp;
5834	struct ipr_config_table_entry *cfgte;
5835	int found, i;
5836	LIST_HEAD(old_res);
5837
5838	ENTER;
5839	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5840		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5841
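	/* Park every known resource on old_res; entries still present in the new config table are moved back below */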
5842	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5843		list_move_tail(&res->queue, &old_res);
5844
5845	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5846		cfgte = &ioa_cfg->cfg_table->dev[i];
5847		found = 0;
5848
5849		list_for_each_entry_safe(res, temp, &old_res, queue) {
5850			if (!memcmp(&res->cfgte.res_addr,
5851				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5852				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5853				found = 1;
5854				break;
5855			}
5856		}
5857
5858		if (!found) {
5859			if (list_empty(&ioa_cfg->free_res_q)) {
5860				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5861				break;
5862			}
5863
5864			found = 1;
5865			res = list_entry(ioa_cfg->free_res_q.next,
5866					 struct ipr_resource_entry, queue);
5867			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5868			ipr_init_res_entry(res);
5869			res->add_to_ml = 1;
5870		}
5871
5872		if (found)
5873			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5874	}
5875
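	/* Anything left on old_res is gone from the config table: schedule a mid-layer remove if it was exposed, otherwise return it to the free list */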
5876	list_for_each_entry_safe(res, temp, &old_res, queue) {
5877		if (res->sdev) {
5878			res->del_from_ml = 1;
5879			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5880			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5881		} else {
5882			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5883		}
5884	}
5885
5886	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5887
5888	LEAVE;
5889	return IPR_RC_JOB_CONTINUE;
5890}
5891
5892/**
5893 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5894 * @ipr_cmd:	ipr command struct
5895 *
5896 * This function sends a Query IOA Configuration command
5897 * to the adapter to retrieve the IOA configuration table.
5898 *
5899 * Return value:
5900 * 	IPR_RC_JOB_RETURN
5901 **/
5902static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5903{
5904	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5905	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5906	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5907	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5908
5909	ENTER;
5910	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5911		 ucode_vpd->major_release, ucode_vpd->card_type,
5912		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5913	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5914	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5915
5916	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5917	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5918	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5919
5920	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5921	ioarcb->read_data_transfer_length =
5922		cpu_to_be32(sizeof(struct ipr_config_table));
5923
5924	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5925	ioadl->flags_and_data_len =
5926		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5927
5928	ipr_cmd->job_step = ipr_init_res_table;
5929
5930	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5931
5932	LEAVE;
5933	return IPR_RC_JOB_RETURN;
5934}
5935
5936/**
5937 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5938 * @ipr_cmd:	ipr command struct
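 * @flags:	CDB byte 1 flags (e.g. the EVPD bit)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	size of the inquiry buffer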
5939 *
5940 * This utility function sends an inquiry to the adapter.
5941 *
5942 * Return value:
5943 * 	none
5944 **/
5945static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5946			      u32 dma_addr, u8 xfer_len)
5947{
5948	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5949	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5950
5951	ENTER;
5952	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5953	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5954
5955	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5956	ioarcb->cmd_pkt.cdb[1] = flags;
5957	ioarcb->cmd_pkt.cdb[2] = page;
5958	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5959
5960	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5961	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5962
5963	ioadl->address = cpu_to_be32(dma_addr);
5964	ioadl->flags_and_data_len =
5965		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5966
5967	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5968	LEAVE;
5969}
5970
5971/**
5972 * ipr_inquiry_page_supported - Is the given inquiry page supported
5973 * @page0:		inquiry page 0 buffer
5974 * @page:		page code.
5975 *
5976 * This function determines if the specified inquiry page is supported.
5977 *
5978 * Return value:
5979 *	1 if page is supported / 0 if not
5980 **/
5981static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5982{
5983	int i;
5984
5985	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5986		if (page0->page[i] == page)
5987			return 1;
5988
5989	return 0;
5990}
5991
5992/**
5993 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5994 * @ipr_cmd:	ipr command struct
5995 *
5996 * This function sends a Page 3 inquiry to the adapter
5997 * to retrieve software VPD information.
5998 *
5999 * Return value:
6000 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6001 **/
6002static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6003{
6004	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6005	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6006
6007	ENTER;
6008
6009	if (!ipr_inquiry_page_supported(page0, 1))
6010		ioa_cfg->cache_state = CACHE_NONE;
6011
6012	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6013
6014	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6015			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6016			  sizeof(struct ipr_inquiry_page3));
6017
6018	LEAVE;
6019	return IPR_RC_JOB_RETURN;
6020}
6021
6022/**
6023 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6024 * @ipr_cmd:	ipr command struct
6025 *
6026 * This function sends a Page 0 inquiry to the adapter
6027 * to retrieve supported inquiry pages.
6028 *
6029 * Return value:
6030 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6031 **/
6032static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6033{
6034	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6035	char type[5];
6036
6037	ENTER;
6038
6039	/* Grab the type out of the VPD and store it away */
6040	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6041	type[4] = '\0';
6042	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6043
6044	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6045
6046	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6047			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6048			  sizeof(struct ipr_inquiry_page0));
6049
6050	LEAVE;
6051	return IPR_RC_JOB_RETURN;
6052}
6053
6054/**
6055 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6056 * @ipr_cmd:	ipr command struct
6057 *
6058 * This function sends a standard inquiry to the adapter.
6059 *
6060 * Return value:
6061 * 	IPR_RC_JOB_RETURN
6062 **/
6063static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6064{
6065	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6066
6067	ENTER;
6068	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6069
6070	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6071			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6072			  sizeof(struct ipr_ioa_vpd));
6073
6074	LEAVE;
6075	return IPR_RC_JOB_RETURN;
6076}
6077
6078/**
6079 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6080 * @ipr_cmd:	ipr command struct
6081 *
6082 * This function sends an Identify Host Request Response Queue
6083 * command to establish the HRRQ with the adapter.
6084 *
6085 * Return value:
6086 * 	IPR_RC_JOB_RETURN
6087 **/
6088static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6089{
6090	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6091	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6092
6093	ENTER;
6094	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6095
6096	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6097	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6098
6099	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
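	/* CDB bytes 2-5 carry the host RRQ DMA address (big-endian), bytes 7-8 its length in bytes */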
6100	ioarcb->cmd_pkt.cdb[2] =
6101		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6102	ioarcb->cmd_pkt.cdb[3] =
6103		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6104	ioarcb->cmd_pkt.cdb[4] =
6105		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6106	ioarcb->cmd_pkt.cdb[5] =
6107		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6108	ioarcb->cmd_pkt.cdb[7] =
6109		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6110	ioarcb->cmd_pkt.cdb[8] =
6111		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6112
6113	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6114
6115	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6116
6117	LEAVE;
6118	return IPR_RC_JOB_RETURN;
6119}
6120
6121/**
6122 * ipr_reset_timer_done - Adapter reset timer function
6123 * @ipr_cmd:	ipr command struct
6124 *
6125 * Description: This function is used in adapter reset processing
6126 * for timing events. If the reset_cmd pointer in the IOA
6127 * config struct no longer points at this command, we are doing nested
6128 * resets and fail_all_ops will take care of freeing the
6129 * command block.
6130 *
6131 * Return value:
6132 * 	none
6133 **/
6134static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6135{
6136	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6137	unsigned long lock_flags = 0;
6138
6139	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6140
6141	if (ioa_cfg->reset_cmd == ipr_cmd) {
6142		list_del(&ipr_cmd->queue);
6143		ipr_cmd->done(ipr_cmd);
6144	}
6145
6146	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6147}
6148
6149/**
6150 * ipr_reset_start_timer - Start a timer for adapter reset job
6151 * @ipr_cmd:	ipr command struct
6152 * @timeout:	timeout value
6153 *
6154 * Description: This function is used in adapter reset processing
6155 * for timing events. If the reset_cmd pointer in the IOA
6156 * config struct no longer points at this command, we are doing nested
6157 * resets and fail_all_ops will take care of freeing the
6158 * command block.
6159 *
6160 * Return value:
6161 * 	none
6162 **/
6163static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6164				  unsigned long timeout)
6165{
6166	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6167	ipr_cmd->done = ipr_reset_ioa_job;
6168
6169	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6170	ipr_cmd->timer.expires = jiffies + timeout;
6171	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6172	add_timer(&ipr_cmd->timer);
6173}
6174
6175/**
6176 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6177 * @ioa_cfg:	ioa cfg struct
6178 *
6179 * Return value:
6180 * 	nothing
6181 **/
6182static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6183{
6184	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6185
6186	/* Initialize Host RRQ pointers */
6187	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6188	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6189	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6190	ioa_cfg->toggle_bit = 1;
6191
6192	/* Zero out config table */
6193	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6194}
6195
6196/**
6197 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6198 * @ipr_cmd:	ipr command struct
6199 *
6200 * This function reinitializes some control blocks and
6201 * enables destructive diagnostics on the adapter.
6202 *
6203 * Return value:
6204 * 	IPR_RC_JOB_RETURN
6205 **/
6206static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6207{
6208	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6209	volatile u32 int_reg;
6210
6211	ENTER;
6212	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6213	ipr_init_ioa_mem(ioa_cfg);
6214
6215	ioa_cfg->allow_interrupts = 1;
6216	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6217
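	/* If the IOA has already transitioned to operational, just unmask its interrupts and move on to the next job step */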
6218	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6219		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6220		       ioa_cfg->regs.clr_interrupt_mask_reg);
6221		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6222		return IPR_RC_JOB_CONTINUE;
6223	}
6224
6225	/* Enable destructive diagnostics on IOA */
6226	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6227
6228	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6229	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6230
6231	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6232
6233	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6234	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6235	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6236	ipr_cmd->done = ipr_reset_ioa_job;
6237	add_timer(&ipr_cmd->timer);
6238	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6239
6240	LEAVE;
6241	return IPR_RC_JOB_RETURN;
6242}
6243
6244/**
6245 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6246 * @ipr_cmd:	ipr command struct
6247 *
6248 * This function is invoked when an adapter dump has run out
6249 * of processing time.
6250 *
6251 * Return value:
6252 * 	IPR_RC_JOB_CONTINUE
6253 **/
6254static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6255{
6256	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6257
6258	if (ioa_cfg->sdt_state == GET_DUMP)
6259		ioa_cfg->sdt_state = ABORT_DUMP;
6260
6261	ipr_cmd->job_step = ipr_reset_alert;
6262
6263	return IPR_RC_JOB_CONTINUE;
6264}
6265
6266/**
6267 * ipr_unit_check_no_data - Log a unit check/no data error log
6268 * @ioa_cfg:		ioa config struct
6269 *
6270 * Logs an error indicating the adapter unit checked, but for some
6271 * reason, we were unable to fetch the unit check buffer.
6272 *
6273 * Return value:
6274 * 	nothing
6275 **/
6276static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6277{
6278	ioa_cfg->errors_logged++;
6279	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6280}
6281
6282/**
6283 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6284 * @ioa_cfg:		ioa config struct
6285 *
6286 * Fetches the unit check buffer from the adapter by clocking the data
6287 * through the mailbox register.
6288 *
6289 * Return value:
6290 * 	nothing
6291 **/
6292static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6293{
6294	unsigned long mailbox;
6295	struct ipr_hostrcb *hostrcb;
6296	struct ipr_uc_sdt sdt;
6297	int rc, length;
6298	u32 ioasc;
6299
6300	mailbox = readl(ioa_cfg->ioa_mailbox);
6301
6302	if (!ipr_sdt_is_fmt2(mailbox)) {
6303		ipr_unit_check_no_data(ioa_cfg);
6304		return;
6305	}
6306
6307	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6308	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6309					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6310
6311	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6312	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6313		ipr_unit_check_no_data(ioa_cfg);
6314		return;
6315	}
6316
6317	/* Find length of the first sdt entry (UC buffer) */
6318	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6319		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6320
6321	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6322			     struct ipr_hostrcb, queue);
6323	list_del(&hostrcb->queue);
6324	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6325
6326	rc = ipr_get_ldump_data_section(ioa_cfg,
6327					be32_to_cpu(sdt.entry[0].bar_str_offset),
6328					(__be32 *)&hostrcb->hcam,
6329					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6330
6331	if (!rc) {
6332		ipr_handle_log_data(ioa_cfg, hostrcb);
6333		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6334		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6335		    ioa_cfg->sdt_state == GET_DUMP)
6336			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6337	} else
6338		ipr_unit_check_no_data(ioa_cfg);
6339
6340	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6341}
6342
6343/**
6344 * ipr_reset_restore_cfg_space - Restore PCI config space.
6345 * @ipr_cmd:	ipr command struct
6346 *
6347 * Description: This function restores the saved PCI config space of
6348 * the adapter, fails all outstanding ops back to the callers, and
6349 * fetches the dump/unit check if applicable to this reset.
6350 *
6351 * Return value:
6352 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6353 **/
6354static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6355{
6356	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6357	int rc;
6358
6359	ENTER;
6360	rc = pci_restore_state(ioa_cfg->pdev);
6361
6362	if (rc != PCIBIOS_SUCCESSFUL) {
6363		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6364		return IPR_RC_JOB_CONTINUE;
6365	}
6366
6367	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6368		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6369		return IPR_RC_JOB_CONTINUE;
6370	}
6371
6372	ipr_fail_all_ops(ioa_cfg);
6373
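	/* On a unit check, fetch the unit check buffer now, then alert the IOA and restart the reset sequence */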
6374	if (ioa_cfg->ioa_unit_checked) {
6375		ioa_cfg->ioa_unit_checked = 0;
6376		ipr_get_unit_check_buffer(ioa_cfg);
6377		ipr_cmd->job_step = ipr_reset_alert;
6378		ipr_reset_start_timer(ipr_cmd, 0);
6379		return IPR_RC_JOB_RETURN;
6380	}
6381
6382	if (ioa_cfg->in_ioa_bringdown) {
6383		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6384	} else {
6385		ipr_cmd->job_step = ipr_reset_enable_ioa;
6386
6387		if (GET_DUMP == ioa_cfg->sdt_state) {
6388			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6389			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6390			schedule_work(&ioa_cfg->work_q);
6391			return IPR_RC_JOB_RETURN;
6392		}
6393	}
6394
6395	LEAVE;
6396	return IPR_RC_JOB_CONTINUE;
6397}
6398
6399/**
6400 * ipr_reset_bist_done - BIST has completed on the adapter.
6401 * @ipr_cmd:	ipr command struct
6402 *
6403 * Description: Unblock config space and resume the reset process.
6404 *
6405 * Return value:
6406 * 	IPR_RC_JOB_CONTINUE
6407 **/
6408static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6409{
6410	ENTER;
6411	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6412	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6413	LEAVE;
6414	return IPR_RC_JOB_CONTINUE;
6415}
6416
6417/**
6418 * ipr_reset_start_bist - Run BIST on the adapter.
6419 * @ipr_cmd:	ipr command struct
6420 *
6421 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6422 *
6423 * Return value:
6424 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6425 **/
6426static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6427{
6428	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6429	int rc;
6430
6431	ENTER;
6432	pci_block_user_cfg_access(ioa_cfg->pdev);
6433	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6434
6435	if (rc != PCIBIOS_SUCCESSFUL) {
6436		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6437		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6438		rc = IPR_RC_JOB_CONTINUE;
6439	} else {
6440		ipr_cmd->job_step = ipr_reset_bist_done;
6441		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6442		rc = IPR_RC_JOB_RETURN;
6443	}
6444
6445	LEAVE;
6446	return rc;
6447}
6448
6449/**
6450 * ipr_reset_allowed - Query whether or not IOA can be reset
6451 * @ioa_cfg:	ioa config struct
6452 *
6453 * Return value:
6454 * 	0 if reset not allowed / non-zero if reset is allowed
6455 **/
6456static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6457{
6458	volatile u32 temp_reg;
6459
6460	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6461	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6462}
6463
6464/**
6465 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6466 * @ipr_cmd:	ipr command struct
6467 *
6468 * Description: This function waits for adapter permission to run BIST,
6469 * then runs BIST. If the adapter does not give permission after a
6470 * reasonable time, we will reset the adapter anyway. The impact of
6471 * resetting the adapter without warning the adapter is the risk of
6472 * losing the persistent error log on the adapter. If the adapter is
6473 * reset while it is writing to the flash on the adapter, the flash
6474 * segment will have bad ECC and be zeroed.
6475 *
6476 * Return value:
6477 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6478 **/
6479static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6480{
6481	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6482	int rc = IPR_RC_JOB_RETURN;
6483
6484	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6485		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6486		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6487	} else {
6488		ipr_cmd->job_step = ipr_reset_start_bist;
6489		rc = IPR_RC_JOB_CONTINUE;
6490	}
6491
6492	return rc;
6493}
6494
6495/**
6496 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6497 * @ipr_cmd:	ipr command struct
6498 *
6499 * Description: This function alerts the adapter that it will be reset.
6500 * If memory space is not currently enabled, proceed directly
6501 * to running BIST on the adapter. The timer must always be started
6502 * so we guarantee we do not run BIST from ipr_isr.
6503 *
6504 * Return value:
6505 * 	IPR_RC_JOB_RETURN
6506 **/
6507static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6508{
6509	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6510	u16 cmd_reg;
6511	int rc;
6512
6513	ENTER;
6514	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6515
6516	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6517		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6518		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6519		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6520	} else {
6521		ipr_cmd->job_step = ipr_reset_start_bist;
6522	}
6523
6524	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6525	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6526
6527	LEAVE;
6528	return IPR_RC_JOB_RETURN;
6529}
6530
6531/**
6532 * ipr_reset_ucode_download_done - Microcode download completion
6533 * @ipr_cmd:	ipr command struct
6534 *
6535 * Description: This function unmaps the microcode download buffer.
6536 *
6537 * Return value:
6538 * 	IPR_RC_JOB_CONTINUE
6539 **/
6540static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6541{
6542	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6543	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6544
6545	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6546		     sglist->num_sg, DMA_TO_DEVICE);
6547
6548	ipr_cmd->job_step = ipr_reset_alert;
6549	return IPR_RC_JOB_CONTINUE;
6550}
6551
6552/**
6553 * ipr_reset_ucode_download - Download microcode to the adapter
6554 * @ipr_cmd:	ipr command struct
6555 *
6556 * Description: This function checks to see if there is microcode
6557 * to download to the adapter. If there is, a download is performed.
6558 *
6559 * Return value:
6560 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6561 **/
6562static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6563{
6564	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6565	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6566
6567	ENTER;
6568	ipr_cmd->job_step = ipr_reset_alert;
6569
6570	if (!sglist)
6571		return IPR_RC_JOB_CONTINUE;
6572
6573	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6574	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6575	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6576	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
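	/* WRITE BUFFER CDB bytes 6-8 hold the 24-bit image length */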
6577	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6578	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6579	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6580
6581	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6582	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6583
6584	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6585		   IPR_WRITE_BUFFER_TIMEOUT);
6586
6587	LEAVE;
6588	return IPR_RC_JOB_RETURN;
6589}
6590
6591/**
6592 * ipr_reset_shutdown_ioa - Shutdown the adapter
6593 * @ipr_cmd:	ipr command struct
6594 *
6595 * Description: This function issues an adapter shutdown of the
6596 * specified type to the specified adapter as part of the
6597 * adapter reset job.
6598 *
6599 * Return value:
6600 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6601 **/
6602static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6603{
6604	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6605	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6606	unsigned long timeout;
6607	int rc = IPR_RC_JOB_CONTINUE;
6608
6609	ENTER;
6610	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6611		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6612		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6613		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6614		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6615
6616		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6617			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6618		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6619			timeout = IPR_INTERNAL_TIMEOUT;
6620		else
6621			timeout = IPR_SHUTDOWN_TIMEOUT;
6622
6623		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6624
6625		rc = IPR_RC_JOB_RETURN;
6626		ipr_cmd->job_step = ipr_reset_ucode_download;
6627	} else
6628		ipr_cmd->job_step = ipr_reset_alert;
6629
6630	LEAVE;
6631	return rc;
6632}
6633
6634/**
6635 * ipr_reset_ioa_job - Adapter reset job
6636 * @ipr_cmd:	ipr command struct
6637 *
6638 * Description: This function is the job router for the adapter reset job.
6639 *
6640 * Return value:
6641 * 	none
6642 **/
6643static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6644{
6645	u32 rc, ioasc;
6646	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6647
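	/* Drive the job steps: each step either continues inline (JOB_CONTINUE) or queues asynchronous work and returns (JOB_RETURN) */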
6648	do {
6649		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6650
6651		if (ioa_cfg->reset_cmd != ipr_cmd) {
6652			/*
6653			 * We are doing nested adapter resets and this is
6654			 * not the current reset job.
6655			 */
6656			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6657			return;
6658		}
6659
6660		if (IPR_IOASC_SENSE_KEY(ioasc)) {
6661			rc = ipr_cmd->job_step_failed(ipr_cmd);
6662			if (rc == IPR_RC_JOB_RETURN)
6663				return;
6664		}
6665
6666		ipr_reinit_ipr_cmnd(ipr_cmd);
6667		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6668		rc = ipr_cmd->job_step(ipr_cmd);
6669	} while(rc == IPR_RC_JOB_CONTINUE);
6670}
6671
6672/**
6673 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6674 * @ioa_cfg:		ioa config struct
6675 * @job_step:		first job step of reset job
6676 * @shutdown_type:	shutdown type
6677 *
6678 * Description: This function will initiate the reset of the given adapter
6679 * starting at the selected job step.
6680 * If the caller needs to wait on the completion of the reset,
6681 * the caller must sleep on the reset_wait_q.
6682 *
6683 * Return value:
6684 * 	none
6685 **/
6686static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6687				    int (*job_step) (struct ipr_cmnd *),
6688				    enum ipr_shutdown_type shutdown_type)
6689{
6690	struct ipr_cmnd *ipr_cmd;
6691
6692	ioa_cfg->in_reset_reload = 1;
6693	ioa_cfg->allow_cmds = 0;
6694	scsi_block_requests(ioa_cfg->host);
6695
6696	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6697	ioa_cfg->reset_cmd = ipr_cmd;
6698	ipr_cmd->job_step = job_step;
6699	ipr_cmd->u.shutdown_type = shutdown_type;
6700
6701	ipr_reset_ioa_job(ipr_cmd);
6702}
6703
6704/**
6705 * ipr_initiate_ioa_reset - Initiate an adapter reset
6706 * @ioa_cfg:		ioa config struct
6707 * @shutdown_type:	shutdown type
6708 *
6709 * Description: This function will initiate the reset of the given adapter.
6710 * If the caller needs to wait on the completion of the reset,
6711 * the caller must sleep on the reset_wait_q.
6712 *
6713 * Return value:
6714 * 	none
6715 **/
6716static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6717				   enum ipr_shutdown_type shutdown_type)
6718{
6719	if (ioa_cfg->ioa_is_dead)
6720		return;
6721
6722	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6723		ioa_cfg->sdt_state = ABORT_DUMP;
6724
6725	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6726		dev_err(&ioa_cfg->pdev->dev,
6727			"IOA taken offline - error recovery failed\n");
6728
6729		ioa_cfg->reset_retries = 0;
6730		ioa_cfg->ioa_is_dead = 1;
6731
6732		if (ioa_cfg->in_ioa_bringdown) {
6733			ioa_cfg->reset_cmd = NULL;
6734			ioa_cfg->in_reset_reload = 0;
6735			ipr_fail_all_ops(ioa_cfg);
6736			wake_up_all(&ioa_cfg->reset_wait_q);
6737
6738			spin_unlock_irq(ioa_cfg->host->host_lock);
6739			scsi_unblock_requests(ioa_cfg->host);
6740			spin_lock_irq(ioa_cfg->host->host_lock);
6741			return;
6742		} else {
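			/* Bring the dead adapter down without trying to send it a shutdown command */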
6743			ioa_cfg->in_ioa_bringdown = 1;
6744			shutdown_type = IPR_SHUTDOWN_NONE;
6745		}
6746	}
6747
6748	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6749				shutdown_type);
6750}
6751
6752/**
6753 * ipr_reset_freeze - Hold off all I/O activity
6754 * @ipr_cmd:	ipr command struct
6755 *
6756 * Description: If the PCI slot is frozen, hold off all I/O
6757 * activity; then, as soon as the slot is available again,
6758 * initiate an adapter reset.
6759 */
6760static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6761{
6762	/* Disallow new interrupts, avoid loop */
6763	ipr_cmd->ioa_cfg->allow_interrupts = 0;
6764	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6765	ipr_cmd->done = ipr_reset_ioa_job;
6766	return IPR_RC_JOB_RETURN;
6767}
6768
6769/**
6770 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6771 * @pdev:	PCI device struct
6772 *
6773 * Description: This routine is called to tell us that the PCI bus
6774 * is down. Can't do anything here, except put the device driver
6775 * into a holding pattern, waiting for the PCI bus to come back.
6776 */
6777static void ipr_pci_frozen(struct pci_dev *pdev)
6778{
6779	unsigned long flags = 0;
6780	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6781
6782	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6783	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6784	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6785}
6786
6787/**
6788 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6789 * @pdev:	PCI device struct
6790 *
6791 * Description: This routine is called by the pci error recovery
6792 * code after the PCI slot has been reset, just before we
6793 * should resume normal operations.
6794 */
6795static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6796{
6797	unsigned long flags = 0;
6798	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6799
6800	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6801	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6802	                                 IPR_SHUTDOWN_NONE);
6803	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6804	return PCI_ERS_RESULT_RECOVERED;
6805}
6806
6807/**
6808 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6809 * @pdev:	PCI device struct
6810 *
6811 * Description: This routine is called when the PCI bus has
6812 * permanently failed.
6813 */
6814static void ipr_pci_perm_failure(struct pci_dev *pdev)
6815{
6816	unsigned long flags = 0;
6817	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6818
6819	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6820	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6821		ioa_cfg->sdt_state = ABORT_DUMP;
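	/* Exhaust the retry count so ipr_initiate_ioa_reset() takes the adapter offline immediately */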
6822	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6823	ioa_cfg->in_ioa_bringdown = 1;
6824	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6825	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6826}
6827
6828/**
6829 * ipr_pci_error_detected - Called when a PCI error is detected.
6830 * @pdev:	PCI device struct
6831 * @state:	PCI channel state
6832 *
6833 * Description: Called when a PCI error is detected.
6834 *
6835 * Return value:
6836 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6837 */
6838static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6839					       pci_channel_state_t state)
6840{
6841	switch (state) {
6842	case pci_channel_io_frozen:
6843		ipr_pci_frozen(pdev);
6844		return PCI_ERS_RESULT_NEED_RESET;
6845	case pci_channel_io_perm_failure:
6846		ipr_pci_perm_failure(pdev);
6847		return PCI_ERS_RESULT_DISCONNECT;
6849	default:
6850		break;
6851	}
6852	return PCI_ERS_RESULT_NEED_RESET;
6853}
6854
6855/**
6856 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6857 * @ioa_cfg:	ioa cfg struct
6858 *
6859 * Description: This is the second phase of adapter initialization.
6860 * This function takes care of initializing the adapter to the point
6861 * where it can accept new commands.
6862 *
6863 * Return value:
6864 * 	0 on success / -EIO on failure
6865 **/
6866static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6867{
6868	int rc = 0;
6869	unsigned long host_lock_flags = 0;
6870
6871	ENTER;
6872	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6873	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6874	if (ioa_cfg->needs_hard_reset) {
6875		ioa_cfg->needs_hard_reset = 0;
6876		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6877	} else
6878		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
6879					IPR_SHUTDOWN_NONE);
6880
6881	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6882	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6883	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6884
6885	if (ioa_cfg->ioa_is_dead) {
6886		rc = -EIO;
6887	} else if (ipr_invalid_adapter(ioa_cfg)) {
6888		if (!ipr_testmode)
6889			rc = -EIO;
6890
6891		dev_err(&ioa_cfg->pdev->dev,
6892			"Adapter not supported in this hardware configuration.\n");
6893	}
6894
6895	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6896
6897	LEAVE;
6898	return rc;
6899}
6900
6901/**
6902 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6903 * @ioa_cfg:	ioa config struct
6904 *
6905 * Return value:
6906 * 	none
6907 **/
6908static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6909{
6910	int i;
6911
6912	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6913		if (ioa_cfg->ipr_cmnd_list[i])
6914			pci_pool_free(ioa_cfg->ipr_cmd_pool,
6915				      ioa_cfg->ipr_cmnd_list[i],
6916				      ioa_cfg->ipr_cmnd_list_dma[i]);
6917
6918		ioa_cfg->ipr_cmnd_list[i] = NULL;
6919	}
6920
6921	if (ioa_cfg->ipr_cmd_pool)
6922		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6923
6924	ioa_cfg->ipr_cmd_pool = NULL;
6925}
6926
6927/**
6928 * ipr_free_mem - Frees memory allocated for an adapter
6929 * @ioa_cfg:	ioa cfg struct
6930 *
6931 * Return value:
6932 * 	nothing
6933 **/
6934static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6935{
6936	int i;
6937
6938	kfree(ioa_cfg->res_entries);
6939	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6940			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6941	ipr_free_cmd_blks(ioa_cfg);
6942	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6943			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6944	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6945			    ioa_cfg->cfg_table,
6946			    ioa_cfg->cfg_table_dma);
6947
6948	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6949		pci_free_consistent(ioa_cfg->pdev,
6950				    sizeof(struct ipr_hostrcb),
6951				    ioa_cfg->hostrcb[i],
6952				    ioa_cfg->hostrcb_dma[i]);
6953	}
6954
6955	ipr_free_dump(ioa_cfg);
6956	kfree(ioa_cfg->trace);
6957}
6958
6959/**
6960 * ipr_free_all_resources - Free all allocated resources for an adapter.
6961 * @ioa_cfg:	ioa config struct
6962 *
6963 * This function frees all allocated resources for the
6964 * specified adapter.
6965 *
6966 * Return value:
6967 * 	none
6968 **/
6969static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6970{
6971	struct pci_dev *pdev = ioa_cfg->pdev;
6972
6973	ENTER;
6974	free_irq(pdev->irq, ioa_cfg);
6975	iounmap(ioa_cfg->hdw_dma_regs);
6976	pci_release_regions(pdev);
6977	ipr_free_mem(ioa_cfg);
6978	scsi_host_put(ioa_cfg->host);
6979	pci_disable_device(pdev);
6980	LEAVE;
6981}
6982
6983/**
6984 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6985 * @ioa_cfg:	ioa config struct
6986 *
6987 * Return value:
6988 * 	0 on success / -ENOMEM on allocation failure
6989 **/
6990static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6991{
6992	struct ipr_cmnd *ipr_cmd;
6993	struct ipr_ioarcb *ioarcb;
6994	dma_addr_t dma_addr;
6995	int i;
6996
6997	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6998						 sizeof(struct ipr_cmnd), 8, 0);
6999
7000	if (!ioa_cfg->ipr_cmd_pool)
7001		return -ENOMEM;
7002
7003	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7004		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7005
7006		if (!ipr_cmd) {
7007			ipr_free_cmd_blks(ioa_cfg);
7008			return -ENOMEM;
7009		}
7010
7011		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7012		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7013		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7014
7015		ioarcb = &ipr_cmd->ioarcb;
7016		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7017		ioarcb->host_response_handle = cpu_to_be32(i << 2);
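		/* Point the IOARCB at the IOADL and IOASA embedded in this command block */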
7018		ioarcb->write_ioadl_addr =
7019			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
7020		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7021		ioarcb->ioasa_host_pci_addr =
7022			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7023		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7024		ipr_cmd->cmd_index = i;
7025		ipr_cmd->ioa_cfg = ioa_cfg;
7026		ipr_cmd->sense_buffer_dma = dma_addr +
7027			offsetof(struct ipr_cmnd, sense_buffer);
7028
7029		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7030	}
7031
7032	return 0;
7033}
7034
7035/**
7036 * ipr_alloc_mem - Allocate memory for an adapter
7037 * @ioa_cfg:	ioa config struct
7038 *
7039 * Return value:
7040 * 	0 on success / non-zero for error
7041 **/
7042static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7043{
7044	struct pci_dev *pdev = ioa_cfg->pdev;
7045	int i, rc = -ENOMEM;
7046
7047	ENTER;
7048	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7049				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7050
7051	if (!ioa_cfg->res_entries)
7052		goto out;
7053
7054	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7055		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7056
7057	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7058						sizeof(struct ipr_misc_cbs),
7059						&ioa_cfg->vpd_cbs_dma);
7060
7061	if (!ioa_cfg->vpd_cbs)
7062		goto out_free_res_entries;
7063
7064	if (ipr_alloc_cmd_blks(ioa_cfg))
7065		goto out_free_vpd_cbs;
7066
7067	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7068						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7069						 &ioa_cfg->host_rrq_dma);
7070
7071	if (!ioa_cfg->host_rrq)
7072		goto out_ipr_free_cmd_blocks;
7073
7074	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7075						  sizeof(struct ipr_config_table),
7076						  &ioa_cfg->cfg_table_dma);
7077
7078	if (!ioa_cfg->cfg_table)
7079		goto out_free_host_rrq;
7080
7081	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7082		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7083							   sizeof(struct ipr_hostrcb),
7084							   &ioa_cfg->hostrcb_dma[i]);
7085
7086		if (!ioa_cfg->hostrcb[i])
7087			goto out_free_hostrcb_dma;
7088
7089		ioa_cfg->hostrcb[i]->hostrcb_dma =
7090			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7091		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7092		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7093	}
7094
7095	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7096				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7097
7098	if (!ioa_cfg->trace)
7099		goto out_free_hostrcb_dma;
7100
7101	rc = 0;
7102out:
7103	LEAVE;
7104	return rc;
7105
7106out_free_hostrcb_dma:
7107	while (i-- > 0) {
7108		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7109				    ioa_cfg->hostrcb[i],
7110				    ioa_cfg->hostrcb_dma[i]);
7111	}
7112	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7113			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7114out_free_host_rrq:
7115	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7116			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7117out_ipr_free_cmd_blocks:
7118	ipr_free_cmd_blks(ioa_cfg);
7119out_free_vpd_cbs:
7120	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7121			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7122out_free_res_entries:
7123	kfree(ioa_cfg->res_entries);
7124	goto out;
7125}
7126
7127/**
7128 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7129 * @ioa_cfg:	ioa config struct
7130 *
7131 * Return value:
7132 * 	none
7133 **/
7134static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7135{
7136	int i;
7137
7138	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7139		ioa_cfg->bus_attr[i].bus = i;
7140		ioa_cfg->bus_attr[i].qas_enabled = 0;
7141		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7142		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7143			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7144		else
7145			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7146	}
7147}
7148
7149/**
7150 * ipr_init_ioa_cfg - Initialize IOA config struct
7151 * @ioa_cfg:	ioa config struct
7152 * @host:		scsi host struct
7153 * @pdev:		PCI dev struct
7154 *
7155 * Return value:
7156 * 	none
7157 **/
7158static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7159				       struct Scsi_Host *host, struct pci_dev *pdev)
7160{
7161	const struct ipr_interrupt_offsets *p;
7162	struct ipr_interrupts *t;
7163	void __iomem *base;
7164
7165	ioa_cfg->host = host;
7166	ioa_cfg->pdev = pdev;
7167	ioa_cfg->log_level = ipr_log_level;
7168	ioa_cfg->doorbell = IPR_DOORBELL;
7169	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7170	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7171	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7172	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7173	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7174	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7175	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7176	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7177
7178	INIT_LIST_HEAD(&ioa_cfg->free_q);
7179	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7180	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7181	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7182	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7183	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7184	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7185	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7186	ioa_cfg->sdt_state = INACTIVE;
7187	if (ipr_enable_cache)
7188		ioa_cfg->cache_state = CACHE_ENABLED;
7189	else
7190		ioa_cfg->cache_state = CACHE_DISABLED;
7191
7192	ipr_initialize_bus_attr(ioa_cfg);
7193
7194	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7195	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7196	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7197	host->unique_id = host->host_no;
7198	host->max_cmd_len = IPR_MAX_CDB_LEN;
7199	pci_set_drvdata(pdev, ioa_cfg);
7200
7201	p = &ioa_cfg->chip_cfg->regs;
7202	t = &ioa_cfg->regs;
7203	base = ioa_cfg->hdw_dma_regs;
7204
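	/* Convert the chip-specific register offsets into mapped addresses */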
7205	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7206	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7207	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7208	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7209	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7210	t->ioarrin_reg = base + p->ioarrin_reg;
7211	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7212	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7213	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7214}
7215
7216/**
7217 * ipr_get_chip_cfg - Find adapter chip configuration
7218 * @dev_id:		PCI device id struct
7219 *
7220 * Return value:
7221 * 	ptr to chip config on success / NULL on failure
7222 **/
7223static const struct ipr_chip_cfg_t * __devinit
7224ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7225{
7226	int i;
7227
7228	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7229		if (ipr_chip[i].vendor == dev_id->vendor &&
7230		    ipr_chip[i].device == dev_id->device)
7231			return ipr_chip[i].cfg;
7232	return NULL;
7233}
7234
7235/**
7236 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7237 * @pdev:		PCI device struct
7238 * @dev_id:		PCI device id struct
7239 *
7240 * Return value:
7241 * 	0 on success / non-zero on failure
7242 **/
7243static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7244				   const struct pci_device_id *dev_id)
7245{
7246	struct ipr_ioa_cfg *ioa_cfg;
7247	struct Scsi_Host *host;
7248	unsigned long ipr_regs_pci;
7249	void __iomem *ipr_regs;
7250	int rc = PCIBIOS_SUCCESSFUL;
7251	volatile u32 mask, uproc;
7252
7253	ENTER;
7254
7255	if ((rc = pci_enable_device(pdev))) {
7256		dev_err(&pdev->dev, "Cannot enable adapter\n");
7257		goto out;
7258	}
7259
7260	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7261
7262	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7263
7264	if (!host) {
7265		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7266		rc = -ENOMEM;
7267		goto out_disable;
7268	}
7269
7270	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7271	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7272	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7273		      sata_port_info.flags, &ipr_sata_ops);
7274
7275	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7276
7277	if (!ioa_cfg->chip_cfg) {
7278		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7279			dev_id->vendor, dev_id->device);
7280		goto out_scsi_host_put;
7281	}
7282
7283	if (ipr_transop_timeout)
7284		ioa_cfg->transop_timeout = ipr_transop_timeout;
7285	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7286		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7287	else
7288		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7289
7290	ipr_regs_pci = pci_resource_start(pdev, 0);
7291
7292	rc = pci_request_regions(pdev, IPR_NAME);
7293	if (rc < 0) {
7294		dev_err(&pdev->dev,
7295			"Couldn't register memory range of registers\n");
7296		goto out_scsi_host_put;
7297	}
7298
7299	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7300
7301	if (!ipr_regs) {
7302		dev_err(&pdev->dev,
7303			"Couldn't map memory range of registers\n");
7304		rc = -ENOMEM;
7305		goto out_release_regions;
7306	}
7307
7308	ioa_cfg->hdw_dma_regs = ipr_regs;
7309	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7310	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7311
7312	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7313
7314	pci_set_master(pdev);
7315
7316	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7317	if (rc < 0) {
7318		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7319		goto cleanup_nomem;
7320	}
7321
7322	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7323				   ioa_cfg->chip_cfg->cache_line_size);
7324
7325	if (rc != PCIBIOS_SUCCESSFUL) {
7326		dev_err(&pdev->dev, "Write of cache line size failed\n");
7327		rc = -EIO;
7328		goto cleanup_nomem;
7329	}
7330
7331	/* Save away PCI config space for use following IOA reset */
7332	rc = pci_save_state(pdev);
7333
7334	if (rc != PCIBIOS_SUCCESSFUL) {
7335		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7336		rc = -EIO;
7337		goto cleanup_nomem;
7338	}
7339
7340	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7341		goto cleanup_nomem;
7342
7343	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7344		goto cleanup_nomem;
7345
7346	rc = ipr_alloc_mem(ioa_cfg);
7347	if (rc < 0) {
7348		dev_err(&pdev->dev,
7349			"Couldn't allocate enough memory for device driver!\n");
7350		goto cleanup_nomem;
7351	}
7352
7353	/*
7354	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7355	 * the card is in an unknown state and needs a hard reset
7356	 */
7357	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7358	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7359	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7360		ioa_cfg->needs_hard_reset = 1;
7361
7362	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7363	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7364
7365	if (rc) {
7366		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7367			pdev->irq, rc);
7368		goto cleanup_nolog;
7369	}
7370
7371	spin_lock(&ipr_driver_lock);
7372	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7373	spin_unlock(&ipr_driver_lock);
7374
7375	LEAVE;
7376out:
7377	return rc;
7378
7379cleanup_nolog:
7380	ipr_free_mem(ioa_cfg);
7381cleanup_nomem:
7382	iounmap(ipr_regs);
7383out_release_regions:
7384	pci_release_regions(pdev);
7385out_scsi_host_put:
7386	scsi_host_put(host);
7387out_disable:
7388	pci_disable_device(pdev);
7389	goto out;
7390}
7391
7392/**
7393 * ipr_scan_vsets - Scans for VSET devices
7394 * @ioa_cfg:	ioa config struct
7395 *
7396 * Description: Since the VSET resources do not follow SAM (we can have
7397 * sparse LUNs with no LUN 0), we have to scan for these devices ourselves.
7398 *
7399 * Return value:
7400 * 	none
7401 **/
7402static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7403{
7404	int target, lun;
7405
7406	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7407		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7408			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7409}
7410
7411/**
7412 * ipr_initiate_ioa_bringdown - Bring down an adapter
7413 * @ioa_cfg:		ioa config struct
7414 * @shutdown_type:	shutdown type
7415 *
7416 * Description: This function will initiate bringing down the adapter.
7417 * This consists of issuing an IOA shutdown to the adapter
7418 * to flush the cache, and running BIST.
7419 * If the caller needs to wait on the completion of the reset,
7420 * the caller must sleep on the reset_wait_q.
7421 *
7422 * Return value:
7423 * 	none
7424 **/
7425static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7426				       enum ipr_shutdown_type shutdown_type)
7427{
7428	ENTER;
7429	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7430		ioa_cfg->sdt_state = ABORT_DUMP;
7431	ioa_cfg->reset_retries = 0;
7432	ioa_cfg->in_ioa_bringdown = 1;
7433	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7434	LEAVE;
7435}
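
/*
 * Typical caller pattern (a minimal sketch of the locking and wait sequence;
 * __ipr_remove() and ipr_shutdown() below are the real call sites):
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */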
7436
7437/**
7438 * __ipr_remove - Remove a single adapter
7439 * @pdev:	pci device struct
7440 *
7441 * Adapter hot plug remove entry point.
7442 *
7443 * Return value:
7444 * 	none
7445 **/
7446static void __ipr_remove(struct pci_dev *pdev)
7447{
7448	unsigned long host_lock_flags = 0;
7449	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7450	ENTER;
7451
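	/*
	 * If a reset/reload is already in progress, drop the host lock and
	 * sleep on reset_wait_q until it finishes before starting bringdown.
	 */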
7452	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7453	while (ioa_cfg->in_reset_reload) {
7454		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7455		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7456		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7457	}
7458
7459	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7460
7461	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7462	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7463	flush_scheduled_work();
7464	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7465
7466	spin_lock(&ipr_driver_lock);
7467	list_del(&ioa_cfg->queue);
7468	spin_unlock(&ipr_driver_lock);
7469
7470	if (ioa_cfg->sdt_state == ABORT_DUMP)
7471		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7472	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7473
7474	ipr_free_all_resources(ioa_cfg);
7475
7476	LEAVE;
7477}
7478
7479/**
7480 * ipr_remove - IOA hot plug remove entry point
7481 * @pdev:	pci device struct
7482 *
7483 * Adapter hot plug remove entry point.
7484 *
7485 * Return value:
7486 * 	none
7487 **/
7488static void ipr_remove(struct pci_dev *pdev)
7489{
7490	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7491
7492	ENTER;
7493
7494	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7495			      &ipr_trace_attr);
7496	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7497			     &ipr_dump_attr);
7498	scsi_remove_host(ioa_cfg->host);
7499
7500	__ipr_remove(pdev);
7501
7502	LEAVE;
7503}
7504
7505/**
7506 * ipr_probe - Adapter hot plug add entry point
7507 *
7508 * Return value:
7509 * 	0 on success / non-zero on failure
7510 **/
7511static int __devinit ipr_probe(struct pci_dev *pdev,
7512			       const struct pci_device_id *dev_id)
7513{
7514	struct ipr_ioa_cfg *ioa_cfg;
7515	int rc;
7516
7517	rc = ipr_probe_ioa(pdev, dev_id);
7518
7519	if (rc)
7520		return rc;
7521
7522	ioa_cfg = pci_get_drvdata(pdev);
7523	rc = ipr_probe_ioa_part2(ioa_cfg);
7524
7525	if (rc) {
7526		__ipr_remove(pdev);
7527		return rc;
7528	}
7529
7530	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7531
7532	if (rc) {
7533		__ipr_remove(pdev);
7534		return rc;
7535	}
7536
7537	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7538				   &ipr_trace_attr);
7539
7540	if (rc) {
7541		scsi_remove_host(ioa_cfg->host);
7542		__ipr_remove(pdev);
7543		return rc;
7544	}
7545
7546	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7547				   &ipr_dump_attr);
7548
7549	if (rc) {
7550		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7551				      &ipr_trace_attr);
7552		scsi_remove_host(ioa_cfg->host);
7553		__ipr_remove(pdev);
7554		return rc;
7555	}
7556
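	/*
	 * Scan for standard SCSI devices, then for VSET devices (which may
	 * have sparse LUNs), and finally expose the IOA itself as a device.
	 */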
7557	scsi_scan_host(ioa_cfg->host);
7558	ipr_scan_vsets(ioa_cfg);
7559	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7560	ioa_cfg->allow_ml_add_del = 1;
7561	ioa_cfg->host->max_channel = IPR_VSET_BUS;
7562	schedule_work(&ioa_cfg->work_q);
7563	return 0;
7564}
7565
7566/**
7567 * ipr_shutdown - Shutdown handler.
7568 * @pdev:	pci device struct
7569 *
7570 * This function is invoked upon system shutdown/reboot. It issues a
7571 * shutdown to the adapter to flush the write cache.
7572 *
7573 * Return value:
7574 * 	none
7575 **/
7576static void ipr_shutdown(struct pci_dev *pdev)
7577{
7578	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7579	unsigned long lock_flags = 0;
7580
7581	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7582	while (ioa_cfg->in_reset_reload) {
7583		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7584		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7585		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7586	}
7587
7588	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7589	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7590	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7591}
7592
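/*
 * The driver_data field below carries per-adapter flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT, which ipr_probe_ioa() consults when
 * selecting the operational timeout for the adapter.
 */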
7593static struct pci_device_id ipr_pci_table[] __devinitdata = {
7594	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7595		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7596	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7597		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7598	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7599		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7600	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7601		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7602	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7603		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7604	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7605		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7606	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7607		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7608	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7609		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7610		IPR_USE_LONG_TRANSOP_TIMEOUT },
7611	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7612	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7613	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7614	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7615	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7616	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7617	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7618	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7619	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7620	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7621	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7622	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
7623	      IPR_USE_LONG_TRANSOP_TIMEOUT},
7624	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7625	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7626	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7627	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7628	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7629	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7630	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7631	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7632	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7633	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7634	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7635	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7636	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7637	      IPR_USE_LONG_TRANSOP_TIMEOUT },
7638	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7639		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7640	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7641		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7642	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7643		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7644		IPR_USE_LONG_TRANSOP_TIMEOUT },
7645	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7646		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7647		IPR_USE_LONG_TRANSOP_TIMEOUT },
7648	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7649		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7650		IPR_USE_LONG_TRANSOP_TIMEOUT },
7651	{ }
7652};
7653MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7654
7655static struct pci_error_handlers ipr_err_handler = {
7656	.error_detected = ipr_pci_error_detected,
7657	.slot_reset = ipr_pci_slot_reset,
7658};
7659
7660static struct pci_driver ipr_driver = {
7661	.name = IPR_NAME,
7662	.id_table = ipr_pci_table,
7663	.probe = ipr_probe,
7664	.remove = ipr_remove,
7665	.shutdown = ipr_shutdown,
7666	.err_handler = &ipr_err_handler,
7667	.dynids.use_driver_data = 1
7668};
7669
7670/**
7671 * ipr_init - Module entry point
7672 *
7673 * Return value:
7674 * 	0 on success / negative value on failure
7675 **/
7676static int __init ipr_init(void)
7677{
7678	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7679		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7680
7681	return pci_register_driver(&ipr_driver);
7682}
7683
7684/**
7685 * ipr_exit - Module unload
7686 *
7687 * Module unload entry point.
7688 *
7689 * Return value:
7690 * 	none
7691 **/
7692static void __exit ipr_exit(void)
7693{
7694	pci_unregister_driver(&ipr_driver);
7695}
7696
7697module_init(ipr_init);
7698module_exit(ipr_exit);
7699