ipr.c revision a32c055feed74246747bf4f45adb765136d3a4d3
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <linux/libata.h>
74#include <linux/hdreg.h>
75#include <asm/io.h>
76#include <asm/irq.h>
77#include <asm/processor.h>
78#include <scsi/scsi.h>
79#include <scsi/scsi_host.h>
80#include <scsi/scsi_tcq.h>
81#include <scsi/scsi_eh.h>
82#include <scsi/scsi_cmnd.h>
83#include "ipr.h"
84
85/*
86 *   Global Data
87 */
88static LIST_HEAD(ipr_ioa_head);
89static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = 0;
94static unsigned int ipr_enable_cache = 1;
95static unsigned int ipr_debug = 0;
96static unsigned int ipr_dual_ioa_raid = 1;
97static DEFINE_SPINLOCK(ipr_driver_lock);
98
99/* This table describes the differences between DMA controller chips */
100static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
102		.mailbox = 0x0042C,
103		.cache_line_size = 0x20,
104		{
105			.set_interrupt_mask_reg = 0x0022C,
106			.clr_interrupt_mask_reg = 0x00230,
107			.sense_interrupt_mask_reg = 0x0022C,
108			.clr_interrupt_reg = 0x00228,
109			.sense_interrupt_reg = 0x00224,
110			.ioarrin_reg = 0x00404,
111			.sense_uproc_interrupt_reg = 0x00214,
112			.set_uproc_interrupt_reg = 0x00214,
113			.clr_uproc_interrupt_reg = 0x00218
114		}
115	},
116	{ /* Snipe and Scamp */
117		.mailbox = 0x0052C,
118		.cache_line_size = 0x20,
119		{
120			.set_interrupt_mask_reg = 0x00288,
121			.clr_interrupt_mask_reg = 0x0028C,
122			.sense_interrupt_mask_reg = 0x00288,
123			.clr_interrupt_reg = 0x00284,
124			.sense_interrupt_reg = 0x00280,
125			.ioarrin_reg = 0x00504,
126			.sense_uproc_interrupt_reg = 0x00290,
127			.set_uproc_interrupt_reg = 0x00290,
128			.clr_uproc_interrupt_reg = 0x00294
129		}
130	},
131};
132
133static const struct ipr_chip_t ipr_chip[] = {
134	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
135	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
136	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
137	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
138	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
139	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
140	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
141};
142
143static int ipr_max_bus_speeds [] = {
144	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
145};
146
147MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
148MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
149module_param_named(max_speed, ipr_max_speed, uint, 0);
150MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
151module_param_named(log_level, ipr_log_level, uint, 0);
152MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
153module_param_named(testmode, ipr_testmode, int, 0);
154MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
155module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
156MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
157module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
158MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
159module_param_named(enable_cache, ipr_enable_cache, int, 0);
160MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
161module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
162MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
163module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
164MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
165MODULE_LICENSE("GPL");
166MODULE_VERSION(IPR_DRIVER_VERSION);
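/*
 * Illustrative usage of the parameters above (not part of the driver code):
 * they can be set at load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * or, when the driver is built in, on the kernel command line as
 * ipr.max_speed=2. The max_speed value (0-2) indexes ipr_max_bus_speeds[]
 * above to select the 80 MB/s, U160, or U320 rate.
 */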
167
168/*  A constant array of IOASCs/URCs/Error Messages */
169static const
170struct ipr_error_table_t ipr_error_table[] = {
171	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
172	"8155: An unknown error was received"},
173	{0x00330000, 0, 0,
174	"Soft underlength error"},
175	{0x005A0000, 0, 0,
176	"Command to be cancelled not found"},
177	{0x00808000, 0, 0,
178	"Qualified success"},
179	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
180	"FFFE: Soft device bus error recovered by the IOA"},
181	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
182	"4101: Soft device bus fabric error"},
183	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
184	"FFF9: Device sector reassign successful"},
185	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
186	"FFF7: Media error recovered by device rewrite procedures"},
187	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
188	"7001: IOA sector reassignment successful"},
189	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
190	"FFF9: Soft media error. Sector reassignment recommended"},
191	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
192	"FFF7: Media error recovered by IOA rewrite procedures"},
193	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
194	"FF3D: Soft PCI bus error recovered by the IOA"},
195	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
196	"FFF6: Device hardware error recovered by the IOA"},
197	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
198	"FFF6: Device hardware error recovered by the device"},
199	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
200	"FF3D: Soft IOA error recovered by the IOA"},
201	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
202	"FFFA: Undefined device response recovered by the IOA"},
203	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
204	"FFF6: Device bus error, message or command phase"},
205	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
206	"FFFE: Task Management Function failed"},
207	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
208	"FFF6: Failure prediction threshold exceeded"},
209	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
210	"8009: Impending cache battery pack failure"},
211	{0x02040400, 0, 0,
212	"34FF: Disk device format in progress"},
213	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
214	"9070: IOA requested reset"},
215	{0x023F0000, 0, 0,
216	"Synchronization required"},
217	{0x024E0000, 0, 0,
218	"Not ready, IOA shutdown"},
219	{0x025A0000, 0, 0,
220	"Not ready, IOA has been shutdown"},
221	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
222	"3020: Storage subsystem configuration error"},
223	{0x03110B00, 0, 0,
224	"FFF5: Medium error, data unreadable, recommend reassign"},
225	{0x03110C00, 0, 0,
226	"7000: Medium error, data unreadable, do not reassign"},
227	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
228	"FFF3: Disk media format bad"},
229	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
230	"3002: Addressed device failed to respond to selection"},
231	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
232	"3100: Device bus error"},
233	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
234	"3109: IOA timed out a device command"},
235	{0x04088000, 0, 0,
236	"3120: SCSI bus is not operational"},
237	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
238	"4100: Hard device bus fabric error"},
239	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
240	"9000: IOA reserved area data check"},
241	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
242	"9001: IOA reserved area invalid data pattern"},
243	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
244	"9002: IOA reserved area LRC error"},
245	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
246	"102E: Out of alternate sectors for disk storage"},
247	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
248	"FFF4: Data transfer underlength error"},
249	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
250	"FFF4: Data transfer overlength error"},
251	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
252	"3400: Logical unit failure"},
253	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
254	"FFF4: Device microcode is corrupt"},
255	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
256	"8150: PCI bus error"},
257	{0x04430000, 1, 0,
258	"Unsupported device bus message received"},
259	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
260	"FFF4: Disk device problem"},
261	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
262	"8150: Permanent IOA failure"},
263	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
264	"3010: Disk device returned wrong response to IOA"},
265	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
266	"8151: IOA microcode error"},
267	{0x04448500, 0, 0,
268	"Device bus status error"},
269	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
270	"8157: IOA error requiring IOA reset to recover"},
271	{0x04448700, 0, 0,
272	"ATA device status error"},
273	{0x04490000, 0, 0,
274	"Message reject received from the device"},
275	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
276	"8008: A permanent cache battery pack failure occurred"},
277	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
278	"9090: Disk unit has been modified after the last known status"},
279	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
280	"9081: IOA detected device error"},
281	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
282	"9082: IOA detected device error"},
283	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
284	"3110: Device bus error, message or command phase"},
285	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
286	"3110: SAS Command / Task Management Function failed"},
287	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
288	"9091: Incorrect hardware configuration change has been detected"},
289	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
290	"9073: Invalid multi-adapter configuration"},
291	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
292	"4010: Incorrect connection between cascaded expanders"},
293	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
294	"4020: Connections exceed IOA design limits"},
295	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
296	"4030: Incorrect multipath connection"},
297	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
298	"4110: Unsupported enclosure function"},
299	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
300	"FFF4: Command to logical unit failed"},
301	{0x05240000, 1, 0,
302	"Illegal request, invalid request type or request packet"},
303	{0x05250000, 0, 0,
304	"Illegal request, invalid resource handle"},
305	{0x05258000, 0, 0,
306	"Illegal request, commands not allowed to this device"},
307	{0x05258100, 0, 0,
308	"Illegal request, command not allowed to a secondary adapter"},
309	{0x05260000, 0, 0,
310	"Illegal request, invalid field in parameter list"},
311	{0x05260100, 0, 0,
312	"Illegal request, parameter not supported"},
313	{0x05260200, 0, 0,
314	"Illegal request, parameter value invalid"},
315	{0x052C0000, 0, 0,
316	"Illegal request, command sequence error"},
317	{0x052C8000, 1, 0,
318	"Illegal request, dual adapter support not enabled"},
319	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
320	"9031: Array protection temporarily suspended, protection resuming"},
321	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
322	"9040: Array protection temporarily suspended, protection resuming"},
323	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
324	"3140: Device bus not ready to ready transition"},
325	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
326	"FFFB: SCSI bus was reset"},
327	{0x06290500, 0, 0,
328	"FFFE: SCSI bus transition to single ended"},
329	{0x06290600, 0, 0,
330	"FFFE: SCSI bus transition to LVD"},
331	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
332	"FFFB: SCSI bus was reset by another initiator"},
333	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
334	"3029: A device replacement has occurred"},
335	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
336	"9051: IOA cache data exists for a missing or failed device"},
337	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
338	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
339	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
340	"9025: Disk unit is not supported at its physical location"},
341	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
342	"3020: IOA detected a SCSI bus configuration error"},
343	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
344	"3150: SCSI bus configuration error"},
345	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
346	"9074: Asymmetric advanced function disk configuration"},
347	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
348	"4040: Incomplete multipath connection between IOA and enclosure"},
349	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
350	"4041: Incomplete multipath connection between enclosure and device"},
351	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
352	"9075: Incomplete multipath connection between IOA and remote IOA"},
353	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
354	"9076: Configuration error, missing remote IOA"},
355	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
356	"4050: Enclosure does not support a required multipath function"},
357	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
358	"4070: Logically bad block written on device"},
359	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
360	"9041: Array protection temporarily suspended"},
361	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
362	"9042: Corrupt array parity detected on specified device"},
363	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
364	"9030: Array no longer protected due to missing or failed disk unit"},
365	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
366	"9071: Link operational transition"},
367	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
368	"9072: Link not operational transition"},
369	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
370	"9032: Array exposed but still protected"},
371	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
372	"70DD: Device forced failed by disrupt device command"},
373	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
374	"4061: Multipath redundancy level got better"},
375	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
376	"4060: Multipath redundancy level got worse"},
377	{0x07270000, 0, 0,
378	"Failure due to other device"},
379	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
380	"9008: IOA does not support functions expected by devices"},
381	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
382	"9010: Cache data associated with attached devices cannot be found"},
383	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
384	"9011: Cache data belongs to devices other than those attached"},
385	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
386	"9020: Array missing 2 or more devices with only 1 device present"},
387	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
388	"9021: Array missing 2 or more devices with 2 or more devices present"},
389	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
390	"9022: Exposed array is missing a required device"},
391	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
392	"9023: Array member(s) not at required physical locations"},
393	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
394	"9024: Array not functional due to present hardware configuration"},
395	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
396	"9026: Array not functional due to present hardware configuration"},
397	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
398	"9027: Array is missing a device and parity is out of sync"},
399	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
400	"9028: Maximum number of arrays already exist"},
401	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
402	"9050: Required cache data cannot be located for a disk unit"},
403	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
404	"9052: Cache data exists for a device that has been modified"},
405	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
406	"9054: IOA resources not available due to previous problems"},
407	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
408	"9092: Disk unit requires initialization before use"},
409	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
410	"9029: Incorrect hardware configuration change has been detected"},
411	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
412	"9060: One or more disk pairs are missing from an array"},
413	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
414	"9061: One or more disks are missing from an array"},
415	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
416	"9062: One or more disks are missing from an array"},
417	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
418	"9063: Maximum number of functional arrays has been exceeded"},
419	{0x0B260000, 0, 0,
420	"Aborted command, invalid descriptor"},
421	{0x0B5A0000, 0, 0,
422	"Command terminated by host"}
423};
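/*
 * Each entry above maps an IOASC to an error message. The final numeric
 * field is the log level at which the HCAM error is reported (see
 * ipr_handle_log_data() below); entry 0 is the catch-all that
 * ipr_get_error() returns for IOASCs not found in the table.
 */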
424
425static const struct ipr_ses_table_entry ipr_ses_table[] = {
426	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
427	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
428	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
429	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
430	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
431	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
432	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
433	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
434	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
435	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
436	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
437	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
438	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
439};
440
441/*
442 *  Function Prototypes
443 */
444static int ipr_reset_alert(struct ipr_cmnd *);
445static void ipr_process_ccn(struct ipr_cmnd *);
446static void ipr_process_error(struct ipr_cmnd *);
447static void ipr_reset_ioa_job(struct ipr_cmnd *);
448static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
449				   enum ipr_shutdown_type);
450
451#ifdef CONFIG_SCSI_IPR_TRACE
452/**
453 * ipr_trc_hook - Add a trace entry to the driver trace
454 * @ipr_cmd:	ipr command struct
455 * @type:		trace type
456 * @add_data:	additional data
457 *
458 * Return value:
459 * 	none
460 **/
461static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
462			 u8 type, u32 add_data)
463{
464	struct ipr_trace_entry *trace_entry;
465	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
466
467	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
468	trace_entry->time = jiffies;
469	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
470	trace_entry->type = type;
471	if (ipr_cmd->ioa_cfg->sis64)
472		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
473	else
474		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
475	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
476	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
477	trace_entry->u.add_data = add_data;
478}
479#else
480#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
481#endif
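/*
 * Note that when CONFIG_SCSI_IPR_TRACE is disabled the hook above compiles
 * to an empty statement, so callers such as ipr_do_req() can invoke it
 * unconditionally, e.g. ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0).
 */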
482
483/**
484 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
485 * @ipr_cmd:	ipr command struct
486 *
487 * Return value:
488 * 	none
489 **/
490static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
491{
492	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
493	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
494	dma_addr_t dma_addr = ipr_cmd->dma_addr;
495
496	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
497	ioarcb->data_transfer_length = 0;
498	ioarcb->read_data_transfer_length = 0;
499	ioarcb->ioadl_len = 0;
500	ioarcb->read_ioadl_len = 0;
501
502	if (ipr_cmd->ioa_cfg->sis64)
503		ioarcb->u.sis64_addr_data.data_ioadl_addr =
504			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
505	else {
506		ioarcb->write_ioadl_addr =
507			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
508		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
509	}
510
511	ioasa->ioasc = 0;
512	ioasa->residual_data_len = 0;
513	ioasa->u.gata.status = 0;
514
515	ipr_cmd->scsi_cmd = NULL;
516	ipr_cmd->qc = NULL;
517	ipr_cmd->sense_buffer[0] = 0;
518	ipr_cmd->dma_use_sg = 0;
519}
520
521/**
522 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
523 * @ipr_cmd:	ipr command struct
524 *
525 * Return value:
526 * 	none
527 **/
528static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
529{
530	ipr_reinit_ipr_cmnd(ipr_cmd);
531	ipr_cmd->u.scratch = 0;
532	ipr_cmd->sibling = NULL;
533	init_timer(&ipr_cmd->timer);
534}
535
536/**
537 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
538 * @ioa_cfg:	ioa config struct
539 *
540 * Return value:
541 * 	pointer to ipr command struct
542 **/
543static
544struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
545{
546	struct ipr_cmnd *ipr_cmd;
547
548	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
549	list_del(&ipr_cmd->queue);
550	ipr_init_ipr_cmnd(ipr_cmd);
551
552	return ipr_cmd;
553}
554
555/**
556 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
557 * @ioa_cfg:	ioa config struct
558 * @clr_ints:     interrupts to clear
559 *
560 * This function masks all interrupts on the adapter, then clears the
561 * interrupts specified in the mask
562 *
563 * Return value:
564 * 	none
565 **/
566static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
567					  u32 clr_ints)
568{
569	volatile u32 int_reg;
570
571	/* Stop new interrupts */
572	ioa_cfg->allow_interrupts = 0;
573
574	/* Set interrupt mask to stop all new interrupts */
575	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
576
577	/* Clear any pending interrupts */
578	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
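	/* The read below flushes the posted MMIO writes, so the mask and
	   clear are known to have reached the adapter before we return. */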
579	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
580}
581
582/**
583 * ipr_save_pcix_cmd_reg - Save PCI-X command register
584 * @ioa_cfg:	ioa config struct
585 *
586 * Return value:
587 * 	0 on success / -EIO on failure
588 **/
589static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
590{
591	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
592
593	if (pcix_cmd_reg == 0)
594		return 0;
595
596	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
597				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
598		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
599		return -EIO;
600	}
601
602	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
603	return 0;
604}
605
606/**
607 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
608 * @ioa_cfg:	ioa config struct
609 *
610 * Return value:
611 * 	0 on success / -EIO on failure
612 **/
613static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
614{
615	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
616
617	if (pcix_cmd_reg) {
618		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
619					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
620			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
621			return -EIO;
622		}
623	}
624
625	return 0;
626}
627
628/**
629 * ipr_sata_eh_done - done function for aborted SATA commands
630 * @ipr_cmd:	ipr command struct
631 *
632 * This function is invoked for ops generated to SATA
633 * devices which are being aborted.
634 *
635 * Return value:
636 * 	none
637 **/
638static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
639{
640	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
641	struct ata_queued_cmd *qc = ipr_cmd->qc;
642	struct ipr_sata_port *sata_port = qc->ap->private_data;
643
644	qc->err_mask |= AC_ERR_OTHER;
645	sata_port->ioasa.status |= ATA_BUSY;
646	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
647	ata_qc_complete(qc);
648}
649
650/**
651 * ipr_scsi_eh_done - mid-layer done function for aborted ops
652 * @ipr_cmd:	ipr command struct
653 *
654 * This function is invoked by the interrupt handler for
655 * ops generated by the SCSI mid-layer which are being aborted.
656 *
657 * Return value:
658 * 	none
659 **/
660static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
661{
662	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
663	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
664
665	scsi_cmd->result |= (DID_ERROR << 16);
666
667	scsi_dma_unmap(ipr_cmd->scsi_cmd);
668	scsi_cmd->scsi_done(scsi_cmd);
669	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
670}
671
672/**
673 * ipr_fail_all_ops - Fails all outstanding ops.
674 * @ioa_cfg:	ioa config struct
675 *
676 * This function fails all outstanding ops.
677 *
678 * Return value:
679 * 	none
680 **/
681static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
682{
683	struct ipr_cmnd *ipr_cmd, *temp;
684
685	ENTER;
686	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
687		list_del(&ipr_cmd->queue);
688
689		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
690		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
691
692		if (ipr_cmd->scsi_cmd)
693			ipr_cmd->done = ipr_scsi_eh_done;
694		else if (ipr_cmd->qc)
695			ipr_cmd->done = ipr_sata_eh_done;
696
697		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
698		del_timer(&ipr_cmd->timer);
699		ipr_cmd->done(ipr_cmd);
700	}
701
702	LEAVE;
703}
704
705/**
706 * ipr_send_command -  Send driver initiated requests.
707 * @ipr_cmd:		ipr command struct
708 *
709 * This function sends a command to the adapter using the correct write call.
710 * In the case of sis64, calculate the ioarcb size required. Then or in the
711 * appropriate bits.
712 *
713 * Return value:
714 * 	none
715 **/
716static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
717{
718	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
719	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
720
721	if (ioa_cfg->sis64) {
722		/* The default size is 256 bytes */
723		send_dma_addr |= 0x1;
724
725		/* If the number of ioadls * size of ioadl > 128 bytes,
726		   then use a 512 byte ioarcb */
727		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
728			send_dma_addr |= 0x4;
729		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
730	} else
731		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
732}
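/*
 * Worked example for the sis64 path above (assuming a 16 byte
 * struct ipr_ioadl64_desc: 4 byte flags, 4 byte length, 8 byte address):
 * up to 8 scatter/gather entries fit within the default 256 byte IOARCB,
 * while a 9th entry pushes the ioadl past 128 bytes and selects the
 * 512 byte IOARCB format instead.
 */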
733
734/**
735 * ipr_do_req -  Send driver initiated requests.
736 * @ipr_cmd:		ipr command struct
737 * @done:			done function
738 * @timeout_func:	timeout function
739 * @timeout:		timeout value
740 *
741 * This function sends the specified command to the adapter with the
742 * timeout given. The done function is invoked on command completion.
743 *
744 * Return value:
745 * 	none
746 **/
747static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
748		       void (*done) (struct ipr_cmnd *),
749		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
750{
751	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
752
753	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
754
755	ipr_cmd->done = done;
756
757	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
758	ipr_cmd->timer.expires = jiffies + timeout;
759	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
760
761	add_timer(&ipr_cmd->timer);
762
763	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
764
765	mb();
766
767	ipr_send_command(ipr_cmd);
768}
769
770/**
771 * ipr_internal_cmd_done - Op done function for an internally generated op.
772 * @ipr_cmd:	ipr command struct
773 *
774 * This function is the op done function for an internally generated,
775 * blocking op. It simply wakes the sleeping thread.
776 *
777 * Return value:
778 * 	none
779 **/
780static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
781{
782	if (ipr_cmd->sibling)
783		ipr_cmd->sibling = NULL;
784	else
785		complete(&ipr_cmd->completion);
786}
787
788/**
789 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
790 * @ipr_cmd:	ipr command struct
791 * @dma_addr:	dma address
792 * @len:	transfer length
793 * @flags:	ioadl flag value
794 *
795 * This function initializes an ioadl in the case where there is only a single
796 * descriptor.
797 *
798 * Return value:
799 * 	nothing
800 **/
801static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
802			   u32 len, int flags)
803{
804	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
805	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
806
807	ipr_cmd->dma_use_sg = 1;
808
809	if (ipr_cmd->ioa_cfg->sis64) {
810		ioadl64->flags = cpu_to_be32(flags);
811		ioadl64->data_len = cpu_to_be32(len);
812		ioadl64->address = cpu_to_be64(dma_addr);
813
814		ipr_cmd->ioarcb.ioadl_len =
815		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
816		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
817	} else {
818		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
819		ioadl->address = cpu_to_be32(dma_addr);
820
821		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
822			ipr_cmd->ioarcb.read_ioadl_len =
823				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
824			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
825		} else {
826			ipr_cmd->ioarcb.ioadl_len =
827			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
828			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
829		}
830	}
831}
832
833/**
834 * ipr_send_blocking_cmd - Send command and sleep on its completion.
835 * @ipr_cmd:	ipr command struct
836 * @timeout_func:	function to invoke if command times out
837 * @timeout:	timeout
838 *
839 * Return value:
840 * 	none
841 **/
842static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
843				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
844				  u32 timeout)
845{
846	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
847
848	init_completion(&ipr_cmd->completion);
849	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
850
851	spin_unlock_irq(ioa_cfg->host->host_lock);
852	wait_for_completion(&ipr_cmd->completion);
853	spin_lock_irq(ioa_cfg->host->host_lock);
854}
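/*
 * Typical call pattern (illustrative). The caller must already hold
 * host->host_lock, which this helper drops while it sleeps:
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	... fill in the remainder of the cmd_pkt ...
 *	ipr_send_blocking_cmd(ipr_cmd, timeout_fn, timeout_in_jiffies);
 *
 * where timeout_fn and timeout_in_jiffies stand in for whatever timeout
 * handling the specific op requires.
 */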
855
856/**
857 * ipr_send_hcam - Send an HCAM to the adapter.
858 * @ioa_cfg:	ioa config struct
859 * @type:		HCAM type
860 * @hostrcb:	hostrcb struct
861 *
862 * This function will send a Host Controlled Async command to the adapter.
863 * If HCAMs are currently not allowed to be issued to the adapter, it will
864 * place the hostrcb on the free queue.
865 *
866 * Return value:
867 * 	none
868 **/
869static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
870			  struct ipr_hostrcb *hostrcb)
871{
872	struct ipr_cmnd *ipr_cmd;
873	struct ipr_ioarcb *ioarcb;
874
875	if (ioa_cfg->allow_cmds) {
876		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
877		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
878		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
879
880		ipr_cmd->u.hostrcb = hostrcb;
881		ioarcb = &ipr_cmd->ioarcb;
882
883		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
884		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
885		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
886		ioarcb->cmd_pkt.cdb[1] = type;
887		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
888		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
889
890		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
891			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
892
893		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
894			ipr_cmd->done = ipr_process_ccn;
895		else
896			ipr_cmd->done = ipr_process_error;
897
898		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
899
900		mb();
901
902		ipr_send_command(ipr_cmd);
903	} else {
904		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
905	}
906}
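/*
 * HCAMs are self re-arming: the done routines registered above
 * (ipr_process_ccn() and ipr_process_error()) call ipr_send_hcam() again
 * once a notification has been handled, so a notification command is
 * normally kept outstanding for each type while allow_cmds is set.
 */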
907
908/**
909 * ipr_init_res_entry - Initialize a resource entry struct.
910 * @res:	resource entry struct
911 *
912 * Return value:
913 * 	none
914 **/
915static void ipr_init_res_entry(struct ipr_resource_entry *res)
916{
917	res->needs_sync_complete = 0;
918	res->in_erp = 0;
919	res->add_to_ml = 0;
920	res->del_from_ml = 0;
921	res->resetting_device = 0;
922	res->sdev = NULL;
923	res->sata_port = NULL;
924}
925
926/**
927 * ipr_handle_config_change - Handle a config change from the adapter
928 * @ioa_cfg:	ioa config struct
929 * @hostrcb:	hostrcb
930 *
931 * Return value:
932 * 	none
933 **/
934static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
935			      struct ipr_hostrcb *hostrcb)
936{
937	struct ipr_resource_entry *res = NULL;
938	struct ipr_config_table_entry *cfgte;
939	u32 is_ndn = 1;
940
941	cfgte = &hostrcb->hcam.u.ccn.cfgte;
942
943	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
944		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
945			    sizeof(cfgte->res_addr))) {
946			is_ndn = 0;
947			break;
948		}
949	}
950
951	if (is_ndn) {
952		if (list_empty(&ioa_cfg->free_res_q)) {
953			ipr_send_hcam(ioa_cfg,
954				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
955				      hostrcb);
956			return;
957		}
958
959		res = list_entry(ioa_cfg->free_res_q.next,
960				 struct ipr_resource_entry, queue);
961
962		list_del(&res->queue);
963		ipr_init_res_entry(res);
964		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
965	}
966
967	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
968
969	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
970		if (res->sdev) {
971			res->del_from_ml = 1;
972			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
973			if (ioa_cfg->allow_ml_add_del)
974				schedule_work(&ioa_cfg->work_q);
975		} else
976			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
977	} else if (!res->sdev) {
978		res->add_to_ml = 1;
979		if (ioa_cfg->allow_ml_add_del)
980			schedule_work(&ioa_cfg->work_q);
981	}
982
983	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
984}
985
986/**
987 * ipr_process_ccn - Op done function for a CCN.
988 * @ipr_cmd:	ipr command struct
989 *
990 * This function is the op done function for a configuration
991 * change notification host controlled async from the adapter.
992 *
993 * Return value:
994 * 	none
995 **/
996static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
997{
998	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
999	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1000	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1001
1002	list_del(&hostrcb->queue);
1003	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1004
1005	if (ioasc) {
1006		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1007			dev_err(&ioa_cfg->pdev->dev,
1008				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1009
1010		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1011	} else {
1012		ipr_handle_config_change(ioa_cfg, hostrcb);
1013	}
1014}
1015
1016/**
1017 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1018 * @i:		index into buffer
1019 * @buf:		string to modify
1020 *
1021 * This function will strip all trailing whitespace, pad the end
1022 * of the string with a single space, and NULL terminate the string.
1023 *
1024 * Return value:
1025 * 	new length of string
1026 **/
1027static int strip_and_pad_whitespace(int i, char *buf)
1028{
1029	while (i && buf[i] == ' ')
1030		i--;
1031	buf[i+1] = ' ';
1032	buf[i+2] = '\0';
1033	return i + 2;
1034}
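/*
 * Example: with buf containing "IBM     " and i indexing the last byte,
 * the trailing blanks are skipped, buf becomes "IBM " followed by a
 * terminator, and 4 is returned so the caller can append the next field
 * at buf[4] (as ipr_log_vpd_compact() does below).
 */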
1035
1036/**
1037 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1038 * @prefix:		string to print at start of printk
1039 * @hostrcb:	hostrcb pointer
1040 * @vpd:		vendor/product id/sn struct
1041 *
1042 * Return value:
1043 * 	none
1044 **/
1045static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1046				struct ipr_vpd *vpd)
1047{
1048	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1049	int i = 0;
1050
1051	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1052	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1053
1054	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1055	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1056
1057	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1058	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1059
1060	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1061}
1062
1063/**
1064 * ipr_log_vpd - Log the passed VPD to the error log.
1065 * @vpd:		vendor/product id/sn struct
1066 *
1067 * Return value:
1068 * 	none
1069 **/
1070static void ipr_log_vpd(struct ipr_vpd *vpd)
1071{
1072	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1073		    + IPR_SERIAL_NUM_LEN];
1074
1075	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1076	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1077	       IPR_PROD_ID_LEN);
1078	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1079	ipr_err("Vendor/Product ID: %s\n", buffer);
1080
1081	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1082	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1083	ipr_err("    Serial Number: %s\n", buffer);
1084}
1085
1086/**
1087 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1088 * @prefix:		string to print at start of printk
1089 * @hostrcb:	hostrcb pointer
1090 * @vpd:		vendor/product id/sn/wwn struct
1091 *
1092 * Return value:
1093 * 	none
1094 **/
1095static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1096				    struct ipr_ext_vpd *vpd)
1097{
1098	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1099	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1100		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1101}
1102
1103/**
1104 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1105 * @vpd:		vendor/product id/sn/wwn struct
1106 *
1107 * Return value:
1108 * 	none
1109 **/
1110static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1111{
1112	ipr_log_vpd(&vpd->vpd);
1113	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1114		be32_to_cpu(vpd->wwid[1]));
1115}
1116
1117/**
1118 * ipr_log_enhanced_cache_error - Log a cache error.
1119 * @ioa_cfg:	ioa config struct
1120 * @hostrcb:	hostrcb struct
1121 *
1122 * Return value:
1123 * 	none
1124 **/
1125static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1126					 struct ipr_hostrcb *hostrcb)
1127{
1128	struct ipr_hostrcb_type_12_error *error =
1129		&hostrcb->hcam.u.error.u.type_12_error;
1130
1131	ipr_err("-----Current Configuration-----\n");
1132	ipr_err("Cache Directory Card Information:\n");
1133	ipr_log_ext_vpd(&error->ioa_vpd);
1134	ipr_err("Adapter Card Information:\n");
1135	ipr_log_ext_vpd(&error->cfc_vpd);
1136
1137	ipr_err("-----Expected Configuration-----\n");
1138	ipr_err("Cache Directory Card Information:\n");
1139	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1140	ipr_err("Adapter Card Information:\n");
1141	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1142
1143	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1144		     be32_to_cpu(error->ioa_data[0]),
1145		     be32_to_cpu(error->ioa_data[1]),
1146		     be32_to_cpu(error->ioa_data[2]));
1147}
1148
1149/**
1150 * ipr_log_cache_error - Log a cache error.
1151 * @ioa_cfg:	ioa config struct
1152 * @hostrcb:	hostrcb struct
1153 *
1154 * Return value:
1155 * 	none
1156 **/
1157static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1158				struct ipr_hostrcb *hostrcb)
1159{
1160	struct ipr_hostrcb_type_02_error *error =
1161		&hostrcb->hcam.u.error.u.type_02_error;
1162
1163	ipr_err("-----Current Configuration-----\n");
1164	ipr_err("Cache Directory Card Information:\n");
1165	ipr_log_vpd(&error->ioa_vpd);
1166	ipr_err("Adapter Card Information:\n");
1167	ipr_log_vpd(&error->cfc_vpd);
1168
1169	ipr_err("-----Expected Configuration-----\n");
1170	ipr_err("Cache Directory Card Information:\n");
1171	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1172	ipr_err("Adapter Card Information:\n");
1173	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1174
1175	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1176		     be32_to_cpu(error->ioa_data[0]),
1177		     be32_to_cpu(error->ioa_data[1]),
1178		     be32_to_cpu(error->ioa_data[2]));
1179}
1180
1181/**
1182 * ipr_log_enhanced_config_error - Log a configuration error.
1183 * @ioa_cfg:	ioa config struct
1184 * @hostrcb:	hostrcb struct
1185 *
1186 * Return value:
1187 * 	none
1188 **/
1189static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1190					  struct ipr_hostrcb *hostrcb)
1191{
1192	int errors_logged, i;
1193	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1194	struct ipr_hostrcb_type_13_error *error;
1195
1196	error = &hostrcb->hcam.u.error.u.type_13_error;
1197	errors_logged = be32_to_cpu(error->errors_logged);
1198
1199	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1200		be32_to_cpu(error->errors_detected), errors_logged);
1201
1202	dev_entry = error->dev;
1203
1204	for (i = 0; i < errors_logged; i++, dev_entry++) {
1205		ipr_err_separator;
1206
1207		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1208		ipr_log_ext_vpd(&dev_entry->vpd);
1209
1210		ipr_err("-----New Device Information-----\n");
1211		ipr_log_ext_vpd(&dev_entry->new_vpd);
1212
1213		ipr_err("Cache Directory Card Information:\n");
1214		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1215
1216		ipr_err("Adapter Card Information:\n");
1217		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1218	}
1219}
1220
1221/**
1222 * ipr_log_config_error - Log a configuration error.
1223 * @ioa_cfg:	ioa config struct
1224 * @hostrcb:	hostrcb struct
1225 *
1226 * Return value:
1227 * 	none
1228 **/
1229static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1230				 struct ipr_hostrcb *hostrcb)
1231{
1232	int errors_logged, i;
1233	struct ipr_hostrcb_device_data_entry *dev_entry;
1234	struct ipr_hostrcb_type_03_error *error;
1235
1236	error = &hostrcb->hcam.u.error.u.type_03_error;
1237	errors_logged = be32_to_cpu(error->errors_logged);
1238
1239	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1240		be32_to_cpu(error->errors_detected), errors_logged);
1241
1242	dev_entry = error->dev;
1243
1244	for (i = 0; i < errors_logged; i++, dev_entry++) {
1245		ipr_err_separator;
1246
1247		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1248		ipr_log_vpd(&dev_entry->vpd);
1249
1250		ipr_err("-----New Device Information-----\n");
1251		ipr_log_vpd(&dev_entry->new_vpd);
1252
1253		ipr_err("Cache Directory Card Information:\n");
1254		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1255
1256		ipr_err("Adapter Card Information:\n");
1257		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1258
1259		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1260			be32_to_cpu(dev_entry->ioa_data[0]),
1261			be32_to_cpu(dev_entry->ioa_data[1]),
1262			be32_to_cpu(dev_entry->ioa_data[2]),
1263			be32_to_cpu(dev_entry->ioa_data[3]),
1264			be32_to_cpu(dev_entry->ioa_data[4]));
1265	}
1266}
1267
1268/**
1269 * ipr_log_enhanced_array_error - Log an array configuration error.
1270 * @ioa_cfg:	ioa config struct
1271 * @hostrcb:	hostrcb struct
1272 *
1273 * Return value:
1274 * 	none
1275 **/
1276static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1277					 struct ipr_hostrcb *hostrcb)
1278{
1279	int i, num_entries;
1280	struct ipr_hostrcb_type_14_error *error;
1281	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1282	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1283
1284	error = &hostrcb->hcam.u.error.u.type_14_error;
1285
1286	ipr_err_separator;
1287
1288	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1289		error->protection_level,
1290		ioa_cfg->host->host_no,
1291		error->last_func_vset_res_addr.bus,
1292		error->last_func_vset_res_addr.target,
1293		error->last_func_vset_res_addr.lun);
1294
1295	ipr_err_separator;
1296
1297	array_entry = error->array_member;
1298	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1299			    sizeof(error->array_member));
1300
1301	for (i = 0; i < num_entries; i++, array_entry++) {
1302		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1303			continue;
1304
1305		if (be32_to_cpu(error->exposed_mode_adn) == i)
1306			ipr_err("Exposed Array Member %d:\n", i);
1307		else
1308			ipr_err("Array Member %d:\n", i);
1309
1310		ipr_log_ext_vpd(&array_entry->vpd);
1311		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1312		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1313				 "Expected Location");
1314
1315		ipr_err_separator;
1316	}
1317}
1318
1319/**
1320 * ipr_log_array_error - Log an array configuration error.
1321 * @ioa_cfg:	ioa config struct
1322 * @hostrcb:	hostrcb struct
1323 *
1324 * Return value:
1325 * 	none
1326 **/
1327static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1328				struct ipr_hostrcb *hostrcb)
1329{
1330	int i;
1331	struct ipr_hostrcb_type_04_error *error;
1332	struct ipr_hostrcb_array_data_entry *array_entry;
1333	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1334
1335	error = &hostrcb->hcam.u.error.u.type_04_error;
1336
1337	ipr_err_separator;
1338
1339	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1340		error->protection_level,
1341		ioa_cfg->host->host_no,
1342		error->last_func_vset_res_addr.bus,
1343		error->last_func_vset_res_addr.target,
1344		error->last_func_vset_res_addr.lun);
1345
1346	ipr_err_separator;
1347
1348	array_entry = error->array_member;
1349
1350	for (i = 0; i < 18; i++) {
1351		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1352			continue;
1353
1354		if (be32_to_cpu(error->exposed_mode_adn) == i)
1355			ipr_err("Exposed Array Member %d:\n", i);
1356		else
1357			ipr_err("Array Member %d:\n", i);
1358
1359		ipr_log_vpd(&array_entry->vpd);
1360
1361		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1362		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1363				 "Expected Location");
1364
1365		ipr_err_separator;
1366
1367		if (i == 9)
1368			array_entry = error->array_member2;
1369		else
1370			array_entry++;
1371	}
1372}
1373
1374/**
1375 * ipr_log_hex_data - Log additional hex IOA error data.
1376 * @ioa_cfg:	ioa config struct
1377 * @data:		IOA error data
1378 * @len:		data length
1379 *
1380 * Return value:
1381 * 	none
1382 **/
1383static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1384{
1385	int i;
1386
1387	if (len == 0)
1388		return;
1389
1390	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1391		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1392
1393	for (i = 0; i < len / 4; i += 4) {
1394		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1395			be32_to_cpu(data[i]),
1396			be32_to_cpu(data[i+1]),
1397			be32_to_cpu(data[i+2]),
1398			be32_to_cpu(data[i+3]));
1399	}
1400}
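/*
 * The loop above prints four 32-bit words per line, prefixed with the
 * byte offset of the first word, e.g. (illustrative values):
 *
 *	00000000: 00000001 00000002 00000003 00000004
 *	00000010: 00000005 00000006 00000007 00000008
 */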
1401
1402/**
1403 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1404 * @ioa_cfg:	ioa config struct
1405 * @hostrcb:	hostrcb struct
1406 *
1407 * Return value:
1408 * 	none
1409 **/
1410static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1411					    struct ipr_hostrcb *hostrcb)
1412{
1413	struct ipr_hostrcb_type_17_error *error;
1414
1415	error = &hostrcb->hcam.u.error.u.type_17_error;
1416	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1417	strim(error->failure_reason);
1418
1419	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1420		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1421	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1422	ipr_log_hex_data(ioa_cfg, error->data,
1423			 be32_to_cpu(hostrcb->hcam.length) -
1424			 (offsetof(struct ipr_hostrcb_error, u) +
1425			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1426}
1427
1428/**
1429 * ipr_log_dual_ioa_error - Log a dual adapter error.
1430 * @ioa_cfg:	ioa config struct
1431 * @hostrcb:	hostrcb struct
1432 *
1433 * Return value:
1434 * 	none
1435 **/
1436static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1437				   struct ipr_hostrcb *hostrcb)
1438{
1439	struct ipr_hostrcb_type_07_error *error;
1440
1441	error = &hostrcb->hcam.u.error.u.type_07_error;
1442	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1443	strim(error->failure_reason);
1444
1445	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1446		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1447	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1448	ipr_log_hex_data(ioa_cfg, error->data,
1449			 be32_to_cpu(hostrcb->hcam.length) -
1450			 (offsetof(struct ipr_hostrcb_error, u) +
1451			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1452}
1453
1454static const struct {
1455	u8 active;
1456	char *desc;
1457} path_active_desc[] = {
1458	{ IPR_PATH_NO_INFO, "Path" },
1459	{ IPR_PATH_ACTIVE, "Active path" },
1460	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1461};
1462
1463static const struct {
1464	u8 state;
1465	char *desc;
1466} path_state_desc[] = {
1467	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1468	{ IPR_PATH_HEALTHY, "is healthy" },
1469	{ IPR_PATH_DEGRADED, "is degraded" },
1470	{ IPR_PATH_FAILED, "is failed" }
1471};
1472
1473/**
1474 * ipr_log_fabric_path - Log a fabric path error
1475 * @hostrcb:	hostrcb struct
1476 * @fabric:		fabric descriptor
1477 *
1478 * Return value:
1479 * 	none
1480 **/
1481static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1482				struct ipr_hostrcb_fabric_desc *fabric)
1483{
1484	int i, j;
1485	u8 path_state = fabric->path_state;
1486	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1487	u8 state = path_state & IPR_PATH_STATE_MASK;
1488
1489	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1490		if (path_active_desc[i].active != active)
1491			continue;
1492
1493		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1494			if (path_state_desc[j].state != state)
1495				continue;
1496
1497			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1498				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1499					     path_active_desc[i].desc, path_state_desc[j].desc,
1500					     fabric->ioa_port);
1501			} else if (fabric->cascaded_expander == 0xff) {
1502				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1503					     path_active_desc[i].desc, path_state_desc[j].desc,
1504					     fabric->ioa_port, fabric->phy);
1505			} else if (fabric->phy == 0xff) {
1506				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1507					     path_active_desc[i].desc, path_state_desc[j].desc,
1508					     fabric->ioa_port, fabric->cascaded_expander);
1509			} else {
1510				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1511					     path_active_desc[i].desc, path_state_desc[j].desc,
1512					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1513			}
1514			return;
1515		}
1516	}
1517
1518	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1519		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1520}
1521
1522static const struct {
1523	u8 type;
1524	char *desc;
1525} path_type_desc[] = {
1526	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1527	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1528	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1529	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1530};
1531
1532static const struct {
1533	u8 status;
1534	char *desc;
1535} path_status_desc[] = {
1536	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1537	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1538	{ IPR_PATH_CFG_FAILED, "Failed" },
1539	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1540	{ IPR_PATH_NOT_DETECTED, "Missing" },
1541	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1542};
1543
1544static const char *link_rate[] = {
1545	"unknown",
1546	"disabled",
1547	"phy reset problem",
1548	"spinup hold",
1549	"port selector",
1550	"unknown",
1551	"unknown",
1552	"unknown",
1553	"1.5Gbps",
1554	"3.0Gbps",
1555	"unknown",
1556	"unknown",
1557	"unknown",
1558	"unknown",
1559	"unknown",
1560	"unknown"
1561};
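/*
 * The table above is indexed by the negotiated SAS link rate field
 * (cfg->link_rate & IPR_PHY_LINK_RATE_MASK); values 0x8 and 0x9 are the
 * standard SAS encodings for 1.5 Gbps and 3.0 Gbps.
 */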
1562
1563/**
1564 * ipr_log_path_elem - Log a fabric path element.
1565 * @hostrcb:	hostrcb struct
1566 * @cfg:		fabric path element struct
1567 *
1568 * Return value:
1569 * 	none
1570 **/
1571static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1572			      struct ipr_hostrcb_config_element *cfg)
1573{
1574	int i, j;
1575	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1576	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1577
1578	if (type == IPR_PATH_CFG_NOT_EXIST)
1579		return;
1580
1581	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1582		if (path_type_desc[i].type != type)
1583			continue;
1584
1585		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1586			if (path_status_desc[j].status != status)
1587				continue;
1588
1589			if (type == IPR_PATH_CFG_IOA_PORT) {
1590				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1591					     path_status_desc[j].desc, path_type_desc[i].desc,
1592					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1593					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1594			} else {
1595				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1596					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1597						     path_status_desc[j].desc, path_type_desc[i].desc,
1598						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1599						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1600				} else if (cfg->cascaded_expander == 0xff) {
1601					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1602						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1603						     path_type_desc[i].desc, cfg->phy,
1604						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1605						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1606				} else if (cfg->phy == 0xff) {
1607					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1608						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1609						     path_type_desc[i].desc, cfg->cascaded_expander,
1610						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1611						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1612				} else {
1613					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1614						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1615						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1616						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1617						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1618				}
1619			}
1620			return;
1621		}
1622	}
1623
1624	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1625		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1626		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1627		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1628}
1629
1630/**
1631 * ipr_log_fabric_error - Log a fabric error.
1632 * @ioa_cfg:	ioa config struct
1633 * @hostrcb:	hostrcb struct
1634 *
1635 * Return value:
1636 * 	none
1637 **/
1638static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1639				 struct ipr_hostrcb *hostrcb)
1640{
1641	struct ipr_hostrcb_type_20_error *error;
1642	struct ipr_hostrcb_fabric_desc *fabric;
1643	struct ipr_hostrcb_config_element *cfg;
1644	int i, add_len;
1645
1646	error = &hostrcb->hcam.u.error.u.type_20_error;
1647	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1648	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1649
1650	add_len = be32_to_cpu(hostrcb->hcam.length) -
1651		(offsetof(struct ipr_hostrcb_error, u) +
1652		 offsetof(struct ipr_hostrcb_type_20_error, desc));
1653
1654	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1655		ipr_log_fabric_path(hostrcb, fabric);
1656		for_each_fabric_cfg(fabric, cfg)
1657			ipr_log_path_elem(hostrcb, cfg);
1658
1659		add_len -= be16_to_cpu(fabric->length);
1660		fabric = (struct ipr_hostrcb_fabric_desc *)
1661			((unsigned long)fabric + be16_to_cpu(fabric->length));
1662	}
1663
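	/* Whatever follows the parsed fabric descriptors is raw error data; dump it in hex */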
1664	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1665}
1666
1667/**
1668 * ipr_log_generic_error - Log an adapter error.
1669 * @ioa_cfg:	ioa config struct
1670 * @hostrcb:	hostrcb struct
1671 *
1672 * Return value:
1673 * 	none
1674 **/
1675static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1676				  struct ipr_hostrcb *hostrcb)
1677{
1678	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1679			 be32_to_cpu(hostrcb->hcam.length));
1680}
1681
1682/**
1683 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1684 * @ioasc:	IOASC
1685 *
1686 * This function will return the index into the ipr_error_table
1687 * for the specified IOASC. If the IOASC is not in the table,
1688 * 0 will be returned, which points to the entry used for unknown errors.
1689 *
1690 * Return value:
1691 * 	index into the ipr_error_table
1692 **/
1693static u32 ipr_get_error(u32 ioasc)
1694{
1695	int i;
1696
1697	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1698		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1699			return i;
1700
1701	return 0;
1702}
1703
1704/**
1705 * ipr_handle_log_data - Log an adapter error.
1706 * @ioa_cfg:	ioa config struct
1707 * @hostrcb:	hostrcb struct
1708 *
1709 * This function logs an adapter error to the system.
1710 *
1711 * Return value:
1712 * 	none
1713 **/
1714static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1715				struct ipr_hostrcb *hostrcb)
1716{
1717	u32 ioasc;
1718	int error_index;
1719
1720	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1721		return;
1722
1723	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1724		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1725
1726	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1727
1728	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1729	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1730		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1731		scsi_report_bus_reset(ioa_cfg->host,
1732				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1733	}
1734
1735	error_index = ipr_get_error(ioasc);
1736
1737	if (!ipr_error_table[error_index].log_hcam)
1738		return;
1739
1740	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1741
1742	/* Set indication we have logged an error */
1743	ioa_cfg->errors_logged++;
1744
1745	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1746		return;
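	/* Clamp the length so the overlay loggers below never read past the hostrcb buffer */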
1747	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1748		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1749
1750	switch (hostrcb->hcam.overlay_id) {
1751	case IPR_HOST_RCB_OVERLAY_ID_2:
1752		ipr_log_cache_error(ioa_cfg, hostrcb);
1753		break;
1754	case IPR_HOST_RCB_OVERLAY_ID_3:
1755		ipr_log_config_error(ioa_cfg, hostrcb);
1756		break;
1757	case IPR_HOST_RCB_OVERLAY_ID_4:
1758	case IPR_HOST_RCB_OVERLAY_ID_6:
1759		ipr_log_array_error(ioa_cfg, hostrcb);
1760		break;
1761	case IPR_HOST_RCB_OVERLAY_ID_7:
1762		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1763		break;
1764	case IPR_HOST_RCB_OVERLAY_ID_12:
1765		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1766		break;
1767	case IPR_HOST_RCB_OVERLAY_ID_13:
1768		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1769		break;
1770	case IPR_HOST_RCB_OVERLAY_ID_14:
1771	case IPR_HOST_RCB_OVERLAY_ID_16:
1772		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1773		break;
1774	case IPR_HOST_RCB_OVERLAY_ID_17:
1775		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1776		break;
1777	case IPR_HOST_RCB_OVERLAY_ID_20:
1778		ipr_log_fabric_error(ioa_cfg, hostrcb);
1779		break;
1780	case IPR_HOST_RCB_OVERLAY_ID_1:
1781	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1782	default:
1783		ipr_log_generic_error(ioa_cfg, hostrcb);
1784		break;
1785	}
1786}
1787
1788/**
1789 * ipr_process_error - Op done function for an adapter error log.
1790 * @ipr_cmd:	ipr command struct
1791 *
1792 * This function is the op done function for an error log host
1793 * controlled async from the adapter. It will log the error and
1794 * send the HCAM back to the adapter.
1795 *
1796 * Return value:
1797 * 	none
1798 **/
1799static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1800{
1801	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1802	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1803	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1804	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1805
1806	list_del(&hostrcb->queue);
1807	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1808
1809	if (!ioasc) {
1810		ipr_handle_log_data(ioa_cfg, hostrcb);
1811		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1812			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1813	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1814		dev_err(&ioa_cfg->pdev->dev,
1815			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1816	}
1817
1818	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1819}
1820
1821/**
1822 * ipr_timeout -  An internally generated op has timed out.
1823 * @ipr_cmd:	ipr command struct
1824 *
1825 * This function blocks host requests and initiates an
1826 * adapter reset.
1827 *
1828 * Return value:
1829 * 	none
1830 **/
1831static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1832{
1833	unsigned long lock_flags = 0;
1834	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1835
1836	ENTER;
1837	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1838
1839	ioa_cfg->errors_logged++;
1840	dev_err(&ioa_cfg->pdev->dev,
1841		"Adapter being reset due to command timeout.\n");
1842
1843	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1844		ioa_cfg->sdt_state = GET_DUMP;
1845
1846	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1847		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1848
1849	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1850	LEAVE;
1851}
1852
1853/**
1854 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1855 * @ipr_cmd:	ipr command struct
1856 *
1857 * This function blocks host requests and initiates an
1858 * adapter reset.
1859 *
1860 * Return value:
1861 * 	none
1862 **/
1863static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1864{
1865	unsigned long lock_flags = 0;
1866	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1867
1868	ENTER;
1869	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1870
1871	ioa_cfg->errors_logged++;
1872	dev_err(&ioa_cfg->pdev->dev,
1873		"Adapter timed out transitioning to operational.\n");
1874
1875	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1876		ioa_cfg->sdt_state = GET_DUMP;
1877
1878	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1879		if (ipr_fastfail)
1880			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1881		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1882	}
1883
1884	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1885	LEAVE;
1886}
1887
1888/**
1889 * ipr_reset_reload - Reset/Reload the IOA
1890 * @ioa_cfg:		ioa config struct
1891 * @shutdown_type:	shutdown type
1892 *
1893 * This function resets the adapter and re-initializes it.
1894 * This function assumes that all new host commands have been stopped.
1895 * Return value:
1896 * 	SUCCESS / FAILED
1897 **/
1898static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1899			    enum ipr_shutdown_type shutdown_type)
1900{
1901	if (!ioa_cfg->in_reset_reload)
1902		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1903
1904	spin_unlock_irq(ioa_cfg->host->host_lock);
1905	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1906	spin_lock_irq(ioa_cfg->host->host_lock);
1907
1908	/* If a host reset hit us while we were already resetting the
1909	 adapter for some reason and that reset failed, the adapter is dead. */
1910	if (ioa_cfg->ioa_is_dead) {
1911		ipr_trace;
1912		return FAILED;
1913	}
1914
1915	return SUCCESS;
1916}
1917
1918/**
1919 * ipr_find_ses_entry - Find matching SES in SES table
1920 * @res:	resource entry struct of SES
1921 *
1922 * Return value:
1923 * 	pointer to SES table entry / NULL on failure
1924 **/
1925static const struct ipr_ses_table_entry *
1926ipr_find_ses_entry(struct ipr_resource_entry *res)
1927{
1928	int i, j, matches;
1929	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1930
1931	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1932		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1933			if (ste->compare_product_id_byte[j] == 'X') {
1934				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1935					matches++;
1936				else
1937					break;
1938			} else
1939				matches++;
1940		}
1941
1942		if (matches == IPR_PROD_ID_LEN)
1943			return ste;
1944	}
1945
1946	return NULL;
1947}
1948
1949/**
1950 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1951 * @ioa_cfg:	ioa config struct
1952 * @bus:		SCSI bus
1953 * @bus_width:	bus width
1954 *
1955 * Return value:
1956 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1957 *	For a 2-byte (wide) SCSI bus, the transfer rate in MB/sec is
1958 *	twice the bus clock in MHz (e.g. a wide-enabled bus running at
1959 *	160MHz moves up to 320MB/sec).
1960 **/
1961static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1962{
1963	struct ipr_resource_entry *res;
1964	const struct ipr_ses_table_entry *ste;
1965	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1966
1967	/* Loop through each config table entry in the config table buffer */
1968	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1969		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1970			continue;
1971
1972		if (bus != res->cfgte.res_addr.bus)
1973			continue;
1974
1975		if (!(ste = ipr_find_ses_entry(res)))
1976			continue;
1977
1978		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
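		/* Convert the SES table limit (MB/sec) to bus speed in 100KHz units for this bus width */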
1979	}
1980
1981	return max_xfer_rate;
1982}
1983
1984/**
1985 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1986 * @ioa_cfg:		ioa config struct
1987 * @max_delay:		max delay in micro-seconds to wait
1988 *
1989 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1990 *
1991 * Return value:
1992 * 	0 on success / other on failure
1993 **/
1994static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1995{
1996	volatile u32 pcii_reg;
1997	int delay = 1;
1998
1999	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2000	while (delay < max_delay) {
2001		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2002
2003		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2004			return 0;
2005
2006		/* udelay cannot be used if delay is more than a few milliseconds */
2007		if ((delay / 1000) > MAX_UDELAY_MS)
2008			mdelay(delay / 1000);
2009		else
2010			udelay(delay);
2011
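		/* Back off exponentially: double the delay on each pass */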
2012		delay += delay;
2013	}
2014	return -EIO;
2015}
2016
2017/**
2018 * ipr_get_ldump_data_section - Dump IOA memory
2019 * @ioa_cfg:			ioa config struct
2020 * @start_addr:			adapter address to dump
2021 * @dest:				destination kernel buffer
2022 * @length_in_words:	length to dump in 4 byte words
2023 *
2024 * Return value:
2025 * 	0 on success / -EIO on failure
2026 **/
2027static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2028				      u32 start_addr,
2029				      __be32 *dest, u32 length_in_words)
2030{
2031	volatile u32 temp_pcii_reg;
2032	int i, delay = 0;
2033
2034	/* Write IOA interrupt reg starting LDUMP state  */
2035	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2036	       ioa_cfg->regs.set_uproc_interrupt_reg);
2037
2038	/* Wait for IO debug acknowledge */
2039	if (ipr_wait_iodbg_ack(ioa_cfg,
2040			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2041		dev_err(&ioa_cfg->pdev->dev,
2042			"IOA dump long data transfer timeout\n");
2043		return -EIO;
2044	}
2045
2046	/* Signal LDUMP interlocked - clear IO debug ack */
2047	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2048	       ioa_cfg->regs.clr_interrupt_reg);
2049
2050	/* Write Mailbox with starting address */
2051	writel(start_addr, ioa_cfg->ioa_mailbox);
2052
2053	/* Signal address valid - clear IOA Reset alert */
2054	writel(IPR_UPROCI_RESET_ALERT,
2055	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2056
2057	for (i = 0; i < length_in_words; i++) {
2058		/* Wait for IO debug acknowledge */
2059		if (ipr_wait_iodbg_ack(ioa_cfg,
2060				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2061			dev_err(&ioa_cfg->pdev->dev,
2062				"IOA dump short data transfer timeout\n");
2063			return -EIO;
2064		}
2065
2066		/* Read data from mailbox and increment destination pointer */
2067		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2068		dest++;
2069
2070		/* For all but the last word of data, signal data received */
2071		if (i < (length_in_words - 1)) {
2072			/* Signal dump data received - Clear IO debug Ack */
2073			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2074			       ioa_cfg->regs.clr_interrupt_reg);
2075		}
2076	}
2077
2078	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2079	writel(IPR_UPROCI_RESET_ALERT,
2080	       ioa_cfg->regs.set_uproc_interrupt_reg);
2081
2082	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2083	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2084
2085	/* Signal dump data received - Clear IO debug Ack */
2086	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2087	       ioa_cfg->regs.clr_interrupt_reg);
2088
2089	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2090	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2091		temp_pcii_reg =
2092		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2093
2094		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2095			return 0;
2096
2097		udelay(10);
2098		delay += 10;
2099	}
2100
2101	return 0;
2102}
2103
2104#ifdef CONFIG_SCSI_IPR_DUMP
2105/**
2106 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2107 * @ioa_cfg:		ioa config struct
2108 * @pci_address:	adapter address
2109 * @length:			length of data to copy
2110 *
2111 * Copy data from PCI adapter to kernel buffer.
2112 * Note: length MUST be a 4 byte multiple
2113 * Return value:
2114 * 	0 on success / other on failure
2115 **/
2116static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2117			unsigned long pci_address, u32 length)
2118{
2119	int bytes_copied = 0;
2120	int cur_len, rc, rem_len, rem_page_len;
2121	__be32 *page;
2122	unsigned long lock_flags = 0;
2123	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2124
2125	while (bytes_copied < length &&
2126	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2127		if (ioa_dump->page_offset >= PAGE_SIZE ||
2128		    ioa_dump->page_offset == 0) {
2129			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2130
2131			if (!page) {
2132				ipr_trace;
2133				return bytes_copied;
2134			}
2135
2136			ioa_dump->page_offset = 0;
2137			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2138			ioa_dump->next_page_index++;
2139		} else
2140			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2141
2142		rem_len = length - bytes_copied;
2143		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2144		cur_len = min(rem_len, rem_page_len);
2145
2146		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2147		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2148			rc = -EIO;
2149		} else {
2150			rc = ipr_get_ldump_data_section(ioa_cfg,
2151							pci_address + bytes_copied,
2152							&page[ioa_dump->page_offset / 4],
2153							(cur_len / sizeof(u32)));
2154		}
2155		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2156
2157		if (!rc) {
2158			ioa_dump->page_offset += cur_len;
2159			bytes_copied += cur_len;
2160		} else {
2161			ipr_trace;
2162			break;
2163		}
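		/* Yield the CPU between sections since large dumps can take a while */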
2164		schedule();
2165	}
2166
2167	return bytes_copied;
2168}
2169
2170/**
2171 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2172 * @hdr:	dump entry header struct
2173 *
2174 * Return value:
2175 * 	nothing
2176 **/
2177static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2178{
2179	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2180	hdr->num_elems = 1;
2181	hdr->offset = sizeof(*hdr);
2182	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2183}
2184
2185/**
2186 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2187 * @ioa_cfg:	ioa config struct
2188 * @driver_dump:	driver dump struct
2189 *
2190 * Return value:
2191 * 	nothing
2192 **/
2193static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2194				   struct ipr_driver_dump *driver_dump)
2195{
2196	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2197
2198	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2199	driver_dump->ioa_type_entry.hdr.len =
2200		sizeof(struct ipr_dump_ioa_type_entry) -
2201		sizeof(struct ipr_dump_entry_header);
2202	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2203	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2204	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2205	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2206		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2207		ucode_vpd->minor_release[1];
2208	driver_dump->hdr.num_entries++;
2209}
2210
2211/**
2212 * ipr_dump_version_data - Fill in the driver version in the dump.
2213 * @ioa_cfg:	ioa config struct
2214 * @driver_dump:	driver dump struct
2215 *
2216 * Return value:
2217 * 	nothing
2218 **/
2219static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2220				  struct ipr_driver_dump *driver_dump)
2221{
2222	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2223	driver_dump->version_entry.hdr.len =
2224		sizeof(struct ipr_dump_version_entry) -
2225		sizeof(struct ipr_dump_entry_header);
2226	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2227	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2228	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2229	driver_dump->hdr.num_entries++;
2230}
2231
2232/**
2233 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2234 * @ioa_cfg:	ioa config struct
2235 * @driver_dump:	driver dump struct
2236 *
2237 * Return value:
2238 * 	nothing
2239 **/
2240static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2241				   struct ipr_driver_dump *driver_dump)
2242{
2243	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2244	driver_dump->trace_entry.hdr.len =
2245		sizeof(struct ipr_dump_trace_entry) -
2246		sizeof(struct ipr_dump_entry_header);
2247	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2248	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2249	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2250	driver_dump->hdr.num_entries++;
2251}
2252
2253/**
2254 * ipr_dump_location_data - Fill in the IOA location in the dump.
2255 * @ioa_cfg:	ioa config struct
2256 * @driver_dump:	driver dump struct
2257 *
2258 * Return value:
2259 * 	nothing
2260 **/
2261static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2262				   struct ipr_driver_dump *driver_dump)
2263{
2264	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2265	driver_dump->location_entry.hdr.len =
2266		sizeof(struct ipr_dump_location_entry) -
2267		sizeof(struct ipr_dump_entry_header);
2268	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2269	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2270	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2271	driver_dump->hdr.num_entries++;
2272}
2273
2274/**
2275 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2276 * @ioa_cfg:	ioa config struct
2277 * @dump:		dump struct
2278 *
2279 * Return value:
2280 * 	nothing
2281 **/
2282static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2283{
2284	unsigned long start_addr, sdt_word;
2285	unsigned long lock_flags = 0;
2286	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2287	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2288	u32 num_entries, start_off, end_off;
2289	u32 bytes_to_copy, bytes_copied, rc;
2290	struct ipr_sdt *sdt;
2291	int i;
2292
2293	ENTER;
2294
2295	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2296
2297	if (ioa_cfg->sdt_state != GET_DUMP) {
2298		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2299		return;
2300	}
2301
2302	start_addr = readl(ioa_cfg->ioa_mailbox);
2303
2304	if (!ipr_sdt_is_fmt2(start_addr)) {
2305		dev_err(&ioa_cfg->pdev->dev,
2306			"Invalid dump table format: %lx\n", start_addr);
2307		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2308		return;
2309	}
2310
2311	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2312
2313	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2314
2315	/* Initialize the overall dump header */
2316	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2317	driver_dump->hdr.num_entries = 1;
2318	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2319	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2320	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2321	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2322
2323	ipr_dump_version_data(ioa_cfg, driver_dump);
2324	ipr_dump_location_data(ioa_cfg, driver_dump);
2325	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2326	ipr_dump_trace_data(ioa_cfg, driver_dump);
2327
2328	/* Update dump_header */
2329	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2330
2331	/* IOA Dump entry */
2332	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2333	ioa_dump->format = IPR_SDT_FMT2;
2334	ioa_dump->hdr.len = 0;
2335	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2336	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2337
2338	/* First entries in sdt are actually a list of dump addresses and
2339	 lengths to gather the real dump data.  sdt represents the pointer
2340	 to the ioa generated dump table.  Dump data will be extracted based
2341	 on entries in this table */
2342	sdt = &ioa_dump->sdt;
2343
2344	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2345					sizeof(struct ipr_sdt) / sizeof(__be32));
2346
2347	/* Make sure the Smart Dump table is ready to use and the first entry is valid */
2348	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2349		dev_err(&ioa_cfg->pdev->dev,
2350			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2351			rc, be32_to_cpu(sdt->hdr.state));
2352		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2353		ioa_cfg->sdt_state = DUMP_OBTAINED;
2354		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2355		return;
2356	}
2357
2358	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2359
2360	if (num_entries > IPR_NUM_SDT_ENTRIES)
2361		num_entries = IPR_NUM_SDT_ENTRIES;
2362
2363	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2364
2365	for (i = 0; i < num_entries; i++) {
2366		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2367			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2368			break;
2369		}
2370
2371		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2372			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2373			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2374			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2375
2376			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2377				bytes_to_copy = end_off - start_off;
2378				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2379					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2380					continue;
2381				}
2382
2383				/* Copy data from adapter to driver buffers */
2384				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2385							    bytes_to_copy);
2386
2387				ioa_dump->hdr.len += bytes_copied;
2388
2389				if (bytes_copied != bytes_to_copy) {
2390					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2391					break;
2392				}
2393			}
2394		}
2395	}
2396
2397	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2398
2399	/* Update dump_header */
2400	driver_dump->hdr.len += ioa_dump->hdr.len;
2401	wmb();
2402	ioa_cfg->sdt_state = DUMP_OBTAINED;
2403	LEAVE;
2404}
2405
2406#else
2407#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2408#endif
2409
2410/**
2411 * ipr_release_dump - Free adapter dump memory
2412 * @kref:	kref struct
2413 *
2414 * Return value:
2415 *	nothing
2416 **/
2417static void ipr_release_dump(struct kref *kref)
2418{
2419	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2420	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2421	unsigned long lock_flags = 0;
2422	int i;
2423
2424	ENTER;
2425	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2426	ioa_cfg->dump = NULL;
2427	ioa_cfg->sdt_state = INACTIVE;
2428	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2429
2430	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2431		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2432
2433	kfree(dump);
2434	LEAVE;
2435}
2436
2437/**
2438 * ipr_worker_thread - Worker thread
2439 * @work:		ioa config struct
2440 *
2441 * Called at task level from a work thread. This function takes care
2442 * of adding and removing devices from the mid-layer as configuration
2443 * changes are detected by the adapter.
2444 *
2445 * Return value:
2446 * 	nothing
2447 **/
2448static void ipr_worker_thread(struct work_struct *work)
2449{
2450	unsigned long lock_flags;
2451	struct ipr_resource_entry *res;
2452	struct scsi_device *sdev;
2453	struct ipr_dump *dump;
2454	struct ipr_ioa_cfg *ioa_cfg =
2455		container_of(work, struct ipr_ioa_cfg, work_q);
2456	u8 bus, target, lun;
2457	int did_work;
2458
2459	ENTER;
2460	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2461
2462	if (ioa_cfg->sdt_state == GET_DUMP) {
2463		dump = ioa_cfg->dump;
2464		if (!dump) {
2465			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2466			return;
2467		}
2468		kref_get(&dump->kref);
2469		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2470		ipr_get_ioa_dump(ioa_cfg, dump);
2471		kref_put(&dump->kref, ipr_release_dump);
2472
2473		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2474		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2475			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2476		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2477		return;
2478	}
2479
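	/* scsi_remove_device/scsi_add_device can sleep, so the host lock is
	 dropped around each mid-layer call and the list walk is restarted */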
2480restart:
2481	do {
2482		did_work = 0;
2483		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2484			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2485			return;
2486		}
2487
2488		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2489			if (res->del_from_ml && res->sdev) {
2490				did_work = 1;
2491				sdev = res->sdev;
2492				if (!scsi_device_get(sdev)) {
2493					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2494					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2495					scsi_remove_device(sdev);
2496					scsi_device_put(sdev);
2497					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2498				}
2499				break;
2500			}
2501		}
2502	} while(did_work);
2503
2504	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2505		if (res->add_to_ml) {
2506			bus = res->cfgte.res_addr.bus;
2507			target = res->cfgte.res_addr.target;
2508			lun = res->cfgte.res_addr.lun;
2509			res->add_to_ml = 0;
2510			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511			scsi_add_device(ioa_cfg->host, bus, target, lun);
2512			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2513			goto restart;
2514		}
2515	}
2516
2517	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2518	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
2519	LEAVE;
2520}
2521
2522#ifdef CONFIG_SCSI_IPR_TRACE
2523/**
2524 * ipr_read_trace - Dump the adapter trace
2525 * @kobj:		kobject struct
2526 * @bin_attr:		bin_attribute struct
2527 * @buf:		buffer
2528 * @off:		offset
2529 * @count:		buffer size
2530 *
2531 * Return value:
2532 *	number of bytes printed to buffer
2533 **/
2534static ssize_t ipr_read_trace(struct kobject *kobj,
2535			      struct bin_attribute *bin_attr,
2536			      char *buf, loff_t off, size_t count)
2537{
2538	struct device *dev = container_of(kobj, struct device, kobj);
2539	struct Scsi_Host *shost = class_to_shost(dev);
2540	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2541	unsigned long lock_flags = 0;
2542	ssize_t ret;
2543
2544	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2545	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
2546				IPR_TRACE_SIZE);
2547	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2548
2549	return ret;
2550}
2551
2552static struct bin_attribute ipr_trace_attr = {
2553	.attr =	{
2554		.name = "trace",
2555		.mode = S_IRUGO,
2556	},
2557	.size = 0,
2558	.read = ipr_read_trace,
2559};
2560#endif
2561
2562static const struct {
2563	enum ipr_cache_state state;
2564	char *name;
2565} cache_state [] = {
2566	{ CACHE_NONE, "none" },
2567	{ CACHE_DISABLED, "disabled" },
2568	{ CACHE_ENABLED, "enabled" }
2569};
2570
2571/**
2572 * ipr_show_write_caching - Show the write caching attribute
2573 * @dev:	device struct
2574 * @buf:	buffer
2575 *
2576 * Return value:
2577 *	number of bytes printed to buffer
2578 **/
2579static ssize_t ipr_show_write_caching(struct device *dev,
2580				      struct device_attribute *attr, char *buf)
2581{
2582	struct Scsi_Host *shost = class_to_shost(dev);
2583	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2584	unsigned long lock_flags = 0;
2585	int i, len = 0;
2586
2587	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2588	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2589		if (cache_state[i].state == ioa_cfg->cache_state) {
2590			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2591			break;
2592		}
2593	}
2594	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2595	return len;
2596}
2597
2598
2599/**
2600 * ipr_store_write_caching - Enable/disable adapter write cache
2601 * @dev:	device struct
2602 * @buf:	buffer
2603 * @count:	buffer size
2604 *
2605 * This function will enable/disable adapter write cache.
2606 *
2607 * Return value:
2608 * 	count on success / other on failure
2609 **/
2610static ssize_t ipr_store_write_caching(struct device *dev,
2611				       struct device_attribute *attr,
2612				       const char *buf, size_t count)
2613{
2614	struct Scsi_Host *shost = class_to_shost(dev);
2615	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2616	unsigned long lock_flags = 0;
2617	enum ipr_cache_state new_state = CACHE_INVALID;
2618	int i;
2619
2620	if (!capable(CAP_SYS_ADMIN))
2621		return -EACCES;
2622	if (ioa_cfg->cache_state == CACHE_NONE)
2623		return -EINVAL;
2624
2625	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2626		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2627			new_state = cache_state[i].state;
2628			break;
2629		}
2630	}
2631
2632	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2633		return -EINVAL;
2634
2635	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2636	if (ioa_cfg->cache_state == new_state) {
2637		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2638		return count;
2639	}
2640
2641	ioa_cfg->cache_state = new_state;
2642	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2643		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2644	if (!ioa_cfg->in_reset_reload)
2645		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2646	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2647	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2648
2649	return count;
2650}
2651
2652static struct device_attribute ipr_ioa_cache_attr = {
2653	.attr = {
2654		.name =		"write_cache",
2655		.mode =		S_IRUGO | S_IWUSR,
2656	},
2657	.show = ipr_show_write_caching,
2658	.store = ipr_store_write_caching
2659};
2660
2661/**
2662 * ipr_show_fw_version - Show the firmware version
2663 * @dev:	class device struct
2664 * @buf:	buffer
2665 *
2666 * Return value:
2667 *	number of bytes printed to buffer
2668 **/
2669static ssize_t ipr_show_fw_version(struct device *dev,
2670				   struct device_attribute *attr, char *buf)
2671{
2672	struct Scsi_Host *shost = class_to_shost(dev);
2673	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2674	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2675	unsigned long lock_flags = 0;
2676	int len;
2677
2678	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2679	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2680		       ucode_vpd->major_release, ucode_vpd->card_type,
2681		       ucode_vpd->minor_release[0],
2682		       ucode_vpd->minor_release[1]);
2683	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2684	return len;
2685}
2686
2687static struct device_attribute ipr_fw_version_attr = {
2688	.attr = {
2689		.name =		"fw_version",
2690		.mode =		S_IRUGO,
2691	},
2692	.show = ipr_show_fw_version,
2693};
2694
2695/**
2696 * ipr_show_log_level - Show the adapter's error logging level
2697 * @dev:	class device struct
2698 * @buf:	buffer
2699 *
2700 * Return value:
2701 * 	number of bytes printed to buffer
2702 **/
2703static ssize_t ipr_show_log_level(struct device *dev,
2704				   struct device_attribute *attr, char *buf)
2705{
2706	struct Scsi_Host *shost = class_to_shost(dev);
2707	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2708	unsigned long lock_flags = 0;
2709	int len;
2710
2711	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2712	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2713	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2714	return len;
2715}
2716
2717/**
2718 * ipr_store_log_level - Change the adapter's error logging level
2719 * @dev:	class device struct
2720 * @buf:	buffer
2721 *
2722 * Return value:
2723 * 	number of bytes consumed from the buffer
2724 **/
2725static ssize_t ipr_store_log_level(struct device *dev,
2726			           struct device_attribute *attr,
2727				   const char *buf, size_t count)
2728{
2729	struct Scsi_Host *shost = class_to_shost(dev);
2730	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2731	unsigned long lock_flags = 0;
2732
2733	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2734	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2735	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2736	return strlen(buf);
2737}
2738
2739static struct device_attribute ipr_log_level_attr = {
2740	.attr = {
2741		.name =		"log_level",
2742		.mode =		S_IRUGO | S_IWUSR,
2743	},
2744	.show = ipr_show_log_level,
2745	.store = ipr_store_log_level
2746};
2747
2748/**
2749 * ipr_store_diagnostics - IOA Diagnostics interface
2750 * @dev:	device struct
2751 * @buf:	buffer
2752 * @count:	buffer size
2753 *
2754 * This function will reset the adapter and wait a reasonable
2755 * amount of time for any errors that the adapter might log.
2756 *
2757 * Return value:
2758 * 	count on success / other on failure
2759 **/
2760static ssize_t ipr_store_diagnostics(struct device *dev,
2761				     struct device_attribute *attr,
2762				     const char *buf, size_t count)
2763{
2764	struct Scsi_Host *shost = class_to_shost(dev);
2765	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2766	unsigned long lock_flags = 0;
2767	int rc = count;
2768
2769	if (!capable(CAP_SYS_ADMIN))
2770		return -EACCES;
2771
2772	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2773	while(ioa_cfg->in_reset_reload) {
2774		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2775		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2776		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2777	}
2778
2779	ioa_cfg->errors_logged = 0;
2780	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2781
2782	if (ioa_cfg->in_reset_reload) {
2783		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2784		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2785
2786		/* Wait for a second for any errors to be logged */
2787		msleep(1000);
2788	} else {
2789		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2790		return -EIO;
2791	}
2792
2793	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2794	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2795		rc = -EIO;
2796	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2797
2798	return rc;
2799}
2800
2801static struct device_attribute ipr_diagnostics_attr = {
2802	.attr = {
2803		.name =		"run_diagnostics",
2804		.mode =		S_IWUSR,
2805	},
2806	.store = ipr_store_diagnostics
2807};
2808
2809/**
2810 * ipr_show_adapter_state - Show the adapter's state
2811 * @dev:	device struct
2812 * @buf:	buffer
2813 *
2814 * Return value:
2815 * 	number of bytes printed to buffer
2816 **/
2817static ssize_t ipr_show_adapter_state(struct device *dev,
2818				      struct device_attribute *attr, char *buf)
2819{
2820	struct Scsi_Host *shost = class_to_shost(dev);
2821	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2822	unsigned long lock_flags = 0;
2823	int len;
2824
2825	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2826	if (ioa_cfg->ioa_is_dead)
2827		len = snprintf(buf, PAGE_SIZE, "offline\n");
2828	else
2829		len = snprintf(buf, PAGE_SIZE, "online\n");
2830	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2831	return len;
2832}
2833
2834/**
2835 * ipr_store_adapter_state - Change adapter state
2836 * @dev:	device struct
2837 * @buf:	buffer
2838 * @count:	buffer size
2839 *
2840 * This function will change the adapter's state.
2841 *
2842 * Return value:
2843 * 	count on success / other on failure
2844 **/
2845static ssize_t ipr_store_adapter_state(struct device *dev,
2846				       struct device_attribute *attr,
2847				       const char *buf, size_t count)
2848{
2849	struct Scsi_Host *shost = class_to_shost(dev);
2850	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2851	unsigned long lock_flags;
2852	int result = count;
2853
2854	if (!capable(CAP_SYS_ADMIN))
2855		return -EACCES;
2856
2857	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2858	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2859		ioa_cfg->ioa_is_dead = 0;
2860		ioa_cfg->reset_retries = 0;
2861		ioa_cfg->in_ioa_bringdown = 0;
2862		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2863	}
2864	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2865	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2866
2867	return result;
2868}
2869
2870static struct device_attribute ipr_ioa_state_attr = {
2871	.attr = {
2872		.name =		"online_state",
2873		.mode =		S_IRUGO | S_IWUSR,
2874	},
2875	.show = ipr_show_adapter_state,
2876	.store = ipr_store_adapter_state
2877};
2878
2879/**
2880 * ipr_store_reset_adapter - Reset the adapter
2881 * @dev:	device struct
2882 * @buf:	buffer
2883 * @count:	buffer size
2884 *
2885 * This function will reset the adapter.
2886 *
2887 * Return value:
2888 * 	count on success / other on failure
2889 **/
2890static ssize_t ipr_store_reset_adapter(struct device *dev,
2891				       struct device_attribute *attr,
2892				       const char *buf, size_t count)
2893{
2894	struct Scsi_Host *shost = class_to_shost(dev);
2895	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2896	unsigned long lock_flags;
2897	int result = count;
2898
2899	if (!capable(CAP_SYS_ADMIN))
2900		return -EACCES;
2901
2902	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2903	if (!ioa_cfg->in_reset_reload)
2904		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2905	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2906	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2907
2908	return result;
2909}
2910
2911static struct device_attribute ipr_ioa_reset_attr = {
2912	.attr = {
2913		.name =		"reset_host",
2914		.mode =		S_IWUSR,
2915	},
2916	.store = ipr_store_reset_adapter
2917};
2918
2919/**
2920 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2921 * @buf_len:		buffer length
2922 *
2923 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2924 * list to use for microcode download
2925 *
2926 * Return value:
2927 * 	pointer to sglist / NULL on failure
2928 **/
2929static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2930{
2931	int sg_size, order, bsize_elem, num_elem, i, j;
2932	struct ipr_sglist *sglist;
2933	struct scatterlist *scatterlist;
2934	struct page *page;
2935
2936	/* Get the minimum size per scatter/gather element */
2937	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2938
2939	/* Get the actual size per element */
2940	order = get_order(sg_size);
2941
2942	/* Determine the actual number of bytes per element */
2943	bsize_elem = PAGE_SIZE * (1 << order);
2944
2945	/* Determine the actual number of sg entries needed */
2946	if (buf_len % bsize_elem)
2947		num_elem = (buf_len / bsize_elem) + 1;
2948	else
2949		num_elem = buf_len / bsize_elem;
2950
2951	/* Allocate a scatter/gather list for the DMA */
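	/* Note: struct ipr_sglist already embeds one scatterlist entry, hence num_elem - 1 below */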
2952	sglist = kzalloc(sizeof(struct ipr_sglist) +
2953			 (sizeof(struct scatterlist) * (num_elem - 1)),
2954			 GFP_KERNEL);
2955
2956	if (sglist == NULL) {
2957		ipr_trace;
2958		return NULL;
2959	}
2960
2961	scatterlist = sglist->scatterlist;
2962	sg_init_table(scatterlist, num_elem);
2963
2964	sglist->order = order;
2965	sglist->num_sg = num_elem;
2966
2967	/* Allocate a bunch of sg elements */
2968	for (i = 0; i < num_elem; i++) {
2969		page = alloc_pages(GFP_KERNEL, order);
2970		if (!page) {
2971			ipr_trace;
2972
2973			/* Free up what we already allocated */
2974			for (j = i - 1; j >= 0; j--)
2975				__free_pages(sg_page(&scatterlist[j]), order);
2976			kfree(sglist);
2977			return NULL;
2978		}
2979
2980		sg_set_page(&scatterlist[i], page, 0, 0);
2981	}
2982
2983	return sglist;
2984}
2985
2986/**
2987 * ipr_free_ucode_buffer - Frees a microcode download buffer
2988 * @sglist:		scatter/gather list pointer
2989 *
2990 * Free a DMA'able ucode download buffer previously allocated with
2991 * ipr_alloc_ucode_buffer
2992 *
2993 * Return value:
2994 * 	nothing
2995 **/
2996static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2997{
2998	int i;
2999
3000	for (i = 0; i < sglist->num_sg; i++)
3001		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3002
3003	kfree(sglist);
3004}
3005
3006/**
3007 * ipr_copy_ucode_buffer - Copy the microcode image to the download buffer
3008 * @sglist:		scatter/gather list pointer
3009 * @buffer:		buffer pointer
3010 * @len:		buffer length
3011 *
3012 * Copy a microcode image from a kernel buffer into a buffer allocated by
3013 * ipr_alloc_ucode_buffer
3014 *
3015 * Return value:
3016 * 	0 on success / other on failure
3017 **/
3018static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3019				 u8 *buffer, u32 len)
3020{
3021	int bsize_elem, i, result = 0;
3022	struct scatterlist *scatterlist;
3023	void *kaddr;
3024
3025	/* Determine the actual number of bytes per element */
3026	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3027
3028	scatterlist = sglist->scatterlist;
3029
3030	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3031		struct page *page = sg_page(&scatterlist[i]);
3032
3033		kaddr = kmap(page);
3034		memcpy(kaddr, buffer, bsize_elem);
3035		kunmap(page);
3036
3037		scatterlist[i].length = bsize_elem;
3038
3039		if (result != 0) {
3040			ipr_trace;
3041			return result;
3042		}
3043	}
3044
3045	if (len % bsize_elem) {
3046		struct page *page = sg_page(&scatterlist[i]);
3047
3048		kaddr = kmap(page);
3049		memcpy(kaddr, buffer, len % bsize_elem);
3050		kunmap(page);
3051
3052		scatterlist[i].length = len % bsize_elem;
3053	}
3054
3055	sglist->buffer_len = len;
3056	return result;
3057}
3058
3059/**
3060 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3061 * @ipr_cmd:		ipr command struct
3062 * @sglist:		scatter/gather list
3063 *
3064 * Builds a microcode download IOA data list (IOADL).
3065 *
3066 **/
3067static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3068				    struct ipr_sglist *sglist)
3069{
3070	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3071	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3072	struct scatterlist *scatterlist = sglist->scatterlist;
3073	int i;
3074
3075	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3076	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3077	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3078
3079	ioarcb->ioadl_len =
3080		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3081	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3082		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3083		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3084		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3085	}
3086
3087	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3088}
3089
3090/**
3091 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3092 * @ipr_cmd:	ipr command struct
3093 * @sglist:		scatter/gather list
3094 *
3095 * Builds a microcode download IOA data list (IOADL).
3096 *
3097 **/
3098static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3099				  struct ipr_sglist *sglist)
3100{
3101	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3102	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3103	struct scatterlist *scatterlist = sglist->scatterlist;
3104	int i;
3105
3106	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3107	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3108	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3109
3110	ioarcb->ioadl_len =
3111		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3112
3113	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3114		ioadl[i].flags_and_data_len =
3115			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3116		ioadl[i].address =
3117			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3118	}
3119
3120	ioadl[i-1].flags_and_data_len |=
3121		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3122}
3123
3124/**
3125 * ipr_update_ioa_ucode - Update IOA's microcode
3126 * @ioa_cfg:	ioa config struct
3127 * @sglist:		scatter/gather list
3128 *
3129 * Initiate an adapter reset to update the IOA's microcode
3130 *
3131 * Return value:
3132 * 	0 on success / -EIO on failure
3133 **/
3134static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3135				struct ipr_sglist *sglist)
3136{
3137	unsigned long lock_flags;
3138
3139	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3140	while(ioa_cfg->in_reset_reload) {
3141		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3143		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3144	}
3145
3146	if (ioa_cfg->ucode_sglist) {
3147		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148		dev_err(&ioa_cfg->pdev->dev,
3149			"Microcode download already in progress\n");
3150		return -EIO;
3151	}
3152
3153	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3154					sglist->num_sg, DMA_TO_DEVICE);
3155
3156	if (!sglist->num_dma_sg) {
3157		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158		dev_err(&ioa_cfg->pdev->dev,
3159			"Failed to map microcode download buffer!\n");
3160		return -EIO;
3161	}
3162
3163	ioa_cfg->ucode_sglist = sglist;
3164	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3165	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3166	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3167
3168	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3169	ioa_cfg->ucode_sglist = NULL;
3170	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3171	return 0;
3172}
3173
3174/**
3175 * ipr_store_update_fw - Update the firmware on the adapter
3176 * @dev:	device struct
3177 * @buf:	buffer
3178 * @count:	buffer size
3179 *
3180 * This function will update the firmware on the adapter.
3181 *
3182 * Return value:
3183 * 	count on success / other on failure
3184 **/
3185static ssize_t ipr_store_update_fw(struct device *dev,
3186				   struct device_attribute *attr,
3187				   const char *buf, size_t count)
3188{
3189	struct Scsi_Host *shost = class_to_shost(dev);
3190	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3191	struct ipr_ucode_image_header *image_hdr;
3192	const struct firmware *fw_entry;
3193	struct ipr_sglist *sglist;
3194	char fname[100];
3195	char *src;
3196	int len, result, dnld_size;
3197
3198	if (!capable(CAP_SYS_ADMIN))
3199		return -EACCES;
3200
3201	len = snprintf(fname, 99, "%s", buf);
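	/* Chop the trailing newline that typically ends a sysfs write */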
3202	fname[len-1] = '\0';
3203
3204	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3205		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3206		return -EIO;
3207	}
3208
3209	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3210
3211	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3212	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3213	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3214		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3215		release_firmware(fw_entry);
3216		return -EINVAL;
3217	}
3218
3219	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3220	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3221	sglist = ipr_alloc_ucode_buffer(dnld_size);
3222
3223	if (!sglist) {
3224		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3225		release_firmware(fw_entry);
3226		return -ENOMEM;
3227	}
3228
3229	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3230
3231	if (result) {
3232		dev_err(&ioa_cfg->pdev->dev,
3233			"Microcode buffer copy to DMA buffer failed\n");
3234		goto out;
3235	}
3236
3237	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3238
3239	if (!result)
3240		result = count;
3241out:
3242	ipr_free_ucode_buffer(sglist);
3243	release_firmware(fw_entry);
3244	return result;
3245}
3246
3247static struct device_attribute ipr_update_fw_attr = {
3248	.attr = {
3249		.name =		"update_fw",
3250		.mode =		S_IWUSR,
3251	},
3252	.store = ipr_store_update_fw
3253};
3254
3255static struct device_attribute *ipr_ioa_attrs[] = {
3256	&ipr_fw_version_attr,
3257	&ipr_log_level_attr,
3258	&ipr_diagnostics_attr,
3259	&ipr_ioa_state_attr,
3260	&ipr_ioa_reset_attr,
3261	&ipr_update_fw_attr,
3262	&ipr_ioa_cache_attr,
3263	NULL,
3264};
3265
3266#ifdef CONFIG_SCSI_IPR_DUMP
3267/**
3268 * ipr_read_dump - Dump the adapter
3269 * @kobj:		kobject struct
3270 * @bin_attr:		bin_attribute struct
3271 * @buf:		buffer
3272 * @off:		offset
3273 * @count:		buffer size
3274 *
3275 * Return value:
3276 *	number of bytes printed to buffer
3277 **/
3278static ssize_t ipr_read_dump(struct kobject *kobj,
3279			     struct bin_attribute *bin_attr,
3280			     char *buf, loff_t off, size_t count)
3281{
3282	struct device *cdev = container_of(kobj, struct device, kobj);
3283	struct Scsi_Host *shost = class_to_shost(cdev);
3284	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3285	struct ipr_dump *dump;
3286	unsigned long lock_flags = 0;
3287	char *src;
3288	int len;
3289	size_t rc = count;
3290
3291	if (!capable(CAP_SYS_ADMIN))
3292		return -EACCES;
3293
3294	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295	dump = ioa_cfg->dump;
3296
3297	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3298		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3299		return 0;
3300	}
3301	kref_get(&dump->kref);
3302	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303
3304	if (off > dump->driver_dump.hdr.len) {
3305		kref_put(&dump->kref, ipr_release_dump);
3306		return 0;
3307	}
3308
3309	if (off + count > dump->driver_dump.hdr.len) {
3310		count = dump->driver_dump.hdr.len - off;
3311		rc = count;
3312	}
3313
3314	if (count && off < sizeof(dump->driver_dump)) {
3315		if (off + count > sizeof(dump->driver_dump))
3316			len = sizeof(dump->driver_dump) - off;
3317		else
3318			len = count;
3319		src = (u8 *)&dump->driver_dump + off;
3320		memcpy(buf, src, len);
3321		buf += len;
3322		off += len;
3323		count -= len;
3324	}
3325
3326	off -= sizeof(dump->driver_dump);
3327
3328	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3329		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3330			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3331		else
3332			len = count;
3333		src = (u8 *)&dump->ioa_dump + off;
3334		memcpy(buf, src, len);
3335		buf += len;
3336		off += len;
3337		count -= len;
3338	}
3339
3340	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3341
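	/* The rest of the dump lives in the pages collected by ipr_sdt_copy; copy it page by page */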
3342	while (count) {
3343		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3344			len = PAGE_ALIGN(off) - off;
3345		else
3346			len = count;
3347		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3348		src += off & ~PAGE_MASK;
3349		memcpy(buf, src, len);
3350		buf += len;
3351		off += len;
3352		count -= len;
3353	}
3354
3355	kref_put(&dump->kref, ipr_release_dump);
3356	return rc;
3357}
3358
3359/**
3360 * ipr_alloc_dump - Prepare for adapter dump
3361 * @ioa_cfg:	ioa config struct
3362 *
3363 * Return value:
3364 *	0 on success / other on failure
3365 **/
3366static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3367{
3368	struct ipr_dump *dump;
3369	unsigned long lock_flags = 0;
3370
3371	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3372
3373	if (!dump) {
3374		ipr_err("Dump memory allocation failed\n");
3375		return -ENOMEM;
3376	}
3377
3378	kref_init(&dump->kref);
3379	dump->ioa_cfg = ioa_cfg;
3380
3381	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3382
3383	if (INACTIVE != ioa_cfg->sdt_state) {
3384		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385		kfree(dump);
3386		return 0;
3387	}
3388
3389	ioa_cfg->dump = dump;
3390	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3391	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3392		ioa_cfg->dump_taken = 1;
3393		schedule_work(&ioa_cfg->work_q);
3394	}
3395	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3396
3397	return 0;
3398}
3399
3400/**
3401 * ipr_free_dump - Free adapter dump memory
3402 * @ioa_cfg:	ioa config struct
3403 *
3404 * Return value:
3405 *	0 on success / other on failure
3406 **/
3407static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3408{
3409	struct ipr_dump *dump;
3410	unsigned long lock_flags = 0;
3411
3412	ENTER;
3413
3414	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415	dump = ioa_cfg->dump;
3416	if (!dump) {
3417		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3418		return 0;
3419	}
3420
3421	ioa_cfg->dump = NULL;
3422	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3423
3424	kref_put(&dump->kref, ipr_release_dump);
3425
3426	LEAVE;
3427	return 0;
3428}
3429
3430/**
3431 * ipr_write_dump - Setup dump state of adapter
3432 * @kobj:		kobject struct
3433 * @bin_attr:		bin_attribute struct
3434 * @buf:		buffer
3435 * @off:		offset
3436 * @count:		buffer size
3437 *
3438 * Return value:
3439 *	number of bytes consumed on success / other on failure
3440 **/
3441static ssize_t ipr_write_dump(struct kobject *kobj,
3442			      struct bin_attribute *bin_attr,
3443			      char *buf, loff_t off, size_t count)
3444{
3445	struct device *cdev = container_of(kobj, struct device, kobj);
3446	struct Scsi_Host *shost = class_to_shost(cdev);
3447	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3448	int rc;
3449
3450	if (!capable(CAP_SYS_ADMIN))
3451		return -EACCES;
3452
3453	if (buf[0] == '1')
3454		rc = ipr_alloc_dump(ioa_cfg);
3455	else if (buf[0] == '0')
3456		rc = ipr_free_dump(ioa_cfg);
3457	else
3458		return -EINVAL;
3459
3460	if (rc)
3461		return rc;
3462	else
3463		return count;
3464}
3465
3466static struct bin_attribute ipr_dump_attr = {
3467	.attr =	{
3468		.name = "dump",
3469		.mode = S_IRUSR | S_IWUSR,
3470	},
3471	.size = 0,
3472	.read = ipr_read_dump,
3473	.write = ipr_write_dump
3474};
3475#else
3476static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3477#endif
3478
3479/**
3480 * ipr_change_queue_depth - Change the device's queue depth
3481 * @sdev:	scsi device struct
3482 * @qdepth:	depth to set
3483 * @reason:	calling context
3484 *
3485 * Return value:
3486 * 	actual depth set
3487 **/
3488static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3489				  int reason)
3490{
3491	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3492	struct ipr_resource_entry *res;
3493	unsigned long lock_flags = 0;
3494
3495	if (reason != SCSI_QDEPTH_DEFAULT)
3496		return -EOPNOTSUPP;
3497
3498	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3499	res = (struct ipr_resource_entry *)sdev->hostdata;
3500
3501	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3502		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3503	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3504
3505	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3506	return sdev->queue_depth;
3507}
3508
3509/**
3510 * ipr_change_queue_type - Change the device's queue type
3511 * @sdev:		scsi device struct
3512 * @tag_type:	type of tags to use
3513 *
3514 * Return value:
3515 * 	actual queue type set
3516 **/
3517static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3518{
3519	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3520	struct ipr_resource_entry *res;
3521	unsigned long lock_flags = 0;
3522
3523	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3524	res = (struct ipr_resource_entry *)sdev->hostdata;
3525
3526	if (res) {
3527		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3528			/*
3529			 * We don't bother quiescing the device here since the
3530			 * adapter firmware does it for us.
3531			 */
3532			scsi_set_tag_type(sdev, tag_type);
3533
3534			if (tag_type)
3535				scsi_activate_tcq(sdev, sdev->queue_depth);
3536			else
3537				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3538		} else
3539			tag_type = 0;
3540	} else
3541		tag_type = 0;
3542
3543	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3544	return tag_type;
3545}
3546
3547/**
3548 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3549 * @dev:	device struct
3550 * @buf:	buffer
3551 *
3552 * Return value:
3553 * 	number of bytes printed to buffer
3554 **/
3555static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3556{
3557	struct scsi_device *sdev = to_scsi_device(dev);
3558	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3559	struct ipr_resource_entry *res;
3560	unsigned long lock_flags = 0;
3561	ssize_t len = -ENXIO;
3562
3563	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3564	res = (struct ipr_resource_entry *)sdev->hostdata;
3565	if (res)
3566		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3567	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3568	return len;
3569}
3570
3571static struct device_attribute ipr_adapter_handle_attr = {
3572	.attr = {
3573		.name = 	"adapter_handle",
3574		.mode =		S_IRUSR,
3575	},
3576	.show = ipr_show_adapter_handle
3577};
3578
3579static struct device_attribute *ipr_dev_attrs[] = {
3580	&ipr_adapter_handle_attr,
3581	NULL,
3582};
3583
3584/**
3585 * ipr_biosparam - Return the HSC mapping
3586 * @sdev:			scsi device struct
3587 * @block_device:	block device pointer
3588 * @capacity:		capacity of the device
3589 * @parm:			Array containing returned HSC values.
3590 *
3591 * This function generates the HSC parms that fdisk uses.
3592 * We want to make sure we return something that places partitions
3593 * on 4k boundaries for best performance with the IOA.
3594 *
3595 * Return value:
3596 * 	0 on success
3597 **/
3598static int ipr_biosparam(struct scsi_device *sdev,
3599			 struct block_device *block_device,
3600			 sector_t capacity, int *parm)
3601{
3602	int heads, sectors;
3603	sector_t cylinders;
3604
3605	heads = 128;
3606	sectors = 32;
3607
3608	cylinders = capacity;
3609	sector_div(cylinders, (128 * 32));
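	/*
	 * Note: with 512-byte sectors a cylinder works out to 128 * 32 =
	 * 4096 sectors (2 MiB), so partitioning tools that align partitions
	 * to cylinder boundaries also end up aligning them to 4k, which is
	 * what the comment above is after.
	 */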
3610
3611	/* return result */
3612	parm[0] = heads;
3613	parm[1] = sectors;
3614	parm[2] = cylinders;
3615
3616	return 0;
3617}
3618
3619/**
3620 * ipr_find_starget - Find target based on bus/target.
3621 * @starget:	scsi target struct
3622 *
3623 * Return value:
3624 * 	resource entry pointer if found / NULL if not found
3625 **/
3626static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3627{
3628	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3629	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3630	struct ipr_resource_entry *res;
3631
3632	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3633		if ((res->cfgte.res_addr.bus == starget->channel) &&
3634		    (res->cfgte.res_addr.target == starget->id) &&
3635		    (res->cfgte.res_addr.lun == 0)) {
3636			return res;
3637		}
3638	}
3639
3640	return NULL;
3641}
3642
3643static struct ata_port_info sata_port_info;
3644
3645/**
3646 * ipr_target_alloc - Prepare for commands to a SCSI target
3647 * @starget:	scsi target struct
3648 *
3649 * If the device is a SATA device, this function allocates an
3650 * ATA port with libata, else it does nothing.
3651 *
3652 * Return value:
3653 * 	0 on success / non-0 on failure
3654 **/
3655static int ipr_target_alloc(struct scsi_target *starget)
3656{
3657	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3658	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3659	struct ipr_sata_port *sata_port;
3660	struct ata_port *ap;
3661	struct ipr_resource_entry *res;
3662	unsigned long lock_flags;
3663
3664	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3665	res = ipr_find_starget(starget);
3666	starget->hostdata = NULL;
3667
3668	if (res && ipr_is_gata(res)) {
3669		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
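		/*
		 * Note: the host lock is dropped here because the GFP_KERNEL
		 * allocation and ata_sas_port_alloc() below may sleep; it is
		 * re-taken before the resource entry is wired up to the new
		 * sata_port.
		 */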
3670		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3671		if (!sata_port)
3672			return -ENOMEM;
3673
3674		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3675		if (ap) {
3676			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3677			sata_port->ioa_cfg = ioa_cfg;
3678			sata_port->ap = ap;
3679			sata_port->res = res;
3680
3681			res->sata_port = sata_port;
3682			ap->private_data = sata_port;
3683			starget->hostdata = sata_port;
3684		} else {
3685			kfree(sata_port);
3686			return -ENOMEM;
3687		}
3688	}
3689	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3690
3691	return 0;
3692}
3693
3694/**
3695 * ipr_target_destroy - Destroy a SCSI target
3696 * @starget:	scsi target struct
3697 *
3698 * If the device was a SATA device, this function frees the libata
3699 * ATA port, else it does nothing.
3700 *
3701 **/
3702static void ipr_target_destroy(struct scsi_target *starget)
3703{
3704	struct ipr_sata_port *sata_port = starget->hostdata;
3705
3706	if (sata_port) {
3707		starget->hostdata = NULL;
3708		ata_sas_port_destroy(sata_port->ap);
3709		kfree(sata_port);
3710	}
3711}
3712
3713/**
3714 * ipr_find_sdev - Find device based on bus/target/lun.
3715 * @sdev:	scsi device struct
3716 *
3717 * Return value:
3718 * 	resource entry pointer if found / NULL if not found
3719 **/
3720static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3721{
3722	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3723	struct ipr_resource_entry *res;
3724
3725	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3726		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3727		    (res->cfgte.res_addr.target == sdev->id) &&
3728		    (res->cfgte.res_addr.lun == sdev->lun))
3729			return res;
3730	}
3731
3732	return NULL;
3733}
3734
3735/**
3736 * ipr_slave_destroy - Unconfigure a SCSI device
3737 * @sdev:	scsi device struct
3738 *
3739 * Return value:
3740 * 	nothing
3741 **/
3742static void ipr_slave_destroy(struct scsi_device *sdev)
3743{
3744	struct ipr_resource_entry *res;
3745	struct ipr_ioa_cfg *ioa_cfg;
3746	unsigned long lock_flags = 0;
3747
3748	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3749
3750	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3751	res = (struct ipr_resource_entry *) sdev->hostdata;
3752	if (res) {
3753		if (res->sata_port)
3754			ata_port_disable(res->sata_port->ap);
3755		sdev->hostdata = NULL;
3756		res->sdev = NULL;
3757		res->sata_port = NULL;
3758	}
3759	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3760}
3761
3762/**
3763 * ipr_slave_configure - Configure a SCSI device
3764 * @sdev:	scsi device struct
3765 *
3766 * This function configures the specified scsi device.
3767 *
3768 * Return value:
3769 * 	0 on success
3770 **/
3771static int ipr_slave_configure(struct scsi_device *sdev)
3772{
3773	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3774	struct ipr_resource_entry *res;
3775	struct ata_port *ap = NULL;
3776	unsigned long lock_flags = 0;
3777
3778	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3779	res = sdev->hostdata;
3780	if (res) {
3781		if (ipr_is_af_dasd_device(res))
3782			sdev->type = TYPE_RAID;
3783		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3784			sdev->scsi_level = 4;
3785			sdev->no_uld_attach = 1;
3786		}
3787		if (ipr_is_vset_device(res)) {
3788			blk_queue_rq_timeout(sdev->request_queue,
3789					     IPR_VSET_RW_TIMEOUT);
3790			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3791		}
3792		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3793			sdev->allow_restart = 1;
3794		if (ipr_is_gata(res) && res->sata_port)
3795			ap = res->sata_port->ap;
3796		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3797
3798		if (ap) {
3799			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3800			ata_sas_slave_configure(sdev, ap);
3801		} else
3802			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3803		return 0;
3804	}
3805	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3806	return 0;
3807}
3808
3809/**
3810 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3811 * @sdev:	scsi device struct
3812 *
3813 * This function initializes an ATA port so that future commands
3814 * sent through queuecommand will work.
3815 *
3816 * Return value:
3817 * 	0 on success
3818 **/
3819static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3820{
3821	struct ipr_sata_port *sata_port = NULL;
3822	int rc = -ENXIO;
3823
3824	ENTER;
3825	if (sdev->sdev_target)
3826		sata_port = sdev->sdev_target->hostdata;
3827	if (sata_port)
3828		rc = ata_sas_port_init(sata_port->ap);
3829	if (rc)
3830		ipr_slave_destroy(sdev);
3831
3832	LEAVE;
3833	return rc;
3834}
3835
3836/**
3837 * ipr_slave_alloc - Prepare for commands to a device.
3838 * @sdev:	scsi device struct
3839 *
3840 * This function saves a pointer to the resource entry
3841 * in the scsi device struct if the device exists. We
3842 * can then use this pointer in ipr_queuecommand when
3843 * handling new commands.
3844 *
3845 * Return value:
3846 * 	0 on success / -ENXIO if device does not exist
3847 **/
3848static int ipr_slave_alloc(struct scsi_device *sdev)
3849{
3850	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3851	struct ipr_resource_entry *res;
3852	unsigned long lock_flags;
3853	int rc = -ENXIO;
3854
3855	sdev->hostdata = NULL;
3856
3857	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3858
3859	res = ipr_find_sdev(sdev);
3860	if (res) {
3861		res->sdev = sdev;
3862		res->add_to_ml = 0;
3863		res->in_erp = 0;
3864		sdev->hostdata = res;
3865		if (!ipr_is_naca_model(res))
3866			res->needs_sync_complete = 1;
3867		rc = 0;
3868		if (ipr_is_gata(res)) {
3869			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3870			return ipr_ata_slave_alloc(sdev);
3871		}
3872	}
3873
3874	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3875
3876	return rc;
3877}
3878
3879/**
3880 * __ipr_eh_host_reset - Reset the host adapter
3881 * @scsi_cmd:	scsi command struct
3882 *
3883 * Return value:
3884 * 	SUCCESS / FAILED
3885 **/
3886static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3887{
3888	struct ipr_ioa_cfg *ioa_cfg;
3889	int rc;
3890
3891	ENTER;
3892	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3893
3894	dev_err(&ioa_cfg->pdev->dev,
3895		"Adapter being reset as a result of error recovery.\n");
3896
3897	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3898		ioa_cfg->sdt_state = GET_DUMP;
3899
3900	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3901
3902	LEAVE;
3903	return rc;
3904}
3905
3906static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3907{
3908	int rc;
3909
3910	spin_lock_irq(cmd->device->host->host_lock);
3911	rc = __ipr_eh_host_reset(cmd);
3912	spin_unlock_irq(cmd->device->host->host_lock);
3913
3914	return rc;
3915}
3916
3917/**
3918 * ipr_device_reset - Reset the device
3919 * @ioa_cfg:	ioa config struct
3920 * @res:		resource entry struct
3921 *
3922 * This function issues a device reset to the affected device.
3923 * If the device is a SCSI device, a LUN reset will be sent
3924 * to the device first. If that does not work, a target reset
3925 * will be sent. If the device is a SATA device, a PHY reset will
3926 * be sent.
3927 *
3928 * Return value:
3929 *	0 on success / non-zero on failure
3930 **/
3931static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3932			    struct ipr_resource_entry *res)
3933{
3934	struct ipr_cmnd *ipr_cmd;
3935	struct ipr_ioarcb *ioarcb;
3936	struct ipr_cmd_pkt *cmd_pkt;
3937	struct ipr_ioarcb_ata_regs *regs;
3938	u32 ioasc;
3939
3940	ENTER;
3941	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3942	ioarcb = &ipr_cmd->ioarcb;
3943	cmd_pkt = &ioarcb->cmd_pkt;
3944
3945	if (ipr_cmd->ioa_cfg->sis64) {
3946		regs = &ipr_cmd->i.ata_ioadl.regs;
3947		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
3948	} else
3949		regs = &ioarcb->u.add_data.u.regs;
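	/*
	 * Note: on 64-bit SIS adapters the ATA register block lives in the
	 * command's ata_ioadl area and is located through the additional
	 * command parameter offset, while older adapters keep it directly
	 * in the IOARCB's add_data union.
	 */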
3950
3951	ioarcb->res_handle = res->cfgte.res_handle;
3952	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3953	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3954	if (ipr_is_gata(res)) {
3955		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3956		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
3957		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3958	}
3959
3960	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3961	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3962	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3963	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3964		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3965		       sizeof(struct ipr_ioasa_gata));
3966
3967	LEAVE;
3968	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3969}
3970
3971/**
3972 * ipr_sata_reset - Reset the SATA port
3973 * @link:	SATA link to reset
3974 * @classes:	class of the attached device
3975 *
3976 * This function issues a SATA phy reset to the affected ATA link.
3977 *
3978 * Return value:
3979 *	0 on success / non-zero on failure
3980 **/
3981static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3982				unsigned long deadline)
3983{
3984	struct ipr_sata_port *sata_port = link->ap->private_data;
3985	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3986	struct ipr_resource_entry *res;
3987	unsigned long lock_flags = 0;
3988	int rc = -ENXIO;
3989
3990	ENTER;
3991	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3992	while(ioa_cfg->in_reset_reload) {
3993		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3994		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3995		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3996	}
3997
3998	res = sata_port->res;
3999	if (res) {
4000		rc = ipr_device_reset(ioa_cfg, res);
4001		switch(res->cfgte.proto) {
4002		case IPR_PROTO_SATA:
4003		case IPR_PROTO_SAS_STP:
4004			*classes = ATA_DEV_ATA;
4005			break;
4006		case IPR_PROTO_SATA_ATAPI:
4007		case IPR_PROTO_SAS_STP_ATAPI:
4008			*classes = ATA_DEV_ATAPI;
4009			break;
4010		default:
4011			*classes = ATA_DEV_UNKNOWN;
4012			break;
4013		}
4014	}
4015
4016	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4017	LEAVE;
4018	return rc;
4019}
4020
4021/**
4022 * __ipr_eh_dev_reset - Reset the device
4023 * @scsi_cmd:	scsi command struct
4024 *
4025 * This function issues a device reset to the affected device.
4026 * A LUN reset will be sent to the device first. If that does
4027 * not work, a target reset will be sent.
4028 *
4029 * Return value:
4030 *	SUCCESS / FAILED
4031 **/
4032static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4033{
4034	struct ipr_cmnd *ipr_cmd;
4035	struct ipr_ioa_cfg *ioa_cfg;
4036	struct ipr_resource_entry *res;
4037	struct ata_port *ap;
4038	int rc = 0;
4039
4040	ENTER;
4041	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4042	res = scsi_cmd->device->hostdata;
4043
4044	if (!res)
4045		return FAILED;
4046
4047	/*
4048	 * If we are currently going through reset/reload, return failed. This will force the
4049	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4050	 * reset to complete
4051	 */
4052	if (ioa_cfg->in_reset_reload)
4053		return FAILED;
4054	if (ioa_cfg->ioa_is_dead)
4055		return FAILED;
4056
4057	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4058		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
4059			if (ipr_cmd->scsi_cmd)
4060				ipr_cmd->done = ipr_scsi_eh_done;
4061			if (ipr_cmd->qc)
4062				ipr_cmd->done = ipr_sata_eh_done;
4063			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4064				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4065				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4066			}
4067		}
4068	}
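	/*
	 * Note: the loop above retargets the done routines of any commands
	 * still pending for this device so they complete back into the error
	 * handler rather than the normal path, and marks outstanding ATA
	 * commands as failed so libata EH will pick them up during the reset
	 * below.
	 */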
4069
4070	res->resetting_device = 1;
4071	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4072
4073	if (ipr_is_gata(res) && res->sata_port) {
4074		ap = res->sata_port->ap;
4075		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4076		ata_std_error_handler(ap);
4077		spin_lock_irq(scsi_cmd->device->host->host_lock);
4078
4079		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4080			if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
4081				rc = -EIO;
4082				break;
4083			}
4084		}
4085	} else
4086		rc = ipr_device_reset(ioa_cfg, res);
4087	res->resetting_device = 0;
4088
4089	LEAVE;
4090	return (rc ? FAILED : SUCCESS);
4091}
4092
4093static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4094{
4095	int rc;
4096
4097	spin_lock_irq(cmd->device->host->host_lock);
4098	rc = __ipr_eh_dev_reset(cmd);
4099	spin_unlock_irq(cmd->device->host->host_lock);
4100
4101	return rc;
4102}
4103
4104/**
4105 * ipr_bus_reset_done - Op done function for bus reset.
4106 * @ipr_cmd:	ipr command struct
4107 *
4108 * This function is the op done function for a bus reset
4109 *
4110 * Return value:
4111 * 	none
4112 **/
4113static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4114{
4115	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4116	struct ipr_resource_entry *res;
4117
4118	ENTER;
4119	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4120		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
4121			    sizeof(res->cfgte.res_handle))) {
4122			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
4123			break;
4124		}
4125	}
4126
4127	/*
4128	 * If abort has not completed, indicate the reset has, else call the
4129	 * abort's done function to wake the sleeping eh thread
4130	 */
4131	if (ipr_cmd->sibling->sibling)
4132		ipr_cmd->sibling->sibling = NULL;
4133	else
4134		ipr_cmd->sibling->done(ipr_cmd->sibling);
4135
4136	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4137	LEAVE;
4138}
4139
4140/**
4141 * ipr_abort_timeout - An abort task has timed out
4142 * @ipr_cmd:	ipr command struct
4143 *
4144 * This function handles when an abort task times out. If this
4145 * happens we issue a bus reset since we have resources tied
4146 * up that must be freed before returning to the midlayer.
4147 *
4148 * Return value:
4149 *	none
4150 **/
4151static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4152{
4153	struct ipr_cmnd *reset_cmd;
4154	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4155	struct ipr_cmd_pkt *cmd_pkt;
4156	unsigned long lock_flags = 0;
4157
4158	ENTER;
4159	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4160	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4161		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4162		return;
4163	}
4164
4165	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4166	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4167	ipr_cmd->sibling = reset_cmd;
4168	reset_cmd->sibling = ipr_cmd;
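	/*
	 * Note: the abort command and the bus reset are cross-linked via
	 * their sibling pointers; ipr_bus_reset_done() uses that link to
	 * tell whether the abort has already finished and, if it has, to
	 * call the abort's done routine and wake the sleeping eh thread.
	 */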
4169	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4170	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4171	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4172	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4173	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4174
4175	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4176	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4177	LEAVE;
4178}
4179
4180/**
4181 * ipr_cancel_op - Cancel specified op
4182 * @scsi_cmd:	scsi command struct
4183 *
4184 * This function cancels specified op.
4185 *
4186 * Return value:
4187 *	SUCCESS / FAILED
4188 **/
4189static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4190{
4191	struct ipr_cmnd *ipr_cmd;
4192	struct ipr_ioa_cfg *ioa_cfg;
4193	struct ipr_resource_entry *res;
4194	struct ipr_cmd_pkt *cmd_pkt;
4195	u32 ioasc;
4196	int op_found = 0;
4197
4198	ENTER;
4199	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4200	res = scsi_cmd->device->hostdata;
4201
4202	/* If we are currently going through reset/reload, return failed.
4203	 * This will force the mid-layer to call ipr_eh_host_reset,
4204	 * which will then go to sleep and wait for the reset to complete
4205	 */
4206	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4207		return FAILED;
4208	if (!res || !ipr_is_gscsi(res))
4209		return FAILED;
4210
4211	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4212		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4213			ipr_cmd->done = ipr_scsi_eh_done;
4214			op_found = 1;
4215			break;
4216		}
4217	}
4218
4219	if (!op_found)
4220		return SUCCESS;
4221
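	/*
	 * Note: if the command was not found on the pending queue it has
	 * already completed, so the abort above succeeds trivially.
	 * Otherwise a cancel all is sent to the device rather than an abort
	 * of the individual op.
	 */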
4222	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4223	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4224	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4225	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4226	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4227	ipr_cmd->u.sdev = scsi_cmd->device;
4228
4229	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4230		    scsi_cmd->cmnd[0]);
4231	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4232	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4233
4234	/*
4235	 * If the abort task timed out and we sent a bus reset, we will get
4236 * one of the following responses to the abort
4237	 */
4238	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4239		ioasc = 0;
4240		ipr_trace;
4241	}
4242
4243	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4244	if (!ipr_is_naca_model(res))
4245		res->needs_sync_complete = 1;
4246
4247	LEAVE;
4248	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4249}
4250
4251/**
4252 * ipr_eh_abort - Abort a single op
4253 * @scsi_cmd:	scsi command struct
4254 *
4255 * Return value:
4256 * 	SUCCESS / FAILED
4257 **/
4258static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4259{
4260	unsigned long flags;
4261	int rc;
4262
4263	ENTER;
4264
4265	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4266	rc = ipr_cancel_op(scsi_cmd);
4267	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4268
4269	LEAVE;
4270	return rc;
4271}
4272
4273/**
4274 * ipr_handle_other_interrupt - Handle "other" interrupts
4275 * @ioa_cfg:	ioa config struct
4276 * @int_reg:	interrupt register
4277 *
4278 * Return value:
4279 * 	IRQ_NONE / IRQ_HANDLED
4280 **/
4281static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4282					      volatile u32 int_reg)
4283{
4284	irqreturn_t rc = IRQ_HANDLED;
4285
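	/*
	 * Note: a transition-to-operational interrupt is expected during
	 * adapter bring-up and simply advances the in-progress reset job;
	 * anything else that lands here is either a unit check or a
	 * permanent IOA failure, both of which trigger a full adapter reset
	 * (and a dump, if one has been requested).
	 */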
4286	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4287		/* Mask the interrupt */
4288		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4289
4290		/* Clear the interrupt */
4291		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4292		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4293
4294		list_del(&ioa_cfg->reset_cmd->queue);
4295		del_timer(&ioa_cfg->reset_cmd->timer);
4296		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4297	} else {
4298		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4299			ioa_cfg->ioa_unit_checked = 1;
4300		else
4301			dev_err(&ioa_cfg->pdev->dev,
4302				"Permanent IOA failure. 0x%08X\n", int_reg);
4303
4304		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4305			ioa_cfg->sdt_state = GET_DUMP;
4306
4307		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4308		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4309	}
4310
4311	return rc;
4312}
4313
4314/**
4315 * ipr_isr_eh - Interrupt service routine error handler
4316 * @ioa_cfg:	ioa config struct
4317 * @msg:	message to log
4318 *
4319 * Return value:
4320 * 	none
4321 **/
4322static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4323{
4324	ioa_cfg->errors_logged++;
4325	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4326
4327	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4328		ioa_cfg->sdt_state = GET_DUMP;
4329
4330	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4331}
4332
4333/**
4334 * ipr_isr - Interrupt service routine
4335 * @irq:	irq number
4336 * @devp:	pointer to ioa config struct
4337 *
4338 * Return value:
4339 * 	IRQ_NONE / IRQ_HANDLED
4340 **/
4341static irqreturn_t ipr_isr(int irq, void *devp)
4342{
4343	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4344	unsigned long lock_flags = 0;
4345	volatile u32 int_reg, int_mask_reg;
4346	u32 ioasc;
4347	u16 cmd_index;
4348	int num_hrrq = 0;
4349	struct ipr_cmnd *ipr_cmd;
4350	irqreturn_t rc = IRQ_NONE;
4351
4352	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4353
4354	/* If interrupts are disabled, ignore the interrupt */
4355	if (!ioa_cfg->allow_interrupts) {
4356		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4357		return IRQ_NONE;
4358	}
4359
4360	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4361	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4362
4363	/* If an interrupt on the adapter did not occur, ignore it */
4364	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4365		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4366		return IRQ_NONE;
4367	}
4368
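	/*
	 * Note: the host RRQ is consumed as a circular buffer of response
	 * handles.  Each entry carries a command index plus a toggle bit;
	 * only entries whose toggle bit matches ioa_cfg->toggle_bit are
	 * valid, and the driver flips its copy of the bit every time
	 * hrrq_curr wraps back to hrrq_start, so stale entries from the
	 * previous pass around the ring are ignored.
	 */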
4369	while (1) {
4370		ipr_cmd = NULL;
4371
4372		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4373		       ioa_cfg->toggle_bit) {
4374
4375			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4376				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4377
4378			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4379				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4380				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4381				return IRQ_HANDLED;
4382			}
4383
4384			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4385
4386			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4387
4388			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4389
4390			list_del(&ipr_cmd->queue);
4391			del_timer(&ipr_cmd->timer);
4392			ipr_cmd->done(ipr_cmd);
4393
4394			rc = IRQ_HANDLED;
4395
4396			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4397				ioa_cfg->hrrq_curr++;
4398			} else {
4399				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4400				ioa_cfg->toggle_bit ^= 1u;
4401			}
4402		}
4403
4404		if (ipr_cmd != NULL) {
4405			/* Clear the PCI interrupt */
4406			do {
4407				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4408				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4409			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4410					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4411
4412			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4413				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4414				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4415				return IRQ_HANDLED;
4416			}
4417
4418		} else
4419			break;
4420	}
4421
4422	if (unlikely(rc == IRQ_NONE))
4423		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4424
4425	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4426	return rc;
4427}
4428
4429/**
4430 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4431 * @ioa_cfg:	ioa config struct
4432 * @ipr_cmd:	ipr command struct
4433 *
4434 * Return value:
4435 * 	0 on success / -1 on failure
4436 **/
4437static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4438			     struct ipr_cmnd *ipr_cmd)
4439{
4440	int i, nseg;
4441	struct scatterlist *sg;
4442	u32 length;
4443	u32 ioadl_flags = 0;
4444	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4445	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4446	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
4447
4448	length = scsi_bufflen(scsi_cmd);
4449	if (!length)
4450		return 0;
4451
4452	nseg = scsi_dma_map(scsi_cmd);
4453	if (nseg < 0) {
4454		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
4455		return -1;
4456	}
4457
4458	ipr_cmd->dma_use_sg = nseg;
4459
4460	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4461		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4462		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4463	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
4464		ioadl_flags = IPR_IOADL_FLAGS_READ;
4465
4466	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4467		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
4468		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
4469		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
4470	}
4471
4472	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4473	return 0;
4474}
4475
4476/**
4477 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4478 * @ioa_cfg:	ioa config struct
4479 * @ipr_cmd:	ipr command struct
4480 *
4481 * Return value:
4482 * 	0 on success / -1 on failure
4483 **/
4484static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4485			   struct ipr_cmnd *ipr_cmd)
4486{
4487	int i, nseg;
4488	struct scatterlist *sg;
4489	u32 length;
4490	u32 ioadl_flags = 0;
4491	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4492	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4493	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4494
4495	length = scsi_bufflen(scsi_cmd);
4496	if (!length)
4497		return 0;
4498
4499	nseg = scsi_dma_map(scsi_cmd);
4500	if (nseg < 0) {
4501		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
4502		return -1;
4503	}
4504
4505	ipr_cmd->dma_use_sg = nseg;
4506
4507	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4508		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4509		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4510		ioarcb->data_transfer_length = cpu_to_be32(length);
4511		ioarcb->ioadl_len =
4512			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4513	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4514		ioadl_flags = IPR_IOADL_FLAGS_READ;
4515		ioarcb->read_data_transfer_length = cpu_to_be32(length);
4516		ioarcb->read_ioadl_len =
4517			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4518	}
4519
4520	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
4521		ioadl = ioarcb->u.add_data.u.ioadl;
4522		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
4523				    offsetof(struct ipr_ioarcb, u.add_data));
4524		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4525	}
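	/*
	 * Note: when the scatter/gather list is short enough to fit in the
	 * IOARCB's add_data area, the IOADL pointers above are redirected
	 * back into the IOARCB itself, presumably so the adapter fetches
	 * the descriptors along with the command block instead of doing a
	 * separate DMA read.
	 */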
4526
4527	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4528		ioadl[i].flags_and_data_len =
4529			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4530		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4531	}
4532
4533	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4534	return 0;
4535}
4536
4537/**
4538 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4539 * @scsi_cmd:	scsi command struct
4540 *
4541 * Return value:
4542 * 	task attributes
4543 **/
4544static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4545{
4546	u8 tag[2];
4547	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4548
4549	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4550		switch (tag[0]) {
4551		case MSG_SIMPLE_TAG:
4552			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4553			break;
4554		case MSG_HEAD_TAG:
4555			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4556			break;
4557		case MSG_ORDERED_TAG:
4558			rc = IPR_FLAGS_LO_ORDERED_TASK;
4559			break;
4560		}
4561	}
4562
4563	return rc;
4564}
4565
4566/**
4567 * ipr_erp_done - Process completion of ERP for a device
4568 * @ipr_cmd:		ipr command struct
4569 *
4570 * This function copies the sense buffer into the scsi_cmd
4571 * struct and pushes the scsi_done function.
4572 *
4573 * Return value:
4574 * 	nothing
4575 **/
4576static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4577{
4578	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4579	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4580	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4581	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4582
4583	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4584		scsi_cmd->result |= (DID_ERROR << 16);
4585		scmd_printk(KERN_ERR, scsi_cmd,
4586			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4587	} else {
4588		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4589		       SCSI_SENSE_BUFFERSIZE);
4590	}
4591
4592	if (res) {
4593		if (!ipr_is_naca_model(res))
4594			res->needs_sync_complete = 1;
4595		res->in_erp = 0;
4596	}
4597	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4598	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4599	scsi_cmd->scsi_done(scsi_cmd);
4600}
4601
4602/**
4603 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4604 * @ipr_cmd:	ipr command struct
4605 *
4606 * Return value:
4607 * 	none
4608 **/
4609static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4610{
4611	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4612	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4613	dma_addr_t dma_addr = ipr_cmd->dma_addr;
4614
4615	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4616	ioarcb->data_transfer_length = 0;
4617	ioarcb->read_data_transfer_length = 0;
4618	ioarcb->ioadl_len = 0;
4619	ioarcb->read_ioadl_len = 0;
4620	ioasa->ioasc = 0;
4621	ioasa->residual_data_len = 0;
4622
4623	if (ipr_cmd->ioa_cfg->sis64)
4624		ioarcb->u.sis64_addr_data.data_ioadl_addr =
4625			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
4626	else {
4627		ioarcb->write_ioadl_addr =
4628			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
4629		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4630	}
4631}
4632
4633/**
4634 * ipr_erp_request_sense - Send request sense to a device
4635 * @ipr_cmd:	ipr command struct
4636 *
4637 * This function sends a request sense to a device as a result
4638 * of a check condition.
4639 *
4640 * Return value:
4641 * 	nothing
4642 **/
4643static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4644{
4645	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4646	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4647
4648	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4649		ipr_erp_done(ipr_cmd);
4650		return;
4651	}
4652
4653	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4654
4655	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4656	cmd_pkt->cdb[0] = REQUEST_SENSE;
4657	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4658	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4659	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4660	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4661
4662	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
4663		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
4664
4665	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4666		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4667}
4668
4669/**
4670 * ipr_erp_cancel_all - Send cancel all to a device
4671 * @ipr_cmd:	ipr command struct
4672 *
4673 * This function sends a cancel all to a device to clear the
4674 * queue. If we are running TCQ on the device, QERR is set to 1,
4675 * which means all outstanding ops have been dropped on the floor.
4676 * Cancel all will return them to us.
4677 *
4678 * Return value:
4679 * 	nothing
4680 **/
4681static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4682{
4683	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4684	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4685	struct ipr_cmd_pkt *cmd_pkt;
4686
4687	res->in_erp = 1;
4688
4689	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4690
4691	if (!scsi_get_tag_type(scsi_cmd->device)) {
4692		ipr_erp_request_sense(ipr_cmd);
4693		return;
4694	}
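	/*
	 * Note: for an untagged device there is nothing queued inside the
	 * device to flush, so the early return above skips the cancel all
	 * and goes straight to the request sense.
	 */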
4695
4696	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4697	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4698	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4699
4700	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4701		   IPR_CANCEL_ALL_TIMEOUT);
4702}
4703
4704/**
4705 * ipr_dump_ioasa - Dump contents of IOASA
4706 * @ioa_cfg:	ioa config struct
4707 * @ipr_cmd:	ipr command struct
4708 * @res:		resource entry struct
4709 *
4710 * This function is invoked by the interrupt handler when ops
4711 * fail. It will log the IOASA if appropriate. Only called
4712 * for GPDD ops.
4713 *
4714 * Return value:
4715 * 	none
4716 **/
4717static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4718			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4719{
4720	int i;
4721	u16 data_len;
4722	u32 ioasc, fd_ioasc;
4723	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4724	__be32 *ioasa_data = (__be32 *)ioasa;
4725	int error_index;
4726
4727	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4728	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4729
4730	if (0 == ioasc)
4731		return;
4732
4733	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4734		return;
4735
4736	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4737		error_index = ipr_get_error(fd_ioasc);
4738	else
4739		error_index = ipr_get_error(ioasc);
4740
4741	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4742		/* Don't log an error if the IOA already logged one */
4743		if (ioasa->ilid != 0)
4744			return;
4745
4746		if (!ipr_is_gscsi(res))
4747			return;
4748
4749		if (ipr_error_table[error_index].log_ioasa == 0)
4750			return;
4751	}
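	/*
	 * Note: below the maximum log level the checks above suppress the
	 * IOASA dump when the adapter already logged the error itself
	 * (ilid != 0), when the device is not a generic SCSI device, or
	 * when the error table marks this IOASC as not worth dumping;
	 * raising the module log level bypasses these filters.
	 */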
4752
4753	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4754
4755	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4756		data_len = sizeof(struct ipr_ioasa);
4757	else
4758		data_len = be16_to_cpu(ioasa->ret_stat_len);
4759
4760	ipr_err("IOASA Dump:\n");
4761
4762	for (i = 0; i < data_len / 4; i += 4) {
4763		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4764			be32_to_cpu(ioasa_data[i]),
4765			be32_to_cpu(ioasa_data[i+1]),
4766			be32_to_cpu(ioasa_data[i+2]),
4767			be32_to_cpu(ioasa_data[i+3]));
4768	}
4769}
4770
4771/**
4772 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4773 * @ipr_cmd:	ipr command struct
4775 *
4776 * Return value:
4777 * 	none
4778 **/
4779static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4780{
4781	u32 failing_lba;
4782	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4783	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4784	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4785	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4786
4787	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4788
4789	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4790		return;
4791
4792	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
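	/*
	 * Note: the branch below builds descriptor format sense data
	 * (response code 0x72) when a volume set reports a failing LBA that
	 * needs more than 32 bits, since the information descriptor can
	 * carry a 64-bit LBA; every other case uses fixed format sense
	 * (0x70), whose information field is limited to 32 bits.
	 */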
4793
4794	if (ipr_is_vset_device(res) &&
4795	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4796	    ioasa->u.vset.failing_lba_hi != 0) {
4797		sense_buf[0] = 0x72;
4798		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4799		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4800		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4801
4802		sense_buf[7] = 12;
4803		sense_buf[8] = 0;
4804		sense_buf[9] = 0x0A;
4805		sense_buf[10] = 0x80;
4806
4807		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4808
4809		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4810		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4811		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4812		sense_buf[15] = failing_lba & 0x000000ff;
4813
4814		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4815
4816		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4817		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4818		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4819		sense_buf[19] = failing_lba & 0x000000ff;
4820	} else {
4821		sense_buf[0] = 0x70;
4822		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4823		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4824		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4825
4826		/* Illegal request */
4827		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4828		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4829			sense_buf[7] = 10;	/* additional length */
4830
4831			/* IOARCB was in error */
4832			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4833				sense_buf[15] = 0xC0;
4834			else	/* Parameter data was invalid */
4835				sense_buf[15] = 0x80;
4836
4837			sense_buf[16] =
4838			    ((IPR_FIELD_POINTER_MASK &
4839			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4840			sense_buf[17] =
4841			    (IPR_FIELD_POINTER_MASK &
4842			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4843		} else {
4844			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4845				if (ipr_is_vset_device(res))
4846					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4847				else
4848					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4849
4850				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4851				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4852				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4853				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4854				sense_buf[6] = failing_lba & 0x000000ff;
4855			}
4856
4857			sense_buf[7] = 6;	/* additional length */
4858		}
4859	}
4860}
4861
4862/**
4863 * ipr_get_autosense - Copy autosense data to sense buffer
4864 * @ipr_cmd:	ipr command struct
4865 *
4866 * This function copies the autosense buffer to the buffer
4867 * in the scsi_cmd, if there is autosense available.
4868 *
4869 * Return value:
4870 *	1 if autosense was available / 0 if not
4871 **/
4872static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4873{
4874	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4875
4876	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4877		return 0;
4878
4879	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4880	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4881		   SCSI_SENSE_BUFFERSIZE));
4882	return 1;
4883}
4884
4885/**
4886 * ipr_erp_start - Process an error response for a SCSI op
4887 * @ioa_cfg:	ioa config struct
4888 * @ipr_cmd:	ipr command struct
4889 *
4890 * This function determines whether or not to initiate ERP
4891 * on the affected device.
4892 *
4893 * Return value:
4894 * 	nothing
4895 **/
4896static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4897			      struct ipr_cmnd *ipr_cmd)
4898{
4899	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4900	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4901	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4902	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4903
4904	if (!res) {
4905		ipr_scsi_eh_done(ipr_cmd);
4906		return;
4907	}
4908
4909	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4910		ipr_gen_sense(ipr_cmd);
4911
4912	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
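	/*
	 * Note: the switch below maps the masked IOASC to a mid-layer
	 * result.  Most cases just set scsi_cmd->result and fall through to
	 * the completion at the bottom of this function; the check condition
	 * case is the only one that can enter full ERP via
	 * ipr_erp_cancel_all(), in which case the command is completed later
	 * from the ERP path instead.
	 */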
4913
4914	switch (masked_ioasc) {
4915	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4916		if (ipr_is_naca_model(res))
4917			scsi_cmd->result |= (DID_ABORT << 16);
4918		else
4919			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4920		break;
4921	case IPR_IOASC_IR_RESOURCE_HANDLE:
4922	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4923		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4924		break;
4925	case IPR_IOASC_HW_SEL_TIMEOUT:
4926		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4927		if (!ipr_is_naca_model(res))
4928			res->needs_sync_complete = 1;
4929		break;
4930	case IPR_IOASC_SYNC_REQUIRED:
4931		if (!res->in_erp)
4932			res->needs_sync_complete = 1;
4933		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4934		break;
4935	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4936	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4937		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4938		break;
4939	case IPR_IOASC_BUS_WAS_RESET:
4940	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4941		/*
4942		 * Report the bus reset and ask for a retry. The device
4943		 * will give CC/UA the next command.
4944		 */
4945		if (!res->resetting_device)
4946			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4947		scsi_cmd->result |= (DID_ERROR << 16);
4948		if (!ipr_is_naca_model(res))
4949			res->needs_sync_complete = 1;
4950		break;
4951	case IPR_IOASC_HW_DEV_BUS_STATUS:
4952		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4953		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4954			if (!ipr_get_autosense(ipr_cmd)) {
4955				if (!ipr_is_naca_model(res)) {
4956					ipr_erp_cancel_all(ipr_cmd);
4957					return;
4958				}
4959			}
4960		}
4961		if (!ipr_is_naca_model(res))
4962			res->needs_sync_complete = 1;
4963		break;
4964	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4965		break;
4966	default:
4967		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4968			scsi_cmd->result |= (DID_ERROR << 16);
4969		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4970			res->needs_sync_complete = 1;
4971		break;
4972	}
4973
4974	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4975	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4976	scsi_cmd->scsi_done(scsi_cmd);
4977}
4978
4979/**
4980 * ipr_scsi_done - mid-layer done function
4981 * @ipr_cmd:	ipr command struct
4982 *
4983 * This function is invoked by the interrupt handler for
4984 * ops generated by the SCSI mid-layer
4985 *
4986 * Return value:
4987 * 	none
4988 **/
4989static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4990{
4991	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4992	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4993	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4994
4995	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4996
4997	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4998		scsi_dma_unmap(ipr_cmd->scsi_cmd);
4999		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5000		scsi_cmd->scsi_done(scsi_cmd);
5001	} else
5002		ipr_erp_start(ioa_cfg, ipr_cmd);
5003}
5004
5005/**
5006 * ipr_queuecommand - Queue a mid-layer request
5007 * @scsi_cmd:	scsi command struct
5008 * @done:		done function
5009 *
5010 * This function queues a request generated by the mid-layer.
5011 *
5012 * Return value:
5013 *	0 on success
5014 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5015 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5016 **/
5017static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5018			    void (*done) (struct scsi_cmnd *))
5019{
5020	struct ipr_ioa_cfg *ioa_cfg;
5021	struct ipr_resource_entry *res;
5022	struct ipr_ioarcb *ioarcb;
5023	struct ipr_cmnd *ipr_cmd;
5024	int rc = 0;
5025
5026	scsi_cmd->scsi_done = done;
5027	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5028	res = scsi_cmd->device->hostdata;
5029	scsi_cmd->result = (DID_OK << 16);
5030
5031	/*
5032	 * We are currently blocking all devices due to a host reset.
5033	 * We have told the host to stop giving us new requests, but
5034	 * ERP ops don't count. FIXME
5035	 */
5036	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5037		return SCSI_MLQUEUE_HOST_BUSY;
5038
5039	/*
5040	 * FIXME - Create scsi_set_host_offline interface
5041	 *  and the ioa_is_dead check can be removed
5042	 */
5043	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5044		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5045		scsi_cmd->result = (DID_NO_CONNECT << 16);
5046		scsi_cmd->scsi_done(scsi_cmd);
5047		return 0;
5048	}
5049
5050	if (ipr_is_gata(res) && res->sata_port)
5051		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5052
5053	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5054	ioarcb = &ipr_cmd->ioarcb;
5055	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5056
5057	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5058	ipr_cmd->scsi_cmd = scsi_cmd;
5059	ioarcb->res_handle = res->cfgte.res_handle;
5060	ipr_cmd->done = ipr_scsi_done;
5061	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5062
5063	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5064		if (scsi_cmd->underflow == 0)
5065			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5066
5067		if (res->needs_sync_complete) {
5068			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5069			res->needs_sync_complete = 0;
5070		}
5071
5072		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5073		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5074		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5075		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5076	}
5077
5078	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5079	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5080		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5081
5082	if (likely(rc == 0)) {
5083		if (ioa_cfg->sis64)
5084			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5085		else
5086			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5087	}
5088
5089	if (likely(rc == 0)) {
5090		mb();
5091		ipr_send_command(ipr_cmd);
5092	} else {
5093		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5094		 return SCSI_MLQUEUE_HOST_BUSY;
5095	}
5096
5097	return 0;
5098}
5099
5100/**
5101 * ipr_ioctl - IOCTL handler
5102 * @sdev:	scsi device struct
5103 * @cmd:	IOCTL cmd
5104 * @arg:	IOCTL arg
5105 *
5106 * Return value:
5107 * 	0 on success / other on failure
5108 **/
5109static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5110{
5111	struct ipr_resource_entry *res;
5112
5113	res = (struct ipr_resource_entry *)sdev->hostdata;
5114	if (res && ipr_is_gata(res)) {
5115		if (cmd == HDIO_GET_IDENTITY)
5116			return -ENOTTY;
5117		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5118	}
5119
5120	return -EINVAL;
5121}
5122
5123/**
5124 * ipr_ioa_info - Get information about the card/driver
5125 * @host:	scsi host struct
5126 *
5127 * Return value:
5128 * 	pointer to buffer with description string
5129 **/
5130static const char * ipr_ioa_info(struct Scsi_Host *host)
5131{
5132	static char buffer[512];
5133	struct ipr_ioa_cfg *ioa_cfg;
5134	unsigned long lock_flags = 0;
5135
5136	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5137
5138	spin_lock_irqsave(host->host_lock, lock_flags);
5139	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5140	spin_unlock_irqrestore(host->host_lock, lock_flags);
5141
5142	return buffer;
5143}
5144
5145static struct scsi_host_template driver_template = {
5146	.module = THIS_MODULE,
5147	.name = "IPR",
5148	.info = ipr_ioa_info,
5149	.ioctl = ipr_ioctl,
5150	.queuecommand = ipr_queuecommand,
5151	.eh_abort_handler = ipr_eh_abort,
5152	.eh_device_reset_handler = ipr_eh_dev_reset,
5153	.eh_host_reset_handler = ipr_eh_host_reset,
5154	.slave_alloc = ipr_slave_alloc,
5155	.slave_configure = ipr_slave_configure,
5156	.slave_destroy = ipr_slave_destroy,
5157	.target_alloc = ipr_target_alloc,
5158	.target_destroy = ipr_target_destroy,
5159	.change_queue_depth = ipr_change_queue_depth,
5160	.change_queue_type = ipr_change_queue_type,
5161	.bios_param = ipr_biosparam,
5162	.can_queue = IPR_MAX_COMMANDS,
5163	.this_id = -1,
5164	.sg_tablesize = IPR_MAX_SGLIST,
5165	.max_sectors = IPR_IOA_MAX_SECTORS,
5166	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5167	.use_clustering = ENABLE_CLUSTERING,
5168	.shost_attrs = ipr_ioa_attrs,
5169	.sdev_attrs = ipr_dev_attrs,
5170	.proc_name = IPR_NAME
5171};
5172
5173/**
5174 * ipr_ata_phy_reset - libata phy_reset handler
5175 * @ap:		ata port to reset
5176 *
5177 **/
5178static void ipr_ata_phy_reset(struct ata_port *ap)
5179{
5180	unsigned long flags;
5181	struct ipr_sata_port *sata_port = ap->private_data;
5182	struct ipr_resource_entry *res = sata_port->res;
5183	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5184	int rc;
5185
5186	ENTER;
5187	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5188	while(ioa_cfg->in_reset_reload) {
5189		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5190		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5191		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5192	}
5193
5194	if (!ioa_cfg->allow_cmds)
5195		goto out_unlock;
5196
5197	rc = ipr_device_reset(ioa_cfg, res);
5198
5199	if (rc) {
5200		ata_port_disable(ap);
5201		goto out_unlock;
5202	}
5203
5204	switch(res->cfgte.proto) {
5205	case IPR_PROTO_SATA:
5206	case IPR_PROTO_SAS_STP:
5207		ap->link.device[0].class = ATA_DEV_ATA;
5208		break;
5209	case IPR_PROTO_SATA_ATAPI:
5210	case IPR_PROTO_SAS_STP_ATAPI:
5211		ap->link.device[0].class = ATA_DEV_ATAPI;
5212		break;
5213	default:
5214		ap->link.device[0].class = ATA_DEV_UNKNOWN;
5215		ata_port_disable(ap);
5216		break;
5217	}
5218
5219out_unlock:
5220	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5221	LEAVE;
5222}
5223
5224/**
5225 * ipr_ata_post_internal - Cleanup after an internal command
5226 * @qc:	ATA queued command
5227 *
5228 * Return value:
5229 * 	none
5230 **/
5231static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5232{
5233	struct ipr_sata_port *sata_port = qc->ap->private_data;
5234	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5235	struct ipr_cmnd *ipr_cmd;
5236	unsigned long flags;
5237
5238	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5239	while(ioa_cfg->in_reset_reload) {
5240		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5241		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5242		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5243	}
5244
5245	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5246		if (ipr_cmd->qc == qc) {
5247			ipr_device_reset(ioa_cfg, sata_port->res);
5248			break;
5249		}
5250	}
5251	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5252}
5253
5254/**
5255 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5256 * @regs:	destination
5257 * @tf:	source ATA taskfile
5258 *
5259 * Return value:
5260 * 	none
5261 **/
5262static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5263			     struct ata_taskfile *tf)
5264{
5265	regs->feature = tf->feature;
5266	regs->nsect = tf->nsect;
5267	regs->lbal = tf->lbal;
5268	regs->lbam = tf->lbam;
5269	regs->lbah = tf->lbah;
5270	regs->device = tf->device;
5271	regs->command = tf->command;
5272	regs->hob_feature = tf->hob_feature;
5273	regs->hob_nsect = tf->hob_nsect;
5274	regs->hob_lbal = tf->hob_lbal;
5275	regs->hob_lbam = tf->hob_lbam;
5276	regs->hob_lbah = tf->hob_lbah;
5277	regs->ctl = tf->ctl;
5278}
5279
5280/**
5281 * ipr_sata_done - done function for SATA commands
5282 * @ipr_cmd:	ipr command struct
5283 *
5284 * This function is invoked by the interrupt handler for
5285 * ops generated by the SCSI mid-layer to SATA devices
5286 *
5287 * Return value:
5288 * 	none
5289 **/
5290static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5291{
5292	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5293	struct ata_queued_cmd *qc = ipr_cmd->qc;
5294	struct ipr_sata_port *sata_port = qc->ap->private_data;
5295	struct ipr_resource_entry *res = sata_port->res;
5296	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5297
5298	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5299	       sizeof(struct ipr_ioasa_gata));
5300	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5301
5302	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5303		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5304					 res->cfgte.res_addr.target);
5305
5306	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5307		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5308	else
5309		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
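	/*
	 * Note: when the sense key indicates more than a recovered error,
	 * __ac_err_mask() is used so a non-zero error mask is reported to
	 * libata even if the ATA status bits themselves look clean;
	 * otherwise the mask is derived from the device status alone.
	 */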
5310	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5311	ata_qc_complete(qc);
5312}
5313
5314/**
5315 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5316 * @ipr_cmd:	ipr command struct
5317 * @qc:		ATA queued command
5318 *
5319 **/
5320static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5321				  struct ata_queued_cmd *qc)
5322{
5323	u32 ioadl_flags = 0;
5324	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5325	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5326	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5327	int len = qc->nbytes;
5328	struct scatterlist *sg;
5329	unsigned int si;
5330	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5331
5332	if (len == 0)
5333		return;
5334
5335	if (qc->dma_dir == DMA_TO_DEVICE) {
5336		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5337		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5338	} else if (qc->dma_dir == DMA_FROM_DEVICE)
5339		ioadl_flags = IPR_IOADL_FLAGS_READ;
5340
5341	ioarcb->data_transfer_length = cpu_to_be32(len);
5342	ioarcb->ioadl_len =
5343		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5344	ioarcb->u.sis64_addr_data.data_ioadl_addr =
5345		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5346
5347	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5348		ioadl64->flags = cpu_to_be32(ioadl_flags);
5349		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5350		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5351
5352		last_ioadl64 = ioadl64;
5353		ioadl64++;
5354	}
5355
5356	if (likely(last_ioadl64))
5357		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5358}
5359
5360/**
5361 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5362 * @ipr_cmd:	ipr command struct
5363 * @qc:		ATA queued command
5364 *
5365 **/
5366static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5367				struct ata_queued_cmd *qc)
5368{
5369	u32 ioadl_flags = 0;
5370	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5371	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5372	struct ipr_ioadl_desc *last_ioadl = NULL;
5373	int len = qc->nbytes;
5374	struct scatterlist *sg;
5375	unsigned int si;
5376
5377	if (len == 0)
5378		return;
5379
5380	if (qc->dma_dir == DMA_TO_DEVICE) {
5381		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5382		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5383		ioarcb->data_transfer_length = cpu_to_be32(len);
5384		ioarcb->ioadl_len =
5385			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5386	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5387		ioadl_flags = IPR_IOADL_FLAGS_READ;
5388		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5389		ioarcb->read_ioadl_len =
5390			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5391	}
5392
5393	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5394		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5395		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5396
5397		last_ioadl = ioadl;
5398		ioadl++;
5399	}
5400
5401	if (likely(last_ioadl))
5402		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5403}
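
/*
 * Layout note for the two IOADL builders above: the legacy 32-bit
 * descriptor packs the flags and the byte count into a single big-endian
 * word (flags_and_data_len) next to a 32-bit address, while the sis64
 * descriptor carries separate flags, data_len, and a 64-bit address, with
 * the IOARCB pointing at the ATA IOADL inside the command block.  In both
 * cases only the final descriptor gets IPR_IOADL_FLAGS_LAST set.
 */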
5404
5405/**
5406 * ipr_qc_issue - Issue a SATA qc to a device
5407 * @qc:	queued command
5408 *
5409 * Return value:
5410 * 	0 if success
5411 **/
5412static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5413{
5414	struct ata_port *ap = qc->ap;
5415	struct ipr_sata_port *sata_port = ap->private_data;
5416	struct ipr_resource_entry *res = sata_port->res;
5417	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5418	struct ipr_cmnd *ipr_cmd;
5419	struct ipr_ioarcb *ioarcb;
5420	struct ipr_ioarcb_ata_regs *regs;
5421
5422	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5423		return AC_ERR_SYSTEM;
5424
5425	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5426	ioarcb = &ipr_cmd->ioarcb;
5427
5428	if (ioa_cfg->sis64) {
5429		regs = &ipr_cmd->i.ata_ioadl.regs;
5430		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5431	} else
5432		regs = &ioarcb->u.add_data.u.regs;
5433
5434	memset(regs, 0, sizeof(*regs));
5435	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5436
5437	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5438	ipr_cmd->qc = qc;
5439	ipr_cmd->done = ipr_sata_done;
5440	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5441	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5442	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5443	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5444	ipr_cmd->dma_use_sg = qc->n_elem;
5445
5446	if (ioa_cfg->sis64)
5447		ipr_build_ata_ioadl64(ipr_cmd, qc);
5448	else
5449		ipr_build_ata_ioadl(ipr_cmd, qc);
5450
5451	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5452	ipr_copy_sata_tf(regs, &qc->tf);
5453	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5454	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5455
5456	switch (qc->tf.protocol) {
5457	case ATA_PROT_NODATA:
5458	case ATA_PROT_PIO:
5459		break;
5460
5461	case ATA_PROT_DMA:
5462		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5463		break;
5464
5465	case ATAPI_PROT_PIO:
5466	case ATAPI_PROT_NODATA:
5467		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5468		break;
5469
5470	case ATAPI_PROT_DMA:
5471		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5472		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5473		break;
5474
5475	default:
5476		WARN_ON(1);
5477		return AC_ERR_INVALID;
5478	}
5479
5480	mb();
5481
5482	ipr_send_command(ipr_cmd);
5483
5484	return 0;
5485}
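
/*
 * Summary of the protocol handling in ipr_qc_issue(): PIO and non-data
 * ATA commands need no extra flags, ATA DMA sets XFER_TYPE_DMA, ATAPI
 * PIO/non-data set PACKET_CMD, and ATAPI DMA sets both PACKET_CMD and
 * XFER_TYPE_DMA.  Any other protocol is rejected with AC_ERR_INVALID.
 */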
5486
5487/**
5488 * ipr_qc_fill_rtf - Read result TF
5489 * @qc: ATA queued command
5490 *
5491 * Return value:
5492 * 	true
5493 **/
5494static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
5495{
5496	struct ipr_sata_port *sata_port = qc->ap->private_data;
5497	struct ipr_ioasa_gata *g = &sata_port->ioasa;
5498	struct ata_taskfile *tf = &qc->result_tf;
5499
5500	tf->feature = g->error;
5501	tf->nsect = g->nsect;
5502	tf->lbal = g->lbal;
5503	tf->lbam = g->lbam;
5504	tf->lbah = g->lbah;
5505	tf->device = g->device;
5506	tf->command = g->status;
5507	tf->hob_nsect = g->hob_nsect;
5508	tf->hob_lbal = g->hob_lbal;
5509	tf->hob_lbam = g->hob_lbam;
5510	tf->hob_lbah = g->hob_lbah;
5511	tf->ctl = g->alt_status;
5512
5513	return true;
5514}
5515
5516static struct ata_port_operations ipr_sata_ops = {
5517	.phy_reset = ipr_ata_phy_reset,
5518	.hardreset = ipr_sata_reset,
5519	.post_internal_cmd = ipr_ata_post_internal,
5520	.qc_prep = ata_noop_qc_prep,
5521	.qc_issue = ipr_qc_issue,
5522	.qc_fill_rtf = ipr_qc_fill_rtf,
5523	.port_start = ata_sas_port_start,
5524	.port_stop = ata_sas_port_stop
5525};
5526
5527static struct ata_port_info sata_port_info = {
5528	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5529	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5530	.pio_mask	= 0x10, /* pio4 */
5531	.mwdma_mask = 0x07,
5532	.udma_mask	= 0x7f, /* udma0-6 */
5533	.port_ops	= &ipr_sata_ops
5534};
5535
5536#ifdef CONFIG_PPC_PSERIES
5537static const u16 ipr_blocked_processors[] = {
5538	PV_NORTHSTAR,
5539	PV_PULSAR,
5540	PV_POWER4,
5541	PV_ICESTAR,
5542	PV_SSTAR,
5543	PV_POWER4p,
5544	PV_630,
5545	PV_630p
5546};
5547
5548/**
5549 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5550 * @ioa_cfg:	ioa cfg struct
5551 *
5552 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5553 * certain pSeries hardware. This function determines if the given
5554 * adapter is in one of these confgurations or not.
5555 * adapter is in one of these configurations or not.

5556 * Return value:
5557 * 	1 if adapter is not supported / 0 if adapter is supported
5558 **/
5559static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5560{
5561	int i;
5562
5563	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
5564		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
5565			if (__is_processor(ipr_blocked_processors[i]))
5566				return 1;
5567		}
5568	}
5569	return 0;
5570}
5571#else
5572#define ipr_invalid_adapter(ioa_cfg) 0
5573#endif
5574
5575/**
5576 * ipr_ioa_bringdown_done - IOA bring down completion.
5577 * @ipr_cmd:	ipr command struct
5578 *
5579 * This function processes the completion of an adapter bring down.
5580 * It wakes any reset sleepers.
5581 *
5582 * Return value:
5583 * 	IPR_RC_JOB_RETURN
5584 **/
5585static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5586{
5587	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5588
5589	ENTER;
5590	ioa_cfg->in_reset_reload = 0;
5591	ioa_cfg->reset_retries = 0;
5592	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5593	wake_up_all(&ioa_cfg->reset_wait_q);
5594
5595	spin_unlock_irq(ioa_cfg->host->host_lock);
5596	scsi_unblock_requests(ioa_cfg->host);
5597	spin_lock_irq(ioa_cfg->host->host_lock);
5598	LEAVE;
5599
5600	return IPR_RC_JOB_RETURN;
5601}
5602
5603/**
5604 * ipr_ioa_reset_done - IOA reset completion.
5605 * @ipr_cmd:	ipr command struct
5606 *
5607 * This function processes the completion of an adapter reset.
5608 * It schedules any necessary mid-layer add/removes and
5609 * wakes any reset sleepers.
5610 *
5611 * Return value:
5612 * 	IPR_RC_JOB_RETURN
5613 **/
5614static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5615{
5616	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5617	struct ipr_resource_entry *res;
5618	struct ipr_hostrcb *hostrcb, *temp;
5619	int i = 0;
5620
5621	ENTER;
5622	ioa_cfg->in_reset_reload = 0;
5623	ioa_cfg->allow_cmds = 1;
5624	ioa_cfg->reset_cmd = NULL;
5625	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5626
5627	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5628		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5629			ipr_trace;
5630			break;
5631		}
5632	}
5633	schedule_work(&ioa_cfg->work_q);
5634
5635	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5636		list_del(&hostrcb->queue);
5637		if (i++ < IPR_NUM_LOG_HCAMS)
5638			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5639		else
5640			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5641	}
5642
5643	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5644	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5645
5646	ioa_cfg->reset_retries = 0;
5647	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5648	wake_up_all(&ioa_cfg->reset_wait_q);
5649
5650	spin_unlock(ioa_cfg->host->host_lock);
5651	scsi_unblock_requests(ioa_cfg->host);
5652	spin_lock(ioa_cfg->host->host_lock);
5653
5654	if (!ioa_cfg->allow_cmds)
5655		scsi_block_requests(ioa_cfg->host);
5656
5657	LEAVE;
5658	return IPR_RC_JOB_RETURN;
5659}
5660
5661/**
5662 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5663 * @supported_dev:	supported device struct
5664 * @vpids:			vendor product id struct
5665 *
5666 * Return value:
5667 * 	none
5668 **/
5669static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5670				 struct ipr_std_inq_vpids *vpids)
5671{
5672	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5673	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5674	supported_dev->num_records = 1;
5675	supported_dev->data_length =
5676		cpu_to_be16(sizeof(struct ipr_supported_device));
5677	supported_dev->reserved = 0;
5678}
5679
5680/**
5681 * ipr_set_supported_devs - Send Set Supported Devices for a device
5682 * @ipr_cmd:	ipr command struct
5683 *
5684 * This function sends a Set Supported Devices to the adapter
5685 *
5686 * Return value:
5687 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5688 **/
5689static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5690{
5691	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5692	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5693	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5694	struct ipr_resource_entry *res = ipr_cmd->u.res;
5695
5696	ipr_cmd->job_step = ipr_ioa_reset_done;
5697
5698	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5699		if (!ipr_is_scsi_disk(res))
5700			continue;
5701
5702		ipr_cmd->u.res = res;
5703		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5704
5705		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5706		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5707		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5708
5709		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5710		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5711		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5712
5713		ipr_init_ioadl(ipr_cmd,
5714			       ioa_cfg->vpd_cbs_dma +
5715				 offsetof(struct ipr_misc_cbs, supp_dev),
5716			       sizeof(struct ipr_supported_device),
5717			       IPR_IOADL_FLAGS_WRITE_LAST);
5718
5719		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5720			   IPR_SET_SUP_DEVICE_TIMEOUT);
5721
5722		ipr_cmd->job_step = ipr_set_supported_devs;
5723		return IPR_RC_JOB_RETURN;
5724	}
5725
5726	return IPR_RC_JOB_CONTINUE;
5727}
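
/*
 * ipr_set_supported_devs() sends at most one Set Supported Devices per
 * invocation: before issuing the request it points job_step back at
 * itself, so when the command completes the reset job re-enters this
 * function and list_for_each_entry_continue() resumes the walk from the
 * resource saved in ipr_cmd->u.res.  Once the list is exhausted, the
 * default job_step (ipr_ioa_reset_done) set at the top takes effect.
 */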
5728
5729/**
5730 * ipr_setup_write_cache - Disable write cache if needed
5731 * @ipr_cmd:	ipr command struct
5732 *
5733 * This function sets up the adapter's write cache to the desired setting
5734 *
5735 * Return value:
5736 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5737 **/
5738static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5739{
5740	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5741
5742	ipr_cmd->job_step = ipr_set_supported_devs;
5743	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5744				    struct ipr_resource_entry, queue);
5745
5746	if (ioa_cfg->cache_state != CACHE_DISABLED)
5747		return IPR_RC_JOB_CONTINUE;
5748
5749	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5750	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5751	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5752	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5753
5754	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5755
5756	return IPR_RC_JOB_RETURN;
5757}
5758
5759/**
5760 * ipr_get_mode_page - Locate specified mode page
5761 * @mode_pages:	mode page buffer
5762 * @page_code:	page code to find
5763 * @len:		minimum required length for mode page
5764 *
5765 * Return value:
5766 * 	pointer to mode page / NULL on failure
5767 **/
5768static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5769			       u32 page_code, u32 len)
5770{
5771	struct ipr_mode_page_hdr *mode_hdr;
5772	u32 page_length;
5773	u32 length;
5774
5775	if (!mode_pages || (mode_pages->hdr.length == 0))
5776		return NULL;
5777
5778	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5779	mode_hdr = (struct ipr_mode_page_hdr *)
5780		(mode_pages->data + mode_pages->hdr.block_desc_len);
5781
5782	while (length) {
5783		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5784			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5785				return mode_hdr;
5786			break;
5787		} else {
5788			page_length = (sizeof(struct ipr_mode_page_hdr) +
5789				       mode_hdr->page_length);
5790			length -= page_length;
5791			mode_hdr = (struct ipr_mode_page_hdr *)
5792				((unsigned long)mode_hdr + page_length);
5793		}
5794	}
5795	return NULL;
5796}
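
/*
 * Example of the length arithmetic in ipr_get_mode_page(): with
 * hdr.length = 0x23 and block_desc_len = 8, the walk covers
 * (0x23 + 1) - 4 - 8 = 0x18 bytes of mode page data, i.e. everything
 * after the 4-byte mode parameter header and the block descriptors.
 */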
5797
5798/**
5799 * ipr_check_term_power - Check for term power errors
5800 * @ioa_cfg:	ioa config struct
5801 * @mode_pages:	IOAFP mode pages buffer
5802 *
5803 * Check the IOAFP's mode page 28 for term power errors
5804 *
5805 * Return value:
5806 * 	nothing
5807 **/
5808static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5809				 struct ipr_mode_pages *mode_pages)
5810{
5811	int i;
5812	int entry_length;
5813	struct ipr_dev_bus_entry *bus;
5814	struct ipr_mode_page28 *mode_page;
5815
5816	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5817				      sizeof(struct ipr_mode_page28));
5818
5819	entry_length = mode_page->entry_length;
5820
5821	bus = mode_page->bus;
5822
5823	for (i = 0; i < mode_page->num_entries; i++) {
5824		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5825			dev_err(&ioa_cfg->pdev->dev,
5826				"Term power is absent on scsi bus %d\n",
5827				bus->res_addr.bus);
5828		}
5829
5830		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5831	}
5832}
5833
5834/**
5835 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5836 * @ioa_cfg:	ioa config struct
5837 *
5838 * Looks through the config table checking for SES devices. If
5839 * the SES device is in the SES table indicating a maximum SCSI
5840 * bus speed, the speed is limited for the bus.
5841 *
5842 * Return value:
5843 * 	none
5844 **/
5845static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5846{
5847	u32 max_xfer_rate;
5848	int i;
5849
5850	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5851		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5852						       ioa_cfg->bus_attr[i].bus_width);
5853
5854		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5855			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5856	}
5857}
5858
5859/**
5860 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5861 * @ioa_cfg:	ioa config struct
5862 * @mode_pages:	mode page 28 buffer
5863 *
5864 * Updates mode page 28 based on driver configuration
5865 *
5866 * Return value:
5867 * 	none
5868 **/
5869static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5870					  	struct ipr_mode_pages *mode_pages)
5871{
5872	int i, entry_length;
5873	struct ipr_dev_bus_entry *bus;
5874	struct ipr_bus_attributes *bus_attr;
5875	struct ipr_mode_page28 *mode_page;
5876
5877	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5878				      sizeof(struct ipr_mode_page28));
5879
5880	entry_length = mode_page->entry_length;
5881
5882	/* Loop for each device bus entry */
5883	for (i = 0, bus = mode_page->bus;
5884	     i < mode_page->num_entries;
5885	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5886		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5887			dev_err(&ioa_cfg->pdev->dev,
5888				"Invalid resource address reported: 0x%08X\n",
5889				IPR_GET_PHYS_LOC(bus->res_addr));
5890			continue;
5891		}
5892
5893		bus_attr = &ioa_cfg->bus_attr[i];
5894		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5895		bus->bus_width = bus_attr->bus_width;
5896		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5897		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5898		if (bus_attr->qas_enabled)
5899			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5900		else
5901			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5902	}
5903}
5904
5905/**
5906 * ipr_build_mode_select - Build a mode select command
5907 * @ipr_cmd:	ipr command struct
5908 * @res_handle:	resource handle to send command to
5909 * @parm:		Byte 1 of Mode Select command
5910 * @dma_addr:	DMA buffer address
5911 * @xfer_len:	data transfer length
5912 *
5913 * Return value:
5914 * 	none
5915 **/
5916static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5917				  __be32 res_handle, u8 parm,
5918				  dma_addr_t dma_addr, u8 xfer_len)
5919{
5920	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5921
5922	ioarcb->res_handle = res_handle;
5923	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5924	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5925	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5926	ioarcb->cmd_pkt.cdb[1] = parm;
5927	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5928
5929	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
5930}
5931
5932/**
5933 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5934 * @ipr_cmd:	ipr command struct
5935 *
5936 * This function sets up the SCSI bus attributes and sends
5937 * a Mode Select for Page 28 to activate them.
5938 *
5939 * Return value:
5940 * 	IPR_RC_JOB_RETURN
5941 **/
5942static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5943{
5944	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5945	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5946	int length;
5947
5948	ENTER;
5949	ipr_scsi_bus_speed_limit(ioa_cfg);
5950	ipr_check_term_power(ioa_cfg, mode_pages);
5951	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5952	length = mode_pages->hdr.length + 1;
5953	mode_pages->hdr.length = 0;
5954
5955	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5956			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5957			      length);
5958
5959	ipr_cmd->job_step = ipr_setup_write_cache;
5960	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5961
5962	LEAVE;
5963	return IPR_RC_JOB_RETURN;
5964}
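
/*
 * Note for the mode select above: the transfer length is taken from the
 * mode data length returned by the earlier mode sense (hdr.length + 1),
 * and hdr.length itself is then zeroed because, per SCSI, the mode data
 * length field is reserved in MODE SELECT parameter data.
 */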
5965
5966/**
5967 * ipr_build_mode_sense - Builds a mode sense command
5968 * @ipr_cmd:	ipr command struct
5969 * @res_handle:	resource handle to send command to
5970 * @parm:		Byte 2 of mode sense command
5971 * @dma_addr:	DMA address of mode sense buffer
5972 * @xfer_len:	Size of DMA buffer
5973 *
5974 * Return value:
5975 * 	none
5976 **/
5977static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5978				 __be32 res_handle,
5979				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
5980{
5981	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5982
5983	ioarcb->res_handle = res_handle;
5984	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5985	ioarcb->cmd_pkt.cdb[2] = parm;
5986	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5987	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5988
5989	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
5990}
5991
5992/**
5993 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5994 * @ipr_cmd:	ipr command struct
5995 *
5996 * This function handles the failure of an IOA bringup command.
5997 *
5998 * Return value:
5999 * 	IPR_RC_JOB_RETURN
6000 **/
6001static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6002{
6003	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6004	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6005
6006	dev_err(&ioa_cfg->pdev->dev,
6007		"0x%02X failed with IOASC: 0x%08X\n",
6008		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6009
6010	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6011	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6012	return IPR_RC_JOB_RETURN;
6013}
6014
6015/**
6016 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6017 * @ipr_cmd:	ipr command struct
6018 *
6019 * This function handles the failure of a Mode Sense to the IOAFP.
6020 * Some adapters do not handle all mode pages.
6021 *
6022 * Return value:
6023 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6024 **/
6025static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6026{
6027	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6028
6029	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6030		ipr_cmd->job_step = ipr_setup_write_cache;
6031		return IPR_RC_JOB_CONTINUE;
6032	}
6033
6034	return ipr_reset_cmd_failed(ipr_cmd);
6035}
6036
6037/**
6038 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6039 * @ipr_cmd:	ipr command struct
6040 *
6041 * This function sends a Page 28 mode sense to the IOA to
6042 * retrieve SCSI bus attributes.
6043 *
6044 * Return value:
6045 * 	IPR_RC_JOB_RETURN
6046 **/
6047static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6048{
6049	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6050
6051	ENTER;
6052	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6053			     0x28, ioa_cfg->vpd_cbs_dma +
6054			     offsetof(struct ipr_misc_cbs, mode_pages),
6055			     sizeof(struct ipr_mode_pages));
6056
6057	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6058	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6059
6060	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6061
6062	LEAVE;
6063	return IPR_RC_JOB_RETURN;
6064}
6065
6066/**
6067 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6068 * @ipr_cmd:	ipr command struct
6069 *
6070 * This function enables dual IOA RAID support if possible.
6071 *
6072 * Return value:
6073 * 	IPR_RC_JOB_RETURN
6074 **/
6075static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6076{
6077	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6078	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6079	struct ipr_mode_page24 *mode_page;
6080	int length;
6081
6082	ENTER;
6083	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6084				      sizeof(struct ipr_mode_page24));
6085
6086	if (mode_page)
6087		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6088
6089	length = mode_pages->hdr.length + 1;
6090	mode_pages->hdr.length = 0;
6091
6092	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6093			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6094			      length);
6095
6096	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6097	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6098
6099	LEAVE;
6100	return IPR_RC_JOB_RETURN;
6101}
6102
6103/**
6104 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6105 * @ipr_cmd:	ipr command struct
6106 *
6107 * This function handles the failure of a Mode Sense to the IOAFP.
6108 * Some adapters do not handle all mode pages.
6109 *
6110 * Return value:
6111 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6112 **/
6113static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6114{
6115	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6116
6117	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6118		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6119		return IPR_RC_JOB_CONTINUE;
6120	}
6121
6122	return ipr_reset_cmd_failed(ipr_cmd);
6123}
6124
6125/**
6126 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6127 * @ipr_cmd:	ipr command struct
6128 *
6129 * This function sends a mode sense to the IOA to retrieve
6130 * the IOA Advanced Function Control mode page.
6131 *
6132 * Return value:
6133 * 	IPR_RC_JOB_RETURN
6134 **/
6135static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6136{
6137	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6138
6139	ENTER;
6140	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6141			     0x24, ioa_cfg->vpd_cbs_dma +
6142			     offsetof(struct ipr_misc_cbs, mode_pages),
6143			     sizeof(struct ipr_mode_pages));
6144
6145	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6146	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6147
6148	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6149
6150	LEAVE;
6151	return IPR_RC_JOB_RETURN;
6152}
6153
6154/**
6155 * ipr_init_res_table - Initialize the resource table
6156 * @ipr_cmd:	ipr command struct
6157 *
6158 * This function looks through the existing resource table, comparing
6159 * it with the config table. This function will take care of old/new
6160 * devices and schedule adding/removing them from the mid-layer
6161 * as appropriate.
6162 *
6163 * Return value:
6164 * 	IPR_RC_JOB_CONTINUE
6165 **/
6166static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6167{
6168	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6169	struct ipr_resource_entry *res, *temp;
6170	struct ipr_config_table_entry *cfgte;
6171	int found, i;
6172	LIST_HEAD(old_res);
6173
6174	ENTER;
6175	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
6176		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6177
6178	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6179		list_move_tail(&res->queue, &old_res);
6180
6181	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
6182		cfgte = &ioa_cfg->cfg_table->dev[i];
6183		found = 0;
6184
6185		list_for_each_entry_safe(res, temp, &old_res, queue) {
6186			if (!memcmp(&res->cfgte.res_addr,
6187				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
6188				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6189				found = 1;
6190				break;
6191			}
6192		}
6193
6194		if (!found) {
6195			if (list_empty(&ioa_cfg->free_res_q)) {
6196				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6197				break;
6198			}
6199
6200			found = 1;
6201			res = list_entry(ioa_cfg->free_res_q.next,
6202					 struct ipr_resource_entry, queue);
6203			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6204			ipr_init_res_entry(res);
6205			res->add_to_ml = 1;
6206		}
6207
6208		if (found)
6209			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
6210	}
6211
6212	list_for_each_entry_safe(res, temp, &old_res, queue) {
6213		if (res->sdev) {
6214			res->del_from_ml = 1;
6215			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
6216			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6217		} else {
6218			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6219		}
6220	}
6221
6222	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6223		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6224	else
6225		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6226
6227	LEAVE;
6228	return IPR_RC_JOB_CONTINUE;
6229}
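
/*
 * Reconciliation summary for ipr_init_res_table(): every known resource
 * is first moved to a temporary old_res list; each config table entry
 * then either reclaims its matching old entry (matched by res_addr) or,
 * if one is available on free_res_q, allocates a fresh entry and flags
 * it add_to_ml.  Whatever remains on old_res is gone from the adapter:
 * entries with a registered sdev are flagged del_from_ml, the rest go
 * straight back to free_res_q.
 */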
6230
6231/**
6232 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6233 * @ipr_cmd:	ipr command struct
6234 *
6235 * This function sends a Query IOA Configuration command
6236 * to the adapter to retrieve the IOA configuration table.
6237 *
6238 * Return value:
6239 * 	IPR_RC_JOB_RETURN
6240 **/
6241static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6242{
6243	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6244	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6245	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6246	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6247
6248	ENTER;
6249	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6250		ioa_cfg->dual_raid = 1;
6251	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6252		 ucode_vpd->major_release, ucode_vpd->card_type,
6253		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6254	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6255	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6256
6257	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6258	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6259	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6260
6261	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma,
6262		       sizeof(struct ipr_config_table),
6263		       IPR_IOADL_FLAGS_READ_LAST);
6264
6265	ipr_cmd->job_step = ipr_init_res_table;
6266
6267	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6268
6269	LEAVE;
6270	return IPR_RC_JOB_RETURN;
6271}
6272
6273/**
6274 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6275 * @ipr_cmd:	ipr command struct
6276 *
6277 * This utility function sends an inquiry to the adapter.
6278 *
6279 * Return value:
6280 * 	none
6281 **/
6282static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6283			      dma_addr_t dma_addr, u8 xfer_len)
6284{
6285	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6286
6287	ENTER;
6288	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6289	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6290
6291	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6292	ioarcb->cmd_pkt.cdb[1] = flags;
6293	ioarcb->cmd_pkt.cdb[2] = page;
6294	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6295
6296	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6297
6298	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6299	LEAVE;
6300}
6301
6302/**
6303 * ipr_inquiry_page_supported - Is the given inquiry page supported
6304 * @page0:		inquiry page 0 buffer
6305 * @page:		page code.
6306 *
6307 * This function determines if the specified inquiry page is supported.
6308 *
6309 * Return value:
6310 *	1 if page is supported / 0 if not
6311 **/
6312static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6313{
6314	int i;
6315
6316	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6317		if (page0->page[i] == page)
6318			return 1;
6319
6320	return 0;
6321}
6322
6323/**
6324 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6325 * @ipr_cmd:	ipr command struct
6326 *
6327 * This function sends a Page 0xD0 inquiry to the adapter
6328 * to retrieve adapter capabilities.
6329 *
6330 * Return value:
6331 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6332 **/
6333static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6334{
6335	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6336	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6337	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6338
6339	ENTER;
6340	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6341	memset(cap, 0, sizeof(*cap));
6342
6343	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6344		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6345				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6346				  sizeof(struct ipr_inquiry_cap));
6347		return IPR_RC_JOB_RETURN;
6348	}
6349
6350	LEAVE;
6351	return IPR_RC_JOB_CONTINUE;
6352}
6353
6354/**
6355 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6356 * @ipr_cmd:	ipr command struct
6357 *
6358 * This function sends a Page 3 inquiry to the adapter
6359 * to retrieve software VPD information.
6360 *
6361 * Return value:
6362 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6363 **/
6364static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6365{
6366	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6367	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6368
6369	ENTER;
6370
6371	if (!ipr_inquiry_page_supported(page0, 1))
6372		ioa_cfg->cache_state = CACHE_NONE;
6373
6374	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6375
6376	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6377			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6378			  sizeof(struct ipr_inquiry_page3));
6379
6380	LEAVE;
6381	return IPR_RC_JOB_RETURN;
6382}
6383
6384/**
6385 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6386 * @ipr_cmd:	ipr command struct
6387 *
6388 * This function sends a Page 0 inquiry to the adapter
6389 * to retrieve supported inquiry pages.
6390 *
6391 * Return value:
6392 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6393 **/
6394static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6395{
6396	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6397	char type[5];
6398
6399	ENTER;
6400
6401	/* Grab the type out of the VPD and store it away */
6402	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6403	type[4] = '\0';
6404	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6405
6406	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6407
6408	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6409			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6410			  sizeof(struct ipr_inquiry_page0));
6411
6412	LEAVE;
6413	return IPR_RC_JOB_RETURN;
6414}
6415
6416/**
6417 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6418 * @ipr_cmd:	ipr command struct
6419 *
6420 * This function sends a standard inquiry to the adapter.
6421 *
6422 * Return value:
6423 * 	IPR_RC_JOB_RETURN
6424 **/
6425static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6426{
6427	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6428
6429	ENTER;
6430	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6431
6432	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6433			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6434			  sizeof(struct ipr_ioa_vpd));
6435
6436	LEAVE;
6437	return IPR_RC_JOB_RETURN;
6438}
6439
6440/**
6441 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6442 * @ipr_cmd:	ipr command struct
6443 *
6444 * This function sends an Identify Host Request Response Queue
6445 * command to establish the HRRQ with the adapter.
6446 *
6447 * Return value:
6448 * 	IPR_RC_JOB_RETURN
6449 **/
6450static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6451{
6452	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6453	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6454
6455	ENTER;
6456	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6457
6458	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6459	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6460
6461	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6462	ioarcb->cmd_pkt.cdb[2] =
6463		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6464	ioarcb->cmd_pkt.cdb[3] =
6465		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6466	ioarcb->cmd_pkt.cdb[4] =
6467		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6468	ioarcb->cmd_pkt.cdb[5] =
6469		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6470	ioarcb->cmd_pkt.cdb[7] =
6471		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6472	ioarcb->cmd_pkt.cdb[8] =
6473		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6474
6475	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6476
6477	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6478
6479	LEAVE;
6480	return IPR_RC_JOB_RETURN;
6481}
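
/*
 * CDB layout used above for IPR_ID_HOST_RR_Q: bytes 2-5 carry the host
 * RRQ DMA address most-significant byte first, and bytes 7-8 carry the
 * queue size in bytes (sizeof(u32) * IPR_NUM_CMD_BLKS), also big-endian.
 */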
6482
6483/**
6484 * ipr_reset_timer_done - Adapter reset timer function
6485 * @ipr_cmd:	ipr command struct
6486 *
6487 * Description: This function is used in adapter reset processing
6488 * for timing events. If the reset_cmd pointer in the IOA
6489 * config struct is not this command, we are doing nested
6490 * resets and fail_all_ops will take care of freeing the
6491 * command block.
6492 *
6493 * Return value:
6494 * 	none
6495 **/
6496static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6497{
6498	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6499	unsigned long lock_flags = 0;
6500
6501	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6502
6503	if (ioa_cfg->reset_cmd == ipr_cmd) {
6504		list_del(&ipr_cmd->queue);
6505		ipr_cmd->done(ipr_cmd);
6506	}
6507
6508	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6509}
6510
6511/**
6512 * ipr_reset_start_timer - Start a timer for adapter reset job
6513 * @ipr_cmd:	ipr command struct
6514 * @timeout:	timeout value
6515 *
6516 * Description: This function is used in adapter reset processing
6517 * for timing events. If the reset_cmd pointer in the IOA
6518 * config struct is not this command, we are doing nested
6519 * resets and fail_all_ops will take care of freeing the
6520 * command block.
6521 *
6522 * Return value:
6523 * 	none
6524 **/
6525static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6526				  unsigned long timeout)
6527{
6528	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6529	ipr_cmd->done = ipr_reset_ioa_job;
6530
6531	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6532	ipr_cmd->timer.expires = jiffies + timeout;
6533	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6534	add_timer(&ipr_cmd->timer);
6535}
6536
6537/**
6538 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6539 * @ioa_cfg:	ioa cfg struct
6540 *
6541 * Return value:
6542 * 	nothing
6543 **/
6544static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6545{
6546	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6547
6548	/* Initialize Host RRQ pointers */
6549	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6550	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6551	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6552	ioa_cfg->toggle_bit = 1;
6553
6554	/* Zero out config table */
6555	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6556}
6557
6558/**
6559 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6560 * @ipr_cmd:	ipr command struct
6561 *
6562 * This function reinitializes some control blocks and
6563 * enables destructive diagnostics on the adapter.
6564 *
6565 * Return value:
6566 * 	IPR_RC_JOB_RETURN
6567 **/
6568static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6569{
6570	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6571	volatile u32 int_reg;
6572
6573	ENTER;
6574	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6575	ipr_init_ioa_mem(ioa_cfg);
6576
6577	ioa_cfg->allow_interrupts = 1;
6578	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6579
6580	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6581		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6582		       ioa_cfg->regs.clr_interrupt_mask_reg);
6583		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6584		return IPR_RC_JOB_CONTINUE;
6585	}
6586
6587	/* Enable destructive diagnostics on IOA */
6588	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6589
6590	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6591	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6592
6593	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6594
6595	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6596	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6597	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6598	ipr_cmd->done = ipr_reset_ioa_job;
6599	add_timer(&ipr_cmd->timer);
6600	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6601
6602	LEAVE;
6603	return IPR_RC_JOB_RETURN;
6604}
6605
6606/**
6607 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6608 * @ipr_cmd:	ipr command struct
6609 *
6610 * This function is invoked when an adapter dump has run out
6611 * of processing time.
6612 *
6613 * Return value:
6614 * 	IPR_RC_JOB_CONTINUE
6615 **/
6616static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6617{
6618	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6619
6620	if (ioa_cfg->sdt_state == GET_DUMP)
6621		ioa_cfg->sdt_state = ABORT_DUMP;
6622
6623	ipr_cmd->job_step = ipr_reset_alert;
6624
6625	return IPR_RC_JOB_CONTINUE;
6626}
6627
6628/**
6629 * ipr_unit_check_no_data - Log a unit check/no data error log
6630 * @ioa_cfg:		ioa config struct
6631 *
6632 * Logs an error indicating the adapter unit checked, but for some
6633 * reason, we were unable to fetch the unit check buffer.
6634 *
6635 * Return value:
6636 * 	nothing
6637 **/
6638static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6639{
6640	ioa_cfg->errors_logged++;
6641	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6642}
6643
6644/**
6645 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6646 * @ioa_cfg:		ioa config struct
6647 *
6648 * Fetches the unit check buffer from the adapter by clocking the data
6649 * through the mailbox register.
6650 *
6651 * Return value:
6652 * 	nothing
6653 **/
6654static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6655{
6656	unsigned long mailbox;
6657	struct ipr_hostrcb *hostrcb;
6658	struct ipr_uc_sdt sdt;
6659	int rc, length;
6660	u32 ioasc;
6661
6662	mailbox = readl(ioa_cfg->ioa_mailbox);
6663
6664	if (!ipr_sdt_is_fmt2(mailbox)) {
6665		ipr_unit_check_no_data(ioa_cfg);
6666		return;
6667	}
6668
6669	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6670	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6671					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6672
6673	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6674	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6675		ipr_unit_check_no_data(ioa_cfg);
6676		return;
6677	}
6678
6679	/* Find length of the first sdt entry (UC buffer) */
6680	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6681		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6682
6683	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6684			     struct ipr_hostrcb, queue);
6685	list_del(&hostrcb->queue);
6686	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6687
6688	rc = ipr_get_ldump_data_section(ioa_cfg,
6689					be32_to_cpu(sdt.entry[0].bar_str_offset),
6690					(__be32 *)&hostrcb->hcam,
6691					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6692
6693	if (!rc) {
6694		ipr_handle_log_data(ioa_cfg, hostrcb);
6695		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6696		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6697		    ioa_cfg->sdt_state == GET_DUMP)
6698			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6699	} else
6700		ipr_unit_check_no_data(ioa_cfg);
6701
6702	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6703}
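
/*
 * The unit check buffer is located indirectly: the mailbox register
 * points at a format-2 SDT whose first entry describes where the unit
 * check data lives, and that region is then clocked out into a spare
 * hostrcb (capped at sizeof(hostrcb->hcam)) and logged like any other
 * error HCAM.
 */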
6704
6705/**
6706 * ipr_reset_restore_cfg_space - Restore PCI config space.
6707 * @ipr_cmd:	ipr command struct
6708 *
6709 * Description: This function restores the saved PCI config space of
6710 * the adapter, fails all outstanding ops back to the callers, and
6711 * fetches the dump/unit check if applicable to this reset.
6712 *
6713 * Return value:
6714 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6715 **/
6716static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6717{
6718	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6719	int rc;
6720
6721	ENTER;
6722	ioa_cfg->pdev->state_saved = true;
6723	rc = pci_restore_state(ioa_cfg->pdev);
6724
6725	if (rc != PCIBIOS_SUCCESSFUL) {
6726		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6727		return IPR_RC_JOB_CONTINUE;
6728	}
6729
6730	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6731		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6732		return IPR_RC_JOB_CONTINUE;
6733	}
6734
6735	ipr_fail_all_ops(ioa_cfg);
6736
6737	if (ioa_cfg->ioa_unit_checked) {
6738		ioa_cfg->ioa_unit_checked = 0;
6739		ipr_get_unit_check_buffer(ioa_cfg);
6740		ipr_cmd->job_step = ipr_reset_alert;
6741		ipr_reset_start_timer(ipr_cmd, 0);
6742		return IPR_RC_JOB_RETURN;
6743	}
6744
6745	if (ioa_cfg->in_ioa_bringdown) {
6746		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6747	} else {
6748		ipr_cmd->job_step = ipr_reset_enable_ioa;
6749
6750		if (GET_DUMP == ioa_cfg->sdt_state) {
6751			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6752			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6753			schedule_work(&ioa_cfg->work_q);
6754			return IPR_RC_JOB_RETURN;
6755		}
6756	}
6757
6758	LEAVE;
6759	return IPR_RC_JOB_CONTINUE;
6760}
6761
6762/**
6763 * ipr_reset_bist_done - BIST has completed on the adapter.
6764 * @ipr_cmd:	ipr command struct
6765 *
6766 * Description: Unblock config space and resume the reset process.
6767 *
6768 * Return value:
6769 * 	IPR_RC_JOB_CONTINUE
6770 **/
6771static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6772{
6773	ENTER;
6774	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6775	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6776	LEAVE;
6777	return IPR_RC_JOB_CONTINUE;
6778}
6779
6780/**
6781 * ipr_reset_start_bist - Run BIST on the adapter.
6782 * @ipr_cmd:	ipr command struct
6783 *
6784 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6785 *
6786 * Return value:
6787 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6788 **/
6789static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6790{
6791	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6792	int rc;
6793
6794	ENTER;
6795	pci_block_user_cfg_access(ioa_cfg->pdev);
6796	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6797
6798	if (rc != PCIBIOS_SUCCESSFUL) {
6799		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6800		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6801		rc = IPR_RC_JOB_CONTINUE;
6802	} else {
6803		ipr_cmd->job_step = ipr_reset_bist_done;
6804		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6805		rc = IPR_RC_JOB_RETURN;
6806	}
6807
6808	LEAVE;
6809	return rc;
6810}
6811
6812/**
6813 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6814 * @ipr_cmd:	ipr command struct
6815 *
6816 * Description: This clears PCI reset to the adapter and delays two seconds.
6817 *
6818 * Return value:
6819 * 	IPR_RC_JOB_RETURN
6820 **/
6821static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6822{
6823	ENTER;
6824	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6825	ipr_cmd->job_step = ipr_reset_bist_done;
6826	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6827	LEAVE;
6828	return IPR_RC_JOB_RETURN;
6829}
6830
6831/**
6832 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6833 * @ipr_cmd:	ipr command struct
6834 *
6835 * Description: This asserts PCI reset to the adapter.
6836 *
6837 * Return value:
6838 * 	IPR_RC_JOB_RETURN
6839 **/
6840static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6841{
6842	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6843	struct pci_dev *pdev = ioa_cfg->pdev;
6844
6845	ENTER;
6846	pci_block_user_cfg_access(pdev);
6847	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6848	ipr_cmd->job_step = ipr_reset_slot_reset_done;
6849	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6850	LEAVE;
6851	return IPR_RC_JOB_RETURN;
6852}
6853
6854/**
6855 * ipr_reset_allowed - Query whether or not IOA can be reset
6856 * @ioa_cfg:	ioa config struct
6857 *
6858 * Return value:
6859 * 	0 if reset not allowed / non-zero if reset is allowed
6860 **/
6861static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6862{
6863	volatile u32 temp_reg;
6864
6865	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6866	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6867}
6868
6869/**
6870 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6871 * @ipr_cmd:	ipr command struct
6872 *
6873 * Description: This function waits for adapter permission to run BIST,
6874 * then runs BIST. If the adapter does not give permission after a
6875 * reasonable time, we will reset the adapter anyway. Resetting the
6876 * adapter without warning carries the risk of losing the persistent
6877 * error log on the adapter: if the adapter is reset while it is
6878 * writing to its flash, that flash segment will have bad ECC and
6879 * be zeroed.
6880 *
6881 * Return value:
6882 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6883 **/
6884static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6885{
6886	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6887	int rc = IPR_RC_JOB_RETURN;
6888
6889	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6890		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6891		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6892	} else {
6893		ipr_cmd->job_step = ioa_cfg->reset;
6894		rc = IPR_RC_JOB_CONTINUE;
6895	}
6896
6897	return rc;
6898}
6899
6900/**
6901 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6902 * @ipr_cmd:	ipr command struct
6903 *
6904 * Description: This function alerts the adapter that it will be reset.
6905 * If memory space is not currently enabled, proceed directly
6906 * to running BIST on the adapter. The timer must always be started
6907 * so we guarantee we do not run BIST from ipr_isr.
6908 *
6909 * Return value:
6910 * 	IPR_RC_JOB_RETURN
6911 **/
6912static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6913{
6914	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6915	u16 cmd_reg;
6916	int rc;
6917
6918	ENTER;
6919	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6920
6921	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6922		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6923		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6924		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6925	} else {
6926		ipr_cmd->job_step = ioa_cfg->reset;
6927	}
6928
6929	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6930	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6931
6932	LEAVE;
6933	return IPR_RC_JOB_RETURN;
6934}
6935
6936/**
6937 * ipr_reset_ucode_download_done - Microcode download completion
6938 * @ipr_cmd:	ipr command struct
6939 *
6940 * Description: This function unmaps the microcode download buffer.
6941 *
6942 * Return value:
6943 * 	IPR_RC_JOB_CONTINUE
6944 **/
6945static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6946{
6947	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6948	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6949
6950	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6951		     sglist->num_sg, DMA_TO_DEVICE);
6952
6953	ipr_cmd->job_step = ipr_reset_alert;
6954	return IPR_RC_JOB_CONTINUE;
6955}
6956
6957/**
6958 * ipr_reset_ucode_download - Download microcode to the adapter
6959 * @ipr_cmd:	ipr command struct
6960 *
6961 * Description: This function checks to see if there is microcode
6962 * to download to the adapter. If there is, a download is performed.
6963 *
6964 * Return value:
6965 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6966 **/
6967static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6968{
6969	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6970	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6971
6972	ENTER;
6973	ipr_cmd->job_step = ipr_reset_alert;
6974
6975	if (!sglist)
6976		return IPR_RC_JOB_CONTINUE;
6977
6978	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6979	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6980	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6981	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6982	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6983	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6984	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6985
6986	if (ioa_cfg->sis64)
6987		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
6988	else
6989		ipr_build_ucode_ioadl(ipr_cmd, sglist);
6990	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6991
6992	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6993		   IPR_WRITE_BUFFER_TIMEOUT);
6994
6995	LEAVE;
6996	return IPR_RC_JOB_RETURN;
6997}
6998
6999/**
7000 * ipr_reset_shutdown_ioa - Shutdown the adapter
7001 * @ipr_cmd:	ipr command struct
7002 *
7003 * Description: This function issues an adapter shutdown of the
7004 * specified type to the specified adapter as part of the
7005 * adapter reset job.
7006 *
7007 * Return value:
7008 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7009 **/
7010static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7011{
7012	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7013	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7014	unsigned long timeout;
7015	int rc = IPR_RC_JOB_CONTINUE;
7016
7017	ENTER;
7018	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7019		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7020		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7021		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7022		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7023
7024		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7025			timeout = IPR_SHUTDOWN_TIMEOUT;
7026		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7027			timeout = IPR_INTERNAL_TIMEOUT;
7028		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7029			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7030		else
7031			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7032
7033		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7034
7035		rc = IPR_RC_JOB_RETURN;
7036		ipr_cmd->job_step = ipr_reset_ucode_download;
7037	} else
7038		ipr_cmd->job_step = ipr_reset_alert;
7039
7040	LEAVE;
7041	return rc;
7042}
7043
7044/**
7045 * ipr_reset_ioa_job - Adapter reset job
7046 * @ipr_cmd:	ipr command struct
7047 *
7048 * Description: This function is the job router for the adapter reset job.
7049 *
7050 * Return value:
7051 * 	none
7052 **/
7053static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7054{
7055	u32 rc, ioasc;
7056	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7057
7058	do {
7059		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7060
7061		if (ioa_cfg->reset_cmd != ipr_cmd) {
7062			/*
7063			 * We are doing nested adapter resets and this is
7064			 * not the current reset job.
7065			 */
7066			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7067			return;
7068		}
7069
7070		if (IPR_IOASC_SENSE_KEY(ioasc)) {
7071			rc = ipr_cmd->job_step_failed(ipr_cmd);
7072			if (rc == IPR_RC_JOB_RETURN)
7073				return;
7074		}
7075
7076		ipr_reinit_ipr_cmnd(ipr_cmd);
7077		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7078		rc = ipr_cmd->job_step(ipr_cmd);
7079	} while (rc == IPR_RC_JOB_CONTINUE);
7080}
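
/*
 * Job step contract used by ipr_reset_ioa_job(): a step that returns
 * IPR_RC_JOB_CONTINUE is followed immediately by the next job_step in the
 * same loop iteration, while IPR_RC_JOB_RETURN means the step has armed a
 * timer or sent a command whose completion will call back into this
 * router later.  If reset_cmd no longer points at this command, a nested
 * reset is in progress and the command is simply returned to free_q.
 * job_step_failed defaults to ipr_reset_cmd_failed and is consulted
 * whenever the previous step completed with a sense key.
 */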
7081
7082/**
7083 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7084 * @ioa_cfg:		ioa config struct
7085 * @job_step:		first job step of reset job
7086 * @shutdown_type:	shutdown type
7087 *
7088 * Description: This function will initiate the reset of the given adapter
7089 * starting at the selected job step.
7090 * If the caller needs to wait on the completion of the reset,
7091 * the caller must sleep on the reset_wait_q.
7092 *
7093 * Return value:
7094 * 	none
7095 **/
7096static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7097				    int (*job_step) (struct ipr_cmnd *),
7098				    enum ipr_shutdown_type shutdown_type)
7099{
7100	struct ipr_cmnd *ipr_cmd;
7101
7102	ioa_cfg->in_reset_reload = 1;
7103	ioa_cfg->allow_cmds = 0;
7104	scsi_block_requests(ioa_cfg->host);
7105
7106	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7107	ioa_cfg->reset_cmd = ipr_cmd;
7108	ipr_cmd->job_step = job_step;
7109	ipr_cmd->u.shutdown_type = shutdown_type;
7110
7111	ipr_reset_ioa_job(ipr_cmd);
7112}
7113
7114/**
7115 * ipr_initiate_ioa_reset - Initiate an adapter reset
7116 * @ioa_cfg:		ioa config struct
7117 * @shutdown_type:	shutdown type
7118 *
7119 * Description: This function will initiate the reset of the given adapter.
7120 * If the caller needs to wait on the completion of the reset,
7121 * the caller must sleep on the reset_wait_q.
7122 *
7123 * Return value:
7124 * 	none
7125 **/
7126static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7127				   enum ipr_shutdown_type shutdown_type)
7128{
7129	if (ioa_cfg->ioa_is_dead)
7130		return;
7131
7132	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7133		ioa_cfg->sdt_state = ABORT_DUMP;
7134
7135	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7136		dev_err(&ioa_cfg->pdev->dev,
7137			"IOA taken offline - error recovery failed\n");
7138
7139		ioa_cfg->reset_retries = 0;
7140		ioa_cfg->ioa_is_dead = 1;
7141
7142		if (ioa_cfg->in_ioa_bringdown) {
7143			ioa_cfg->reset_cmd = NULL;
7144			ioa_cfg->in_reset_reload = 0;
7145			ipr_fail_all_ops(ioa_cfg);
7146			wake_up_all(&ioa_cfg->reset_wait_q);
7147
7148			spin_unlock_irq(ioa_cfg->host->host_lock);
7149			scsi_unblock_requests(ioa_cfg->host);
7150			spin_lock_irq(ioa_cfg->host->host_lock);
7151			return;
7152		} else {
7153			ioa_cfg->in_ioa_bringdown = 1;
7154			shutdown_type = IPR_SHUTDOWN_NONE;
7155		}
7156	}
7157
7158	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7159				shutdown_type);
7160}
7161
7162/**
7163 * ipr_reset_freeze - Hold off all I/O activity
7164 * @ipr_cmd:	ipr command struct
7165 *
7166 * Description: If the PCI slot is frozen, hold off all I/O
7167 * activity; then, as soon as the slot is available again,
7168 * initiate an adapter reset.
7169 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
7170static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7171{
7172	/* Disallow new interrupts, avoid loop */
7173	ipr_cmd->ioa_cfg->allow_interrupts = 0;
7174	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7175	ipr_cmd->done = ipr_reset_ioa_job;
7176	return IPR_RC_JOB_RETURN;
7177}
7178
7179/**
7180 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7181 * @pdev:	PCI device struct
7182 *
7183 * Description: This routine is called to tell us that the PCI bus
7184 * is down. Can't do anything here, except put the device driver
7185 * into a holding pattern, waiting for the PCI bus to come back.
7186 */
7187static void ipr_pci_frozen(struct pci_dev *pdev)
7188{
7189	unsigned long flags = 0;
7190	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7191
7192	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7193	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7194	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7195}
7196
7197/**
7198 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7199 * @pdev:	PCI device struct
7200 *
7201 * Description: This routine is called by the pci error recovery
7202 * code after the PCI slot has been reset, just before we
7203 * should resume normal operations.
7204 *
 * Return value:
 * 	PCI_ERS_RESULT_RECOVERED
 **/
7205static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7206{
7207	unsigned long flags = 0;
7208	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7209
7210	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
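	/*
	 * Adapters that require a PCI warm reset go through the full reset
	 * path; everything else just restores the saved PCI config space
	 * and resumes the bring-up from there.
	 */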
7211	if (ioa_cfg->needs_warm_reset)
7212		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7213	else
7214		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7215					IPR_SHUTDOWN_NONE);
7216	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7217	return PCI_ERS_RESULT_RECOVERED;
7218}
7219
7220/**
7221 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7222 * @pdev:	PCI device struct
7223 *
7224 * Description: This routine is called when the PCI bus has
7225 * permanently failed.
7226 */
7227static void ipr_pci_perm_failure(struct pci_dev *pdev)
7228{
7229	unsigned long flags = 0;
7230	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7231
7232	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
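	/*
	 * Force the retry counter to its limit and flag a bringdown so that
	 * ipr_initiate_ioa_reset() immediately marks the IOA dead and fails
	 * all outstanding commands.
	 */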
7233	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7234		ioa_cfg->sdt_state = ABORT_DUMP;
7235	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7236	ioa_cfg->in_ioa_bringdown = 1;
7237	ioa_cfg->allow_cmds = 0;
7238	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7239	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7240}
7241
7242/**
7243 * ipr_pci_error_detected - Called when a PCI error is detected.
7244 * @pdev:	PCI device struct
7245 * @state:	PCI channel state
7246 *
7247 * Description: Called when a PCI error is detected.
7248 *
7249 * Return value:
7250 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7251 */
7252static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7253					       pci_channel_state_t state)
7254{
7255	switch (state) {
7256	case pci_channel_io_frozen:
7257		ipr_pci_frozen(pdev);
7258		return PCI_ERS_RESULT_NEED_RESET;
7259	case pci_channel_io_perm_failure:
7260		ipr_pci_perm_failure(pdev);
7261		return PCI_ERS_RESULT_DISCONNECT;
7263	default:
7264		break;
7265	}
7266	return PCI_ERS_RESULT_NEED_RESET;
7267}
7268
7269/**
7270 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7271 * @ioa_cfg:	ioa cfg struct
7272 *
7273 * Description: This is the second phase of adapter initialization.
7274 * This function takes care of initializing the adapter to the point
7275 * where it can accept new commands.
7276 *
7277 * Return value:
7278 * 	0 on success / -EIO on failure
7279 **/
7280static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7281{
7282	int rc = 0;
7283	unsigned long host_lock_flags = 0;
7284
7285	ENTER;
7286	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7287	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg address: 0x%p\n", ioa_cfg);
7288	if (ioa_cfg->needs_hard_reset) {
7289		ioa_cfg->needs_hard_reset = 0;
7290		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7291	} else
7292		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7293					IPR_SHUTDOWN_NONE);
7294
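	/*
	 * Drop the host lock while sleeping; the reset job completes
	 * asynchronously and wakes reset_wait_q once in_reset_reload clears.
	 */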
7295	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7296	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7297	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7298
7299	if (ioa_cfg->ioa_is_dead) {
7300		rc = -EIO;
7301	} else if (ipr_invalid_adapter(ioa_cfg)) {
7302		if (!ipr_testmode)
7303			rc = -EIO;
7304
7305		dev_err(&ioa_cfg->pdev->dev,
7306			"Adapter not supported in this hardware configuration.\n");
7307	}
7308
7309	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7310
7311	LEAVE;
7312	return rc;
7313}
7314
7315/**
7316 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7317 * @ioa_cfg:	ioa config struct
7318 *
7319 * Return value:
7320 * 	none
7321 **/
7322static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7323{
7324	int i;
7325
7326	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7327		if (ioa_cfg->ipr_cmnd_list[i])
7328			pci_pool_free(ioa_cfg->ipr_cmd_pool,
7329				      ioa_cfg->ipr_cmnd_list[i],
7330				      ioa_cfg->ipr_cmnd_list_dma[i]);
7331
7332		ioa_cfg->ipr_cmnd_list[i] = NULL;
7333	}
7334
7335	if (ioa_cfg->ipr_cmd_pool)
7336		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
7337
7338	ioa_cfg->ipr_cmd_pool = NULL;
7339}
7340
7341/**
7342 * ipr_free_mem - Frees memory allocated for an adapter
7343 * @ioa_cfg:	ioa cfg struct
7344 *
7345 * Return value:
7346 * 	nothing
7347 **/
7348static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7349{
7350	int i;
7351
7352	kfree(ioa_cfg->res_entries);
7353	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7354			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7355	ipr_free_cmd_blks(ioa_cfg);
7356	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7357			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7358	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7359			    ioa_cfg->cfg_table,
7360			    ioa_cfg->cfg_table_dma);
7361
7362	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7363		pci_free_consistent(ioa_cfg->pdev,
7364				    sizeof(struct ipr_hostrcb),
7365				    ioa_cfg->hostrcb[i],
7366				    ioa_cfg->hostrcb_dma[i]);
7367	}
7368
7369	ipr_free_dump(ioa_cfg);
7370	kfree(ioa_cfg->trace);
7371}
7372
7373/**
7374 * ipr_free_all_resources - Free all allocated resources for an adapter.
7375 * @ioa_cfg:	ioa config struct
7376 *
7377 * This function frees all allocated resources for the
7378 * specified adapter.
7379 *
7380 * Return value:
7381 * 	none
7382 **/
7383static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7384{
7385	struct pci_dev *pdev = ioa_cfg->pdev;
7386
7387	ENTER;
7388	free_irq(pdev->irq, ioa_cfg);
7389	pci_disable_msi(pdev);
7390	iounmap(ioa_cfg->hdw_dma_regs);
7391	pci_release_regions(pdev);
7392	ipr_free_mem(ioa_cfg);
7393	scsi_host_put(ioa_cfg->host);
7394	pci_disable_device(pdev);
7395	LEAVE;
7396}
7397
7398/**
7399 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7400 * @ioa_cfg:	ioa config struct
7401 *
7402 * Return value:
7403 * 	0 on success / -ENOMEM on allocation failure
7404 **/
7405static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7406{
7407	struct ipr_cmnd *ipr_cmd;
7408	struct ipr_ioarcb *ioarcb;
7409	dma_addr_t dma_addr;
7410	int i;
7411
7412	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
7413						 sizeof(struct ipr_cmnd), 16, 0);
7414
7415	if (!ioa_cfg->ipr_cmd_pool)
7416		return -ENOMEM;
7417
7418	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7419		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7420
7421		if (!ipr_cmd) {
7422			ipr_free_cmd_blks(ioa_cfg);
7423			return -ENOMEM;
7424		}
7425
7426		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7427		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7428		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7429
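		/*
		 * Pre-program each command block's IOARCB with the bus
		 * addresses of its IOADL and IOASA so the adapter can DMA
		 * to/from them directly; SIS-64 adapters use the 64-bit
		 * address fields.
		 */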
7430		ioarcb = &ipr_cmd->ioarcb;
7431		ipr_cmd->dma_addr = dma_addr;
7432		if (ioa_cfg->sis64)
7433			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
7434		else
7435			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7436
7437		ioarcb->host_response_handle = cpu_to_be32(i << 2);
7438		if (ioa_cfg->sis64) {
7439			ioarcb->u.sis64_addr_data.data_ioadl_addr =
7440				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
7441			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
7442				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7443		} else {
7444			ioarcb->write_ioadl_addr =
7445				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
7446			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7447			ioarcb->ioasa_host_pci_addr =
7448				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7449		}
7450		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7451		ipr_cmd->cmd_index = i;
7452		ipr_cmd->ioa_cfg = ioa_cfg;
7453		ipr_cmd->sense_buffer_dma = dma_addr +
7454			offsetof(struct ipr_cmnd, sense_buffer);
7455
7456		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7457	}
7458
7459	return 0;
7460}
7461
7462/**
7463 * ipr_alloc_mem - Allocate memory for an adapter
7464 * @ioa_cfg:	ioa config struct
7465 *
7466 * Return value:
7467 * 	0 on success / non-zero for error
7468 **/
7469static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7470{
7471	struct pci_dev *pdev = ioa_cfg->pdev;
7472	int i, rc = -ENOMEM;
7473
7474	ENTER;
7475	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7476				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7477
7478	if (!ioa_cfg->res_entries)
7479		goto out;
7480
7481	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7482		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7483
7484	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7485						sizeof(struct ipr_misc_cbs),
7486						&ioa_cfg->vpd_cbs_dma);
7487
7488	if (!ioa_cfg->vpd_cbs)
7489		goto out_free_res_entries;
7490
7491	if (ipr_alloc_cmd_blks(ioa_cfg))
7492		goto out_free_vpd_cbs;
7493
7494	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7495						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7496						 &ioa_cfg->host_rrq_dma);
7497
7498	if (!ioa_cfg->host_rrq)
7499		goto out_ipr_free_cmd_blocks;
7500
7501	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7502						  sizeof(struct ipr_config_table),
7503						  &ioa_cfg->cfg_table_dma);
7504
7505	if (!ioa_cfg->cfg_table)
7506		goto out_free_host_rrq;
7507
7508	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7509		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7510							   sizeof(struct ipr_hostrcb),
7511							   &ioa_cfg->hostrcb_dma[i]);
7512
7513		if (!ioa_cfg->hostrcb[i])
7514			goto out_free_hostrcb_dma;
7515
7516		ioa_cfg->hostrcb[i]->hostrcb_dma =
7517			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7518		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7519		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7520	}
7521
7522	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7523				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7524
7525	if (!ioa_cfg->trace)
7526		goto out_free_hostrcb_dma;
7527
7528	rc = 0;
7529out:
7530	LEAVE;
7531	return rc;
7532
7533out_free_hostrcb_dma:
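	/*
	 * i indexes the first hostrcb that failed to allocate (or
	 * IPR_NUM_HCAMS if the trace buffer allocation failed); free only
	 * the buffers that were successfully allocated before it.
	 */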
7534	while (i-- > 0) {
7535		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7536				    ioa_cfg->hostrcb[i],
7537				    ioa_cfg->hostrcb_dma[i]);
7538	}
7539	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7540			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7541out_free_host_rrq:
7542	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7543			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7544out_ipr_free_cmd_blocks:
7545	ipr_free_cmd_blks(ioa_cfg);
7546out_free_vpd_cbs:
7547	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7548			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7549out_free_res_entries:
7550	kfree(ioa_cfg->res_entries);
7551	goto out;
7552}
7553
7554/**
7555 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7556 * @ioa_cfg:	ioa config struct
7557 *
7558 * Return value:
7559 * 	none
7560 **/
7561static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7562{
7563	int i;
7564
7565	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7566		ioa_cfg->bus_attr[i].bus = i;
7567		ioa_cfg->bus_attr[i].qas_enabled = 0;
7568		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
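		/*
		 * Honor the ipr_max_speed module parameter if it names a
		 * valid entry in ipr_max_bus_speeds; otherwise fall back to
		 * the U160 rate.
		 */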
7569		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7570			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7571		else
7572			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7573	}
7574}
7575
7576/**
7577 * ipr_init_ioa_cfg - Initialize IOA config struct
7578 * @ioa_cfg:	ioa config struct
7579 * @host:		scsi host struct
7580 * @pdev:		PCI dev struct
7581 *
7582 * Return value:
7583 * 	none
7584 **/
7585static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7586				       struct Scsi_Host *host, struct pci_dev *pdev)
7587{
7588	const struct ipr_interrupt_offsets *p;
7589	struct ipr_interrupts *t;
7590	void __iomem *base;
7591
7592	ioa_cfg->host = host;
7593	ioa_cfg->pdev = pdev;
7594	ioa_cfg->log_level = ipr_log_level;
7595	ioa_cfg->doorbell = IPR_DOORBELL;
7596	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7597	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7598	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7599	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7600	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7601	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7602	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7603	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7604
7605	INIT_LIST_HEAD(&ioa_cfg->free_q);
7606	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7607	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7608	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7609	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7610	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7611	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7612	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7613	init_waitqueue_head(&ioa_cfg->msi_wait_q);
7614	ioa_cfg->sdt_state = INACTIVE;
7615	if (ipr_enable_cache)
7616		ioa_cfg->cache_state = CACHE_ENABLED;
7617	else
7618		ioa_cfg->cache_state = CACHE_DISABLED;
7619
7620	ipr_initialize_bus_attr(ioa_cfg);
7621
7622	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7623	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7624	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7625	host->unique_id = host->host_no;
7626	host->max_cmd_len = IPR_MAX_CDB_LEN;
7627	pci_set_drvdata(pdev, ioa_cfg);
7628
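	/*
	 * Convert the chip-specific register offsets into ioremapped
	 * addresses so later code can access the registers directly.
	 */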
7629	p = &ioa_cfg->chip_cfg->regs;
7630	t = &ioa_cfg->regs;
7631	base = ioa_cfg->hdw_dma_regs;
7632
7633	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7634	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7635	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7636	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7637	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7638	t->ioarrin_reg = base + p->ioarrin_reg;
7639	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7640	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7641	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7642}
7643
7644/**
7645 * ipr_get_chip_info - Find adapter chip information
7646 * @dev_id:		PCI device id struct
7647 *
7648 * Return value:
7649 * 	ptr to chip information on success / NULL on failure
7650 **/
7651static const struct ipr_chip_t * __devinit
7652ipr_get_chip_info(const struct pci_device_id *dev_id)
7653{
7654	int i;
7655
7656	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7657		if (ipr_chip[i].vendor == dev_id->vendor &&
7658		    ipr_chip[i].device == dev_id->device)
7659			return &ipr_chip[i];
7660	return NULL;
7661}
7662
7663/**
7664 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
7665 * @irq:		interrupt number
 * @devp:		ioa config struct pointer
7666 *
7667 * Description: Simply set the msi_received flag to 1 indicating that
7668 * Message Signaled Interrupts are supported.
7669 *
7670 * Return value:
7671 * 	IRQ_HANDLED
7672 **/
7673static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
7674{
7675	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
7676	unsigned long lock_flags = 0;
7677	irqreturn_t rc = IRQ_HANDLED;
7678
7679	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7680
7681	ioa_cfg->msi_received = 1;
7682	wake_up(&ioa_cfg->msi_wait_q);
7683
7684	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7685	return rc;
7686}
7687
7688/**
7689 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
7690 * @ioa_cfg:		ioa config struct
 * @pdev:		PCI device struct
7691 *
7692 * Description: The return value from pci_enable_msi() cannot always be
7693 * trusted.  This routine sets up and initiates a test interrupt to determine
7694 * if the interrupt is received via the ipr_test_intr() service routine.
7695 * If the test fails, the driver will fall back to LSI.
7696 *
7697 * Return value:
7698 * 	0 on success / non-zero on failure
7699 **/
7700static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7701				  struct pci_dev *pdev)
7702{
7703	int rc;
7704	volatile u32 int_reg;
7705	unsigned long lock_flags = 0;
7706
7707	ENTER;
7708
7709	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7710	init_waitqueue_head(&ioa_cfg->msi_wait_q);
7711	ioa_cfg->msi_received = 0;
7712	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7713	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7714	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7715	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7716
7717	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
7718	if (rc) {
7719		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
7720		return rc;
7721	} else if (ipr_debug)
7722		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
7723
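	/*
	 * Trigger a test interrupt by setting the IO debug acknowledge bit,
	 * then wait up to one second for ipr_test_intr() to observe it.
	 */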
7724	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
7725	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7726	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
7727	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7728
7729	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7730	if (!ioa_cfg->msi_received) {
7731		/* MSI test failed */
7732		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
7733		rc = -EOPNOTSUPP;
7734	} else if (ipr_debug)
7735		dev_info(&pdev->dev, "MSI test succeeded.\n");
7736
7737	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7738
7739	free_irq(pdev->irq, ioa_cfg);
7740
7741	LEAVE;
7742
7743	return rc;
7744}
7745
7746/**
7747 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7748 * @pdev:		PCI device struct
7749 * @dev_id:		PCI device id struct
7750 *
7751 * Return value:
7752 * 	0 on success / non-zero on failure
7753 **/
7754static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7755				   const struct pci_device_id *dev_id)
7756{
7757	struct ipr_ioa_cfg *ioa_cfg;
7758	struct Scsi_Host *host;
7759	unsigned long ipr_regs_pci;
7760	void __iomem *ipr_regs;
7761	int rc = PCIBIOS_SUCCESSFUL;
7762	volatile u32 mask, uproc, interrupts;
7763
7764	ENTER;
7765
7766	if ((rc = pci_enable_device(pdev))) {
7767		dev_err(&pdev->dev, "Cannot enable adapter\n");
7768		goto out;
7769	}
7770
7771	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7772
7773	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7774
7775	if (!host) {
7776		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7777		rc = -ENOMEM;
7778		goto out_disable;
7779	}
7780
7781	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7782	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7783	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7784		      sata_port_info.flags, &ipr_sata_ops);
7785
7786	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
7787
7788	if (!ioa_cfg->ipr_chip) {
7789		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7790			dev_id->vendor, dev_id->device);
7791		goto out_scsi_host_put;
7792	}
7793
7794	/* set SIS 32 or SIS 64 */
7795	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
7796	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7797
7798	if (ipr_transop_timeout)
7799		ioa_cfg->transop_timeout = ipr_transop_timeout;
7800	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7801		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7802	else
7803		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7804
7805	ioa_cfg->revid = pdev->revision;
7806
7807	ipr_regs_pci = pci_resource_start(pdev, 0);
7808
7809	rc = pci_request_regions(pdev, IPR_NAME);
7810	if (rc < 0) {
7811		dev_err(&pdev->dev,
7812			"Couldn't register memory range of registers\n");
7813		goto out_scsi_host_put;
7814	}
7815
7816	ipr_regs = pci_ioremap_bar(pdev, 0);
7817
7818	if (!ipr_regs) {
7819		dev_err(&pdev->dev,
7820			"Couldn't map memory range of registers\n");
7821		rc = -ENOMEM;
7822		goto out_release_regions;
7823	}
7824
7825	ioa_cfg->hdw_dma_regs = ipr_regs;
7826	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7827	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7828
7829	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7830
7831	pci_set_master(pdev);
7832
7833	if (ioa_cfg->sis64) {
7834		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7835		if (rc < 0) {
7836			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
7837			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7838		}
7839
7840	} else
7841		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7842
7843	if (rc < 0) {
7844		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7845		goto cleanup_nomem;
7846	}
7847
7848	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7849				   ioa_cfg->chip_cfg->cache_line_size);
7850
7851	if (rc != PCIBIOS_SUCCESSFUL) {
7852		dev_err(&pdev->dev, "Write of cache line size failed\n");
7853		rc = -EIO;
7854		goto cleanup_nomem;
7855	}
7856
7857	/* Enable MSI style interrupts if they are supported. */
7858	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
7859		rc = ipr_test_msi(ioa_cfg, pdev);
7860		if (rc == -EOPNOTSUPP)
7861			pci_disable_msi(pdev);
7862		else if (rc)
7863			goto out_msi_disable;
7864		else
7865			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
7866	} else if (ipr_debug)
7867		dev_info(&pdev->dev, "Cannot enable MSI.\n");
7868
7869	/* Save away PCI config space for use following IOA reset */
7870	rc = pci_save_state(pdev);
7871
7872	if (rc != PCIBIOS_SUCCESSFUL) {
7873		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7874		rc = -EIO;
7875		goto cleanup_nomem;
7876	}
7877
7878	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7879		goto cleanup_nomem;
7880
7881	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7882		goto cleanup_nomem;
7883
7884	rc = ipr_alloc_mem(ioa_cfg);
7885	if (rc < 0) {
7886		dev_err(&pdev->dev,
7887			"Couldn't allocate enough memory for device driver!\n");
7888		goto cleanup_nomem;
7889	}
7890
7891	/*
7892	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7893	 * the card is in an unknown state and needs a hard reset
7894	 */
7895	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7896	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
7897	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7898	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7899		ioa_cfg->needs_hard_reset = 1;
7900	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7901		ioa_cfg->needs_hard_reset = 1;
7902	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7903		ioa_cfg->ioa_unit_checked = 1;
7904
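	/*
	 * Quiesce adapter interrupts before installing the real handler.
	 * The IRQ is only marked shared when we fell back to legacy (LSI)
	 * interrupts.
	 */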
7905	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7906	rc = request_irq(pdev->irq, ipr_isr,
7907			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
7908			 IPR_NAME, ioa_cfg);
7909
7910	if (rc) {
7911		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7912			pdev->irq, rc);
7913		goto cleanup_nolog;
7914	}
7915
7916	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7917	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7918		ioa_cfg->needs_warm_reset = 1;
7919		ioa_cfg->reset = ipr_reset_slot_reset;
7920	} else
7921		ioa_cfg->reset = ipr_reset_start_bist;
7922
7923	spin_lock(&ipr_driver_lock);
7924	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7925	spin_unlock(&ipr_driver_lock);
7926
7927	LEAVE;
7928out:
7929	return rc;
7930
7931cleanup_nolog:
7932	ipr_free_mem(ioa_cfg);
7933cleanup_nomem:
7934	iounmap(ipr_regs);
7935out_msi_disable:
7936	pci_disable_msi(pdev);
7937out_release_regions:
7938	pci_release_regions(pdev);
7939out_scsi_host_put:
7940	scsi_host_put(host);
7941out_disable:
7942	pci_disable_device(pdev);
7943	goto out;
7944}
7945
7946/**
7947 * ipr_scan_vsets - Scans for VSET devices
7948 * @ioa_cfg:	ioa config struct
7949 *
7950 * Description: Since the VSET resources do not follow SAM in that we can have
7951 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7952 *
7953 * Return value:
7954 * 	none
7955 **/
7956static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7957{
7958	int target, lun;
7959
7960	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7961		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
7962			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7963}
7964
7965/**
7966 * ipr_initiate_ioa_bringdown - Bring down an adapter
7967 * @ioa_cfg:		ioa config struct
7968 * @shutdown_type:	shutdown type
7969 *
7970 * Description: This function will initiate bringing down the adapter.
7971 * This consists of issuing an IOA shutdown to the adapter
7972 * to flush the cache, and running BIST.
7973 * If the caller needs to wait on the completion of the reset,
7974 * the caller must sleep on the reset_wait_q.
7975 *
7976 * Return value:
7977 * 	none
7978 **/
7979static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7980				       enum ipr_shutdown_type shutdown_type)
7981{
7982	ENTER;
7983	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7984		ioa_cfg->sdt_state = ABORT_DUMP;
7985	ioa_cfg->reset_retries = 0;
7986	ioa_cfg->in_ioa_bringdown = 1;
7987	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7988	LEAVE;
7989}
7990
7991/**
7992 * __ipr_remove - Remove a single adapter
7993 * @pdev:	pci device struct
7994 *
7995 * Adapter hot plug remove entry point.
7996 *
7997 * Return value:
7998 * 	none
7999 **/
8000static void __ipr_remove(struct pci_dev *pdev)
8001{
8002	unsigned long host_lock_flags = 0;
8003	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8004	ENTER;
8005
8006	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
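	/*
	 * If a reset/reload is already in flight, drop the lock and wait
	 * for it to finish before starting the bringdown.
	 */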
8007	while (ioa_cfg->in_reset_reload) {
8008		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8009		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8010		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8011	}
8012
8013	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8014
8015	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8016	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8017	flush_scheduled_work();
8018	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8019
8020	spin_lock(&ipr_driver_lock);
8021	list_del(&ioa_cfg->queue);
8022	spin_unlock(&ipr_driver_lock);
8023
8024	if (ioa_cfg->sdt_state == ABORT_DUMP)
8025		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8026	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8027
8028	ipr_free_all_resources(ioa_cfg);
8029
8030	LEAVE;
8031}
8032
8033/**
8034 * ipr_remove - IOA hot plug remove entry point
8035 * @pdev:	pci device struct
8036 *
8037 * Adapter hot plug remove entry point.
8038 *
8039 * Return value:
8040 * 	none
8041 **/
8042static void __devexit ipr_remove(struct pci_dev *pdev)
8043{
8044	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8045
8046	ENTER;
8047
8048	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8049			      &ipr_trace_attr);
8050	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8051			     &ipr_dump_attr);
8052	scsi_remove_host(ioa_cfg->host);
8053
8054	__ipr_remove(pdev);
8055
8056	LEAVE;
8057}
8058
8059/**
8060 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
8061 *
8062 * Return value:
8063 * 	0 on success / non-zero on failure
8064 **/
8065static int __devinit ipr_probe(struct pci_dev *pdev,
8066			       const struct pci_device_id *dev_id)
8067{
8068	struct ipr_ioa_cfg *ioa_cfg;
8069	int rc;
8070
8071	rc = ipr_probe_ioa(pdev, dev_id);
8072
8073	if (rc)
8074		return rc;
8075
8076	ioa_cfg = pci_get_drvdata(pdev);
8077	rc = ipr_probe_ioa_part2(ioa_cfg);
8078
8079	if (rc) {
8080		__ipr_remove(pdev);
8081		return rc;
8082	}
8083
8084	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8085
8086	if (rc) {
8087		__ipr_remove(pdev);
8088		return rc;
8089	}
8090
8091	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8092				   &ipr_trace_attr);
8093
8094	if (rc) {
8095		scsi_remove_host(ioa_cfg->host);
8096		__ipr_remove(pdev);
8097		return rc;
8098	}
8099
8100	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8101				   &ipr_dump_attr);
8102
8103	if (rc) {
8104		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8105				      &ipr_trace_attr);
8106		scsi_remove_host(ioa_cfg->host);
8107		__ipr_remove(pdev);
8108		return rc;
8109	}
8110
8111	scsi_scan_host(ioa_cfg->host);
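	/*
	 * Discover attached devices: a normal SCSI scan, a manual scan for
	 * VSET resources (which may have sparse LUNs), and finally the IOA
	 * itself as a separate device.
	 */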
8112	ipr_scan_vsets(ioa_cfg);
8113	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8114	ioa_cfg->allow_ml_add_del = 1;
8115	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8116	schedule_work(&ioa_cfg->work_q);
8117	return 0;
8118}
8119
8120/**
8121 * ipr_shutdown - Shutdown handler.
8122 * @pdev:	pci device struct
8123 *
8124 * This function is invoked upon system shutdown/reboot. It will issue
8125 * an adapter shutdown to the adapter to flush the write cache.
8126 *
8127 * Return value:
8128 * 	none
8129 **/
8130static void ipr_shutdown(struct pci_dev *pdev)
8131{
8132	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8133	unsigned long lock_flags = 0;
8134
8135	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8136	while (ioa_cfg->in_reset_reload) {
8137		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8138		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8139		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8140	}
8141
8142	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8143	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8144	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8145}
8146
8147static struct pci_device_id ipr_pci_table[] __devinitdata = {
8148	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8149		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8150	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8151		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8152	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8153		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8154	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8155		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8156	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8157		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8158	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8159		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8160	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8161		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8162	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8163		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8164		IPR_USE_LONG_TRANSOP_TIMEOUT },
8165	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8166	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8167	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8168	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8169	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8170	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8171	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8172	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8173	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8174	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8175	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8176	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8177	      IPR_USE_LONG_TRANSOP_TIMEOUT},
8178	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8179	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8180	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8181	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8182	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8183	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8184	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8185	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
8186	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8187	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8188	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8189	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8190	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8191	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8192	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8193		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8194	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8195		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8196	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8197		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8198		IPR_USE_LONG_TRANSOP_TIMEOUT },
8199	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8200		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8201		IPR_USE_LONG_TRANSOP_TIMEOUT },
8202	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
8203		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
8204		IPR_USE_LONG_TRANSOP_TIMEOUT },
8205	{ }
8206};
8207MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8208
8209static struct pci_error_handlers ipr_err_handler = {
8210	.error_detected = ipr_pci_error_detected,
8211	.slot_reset = ipr_pci_slot_reset,
8212};
8213
8214static struct pci_driver ipr_driver = {
8215	.name = IPR_NAME,
8216	.id_table = ipr_pci_table,
8217	.probe = ipr_probe,
8218	.remove = __devexit_p(ipr_remove),
8219	.shutdown = ipr_shutdown,
8220	.err_handler = &ipr_err_handler,
8221};
8222
8223/**
8224 * ipr_init - Module entry point
8225 *
8226 * Return value:
8227 * 	0 on success / negative value on failure
8228 **/
8229static int __init ipr_init(void)
8230{
8231	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8232		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8233
8234	return pci_register_driver(&ipr_driver);
8235}
8236
8237/**
8238 * ipr_exit - Module unload
8239 *
8240 * Module unload entry point.
8241 *
8242 * Return value:
8243 * 	none
8244 **/
8245static void __exit ipr_exit(void)
8246{
8247	pci_unregister_driver(&ipr_driver);
8248}
8249
8250module_init(ipr_init);
8251module_exit(ipr_exit);
8252