ipr.c revision a74c16390a47dcb6c96b20b572ffc9936073d4b1
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <linux/libata.h>
74#include <linux/hdreg.h>
75#include <asm/io.h>
76#include <asm/irq.h>
77#include <asm/processor.h>
78#include <scsi/scsi.h>
79#include <scsi/scsi_host.h>
80#include <scsi/scsi_tcq.h>
81#include <scsi/scsi_eh.h>
82#include <scsi/scsi_cmnd.h>
83#include "ipr.h"
84
85/*
86 *   Global Data
87 */
88static LIST_HEAD(ipr_ioa_head);
89static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = 0;
94static unsigned int ipr_enable_cache = 1;
95static unsigned int ipr_debug = 0;
96static unsigned int ipr_dual_ioa_raid = 1;
97static DEFINE_SPINLOCK(ipr_driver_lock);
98
99/* This table describes the differences between DMA controller chips */
100static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
102		.mailbox = 0x0042C,
103		.cache_line_size = 0x20,
104		{
105			.set_interrupt_mask_reg = 0x0022C,
106			.clr_interrupt_mask_reg = 0x00230,
107			.sense_interrupt_mask_reg = 0x0022C,
108			.clr_interrupt_reg = 0x00228,
109			.sense_interrupt_reg = 0x00224,
110			.ioarrin_reg = 0x00404,
111			.sense_uproc_interrupt_reg = 0x00214,
112			.set_uproc_interrupt_reg = 0x00214,
113			.clr_uproc_interrupt_reg = 0x00218
114		}
115	},
116	{ /* Snipe and Scamp */
117		.mailbox = 0x0052C,
118		.cache_line_size = 0x20,
119		{
120			.set_interrupt_mask_reg = 0x00288,
121			.clr_interrupt_mask_reg = 0x0028C,
122			.sense_interrupt_mask_reg = 0x00288,
123			.clr_interrupt_reg = 0x00284,
124			.sense_interrupt_reg = 0x00280,
125			.ioarrin_reg = 0x00504,
126			.sense_uproc_interrupt_reg = 0x00290,
127			.set_uproc_interrupt_reg = 0x00290,
128			.clr_uproc_interrupt_reg = 0x00294
129		}
130	},
131	{ /* CRoC */
132		.mailbox = 0x00040,
133		.cache_line_size = 0x20,
134		{
135			.set_interrupt_mask_reg = 0x00010,
136			.clr_interrupt_mask_reg = 0x00018,
137			.sense_interrupt_mask_reg = 0x00010,
138			.clr_interrupt_reg = 0x00008,
139			.sense_interrupt_reg = 0x00000,
140			.ioarrin_reg = 0x00070,
141			.sense_uproc_interrupt_reg = 0x00020,
142			.set_uproc_interrupt_reg = 0x00020,
143			.clr_uproc_interrupt_reg = 0x00028
144		}
145	},
146};
147
148static const struct ipr_chip_t ipr_chip[] = {
149	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
150	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
151	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
152	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
153	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
154	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
155	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
156};
157
158static int ipr_max_bus_speeds [] = {
159	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
160};
161
162MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
163MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
164module_param_named(max_speed, ipr_max_speed, uint, 0);
165MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
166module_param_named(log_level, ipr_log_level, uint, 0);
167MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
168module_param_named(testmode, ipr_testmode, int, 0);
169MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
170module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
171MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
172module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
173MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
174module_param_named(enable_cache, ipr_enable_cache, int, 0);
175MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
176module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
177MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
178module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
179MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
180MODULE_LICENSE("GPL");
181MODULE_VERSION(IPR_DRIVER_VERSION);
182
183/*  A constant array of IOASCs/URCs/Error Messages */
184static const
185struct ipr_error_table_t ipr_error_table[] = {
186	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
187	"8155: An unknown error was received"},
188	{0x00330000, 0, 0,
189	"Soft underlength error"},
190	{0x005A0000, 0, 0,
191	"Command to be cancelled not found"},
192	{0x00808000, 0, 0,
193	"Qualified success"},
194	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
195	"FFFE: Soft device bus error recovered by the IOA"},
196	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
197	"4101: Soft device bus fabric error"},
198	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
199	"FFF9: Device sector reassign successful"},
200	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
201	"FFF7: Media error recovered by device rewrite procedures"},
202	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
203	"7001: IOA sector reassignment successful"},
204	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
205	"FFF9: Soft media error. Sector reassignment recommended"},
206	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
207	"FFF7: Media error recovered by IOA rewrite procedures"},
208	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
209	"FF3D: Soft PCI bus error recovered by the IOA"},
210	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
211	"FFF6: Device hardware error recovered by the IOA"},
212	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
213	"FFF6: Device hardware error recovered by the device"},
214	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
215	"FF3D: Soft IOA error recovered by the IOA"},
216	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
217	"FFFA: Undefined device response recovered by the IOA"},
218	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
219	"FFF6: Device bus error, message or command phase"},
220	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
221	"FFFE: Task Management Function failed"},
222	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
223	"FFF6: Failure prediction threshold exceeded"},
224	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
225	"8009: Impending cache battery pack failure"},
226	{0x02040400, 0, 0,
227	"34FF: Disk device format in progress"},
228	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
229	"9070: IOA requested reset"},
230	{0x023F0000, 0, 0,
231	"Synchronization required"},
232	{0x024E0000, 0, 0,
233	"No ready, IOA shutdown"},
234	{0x025A0000, 0, 0,
235	"Not ready, IOA has been shutdown"},
236	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
237	"3020: Storage subsystem configuration error"},
238	{0x03110B00, 0, 0,
239	"FFF5: Medium error, data unreadable, recommend reassign"},
240	{0x03110C00, 0, 0,
241	"7000: Medium error, data unreadable, do not reassign"},
242	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
243	"FFF3: Disk media format bad"},
244	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
245	"3002: Addressed device failed to respond to selection"},
246	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
247	"3100: Device bus error"},
248	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
249	"3109: IOA timed out a device command"},
250	{0x04088000, 0, 0,
251	"3120: SCSI bus is not operational"},
252	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
253	"4100: Hard device bus fabric error"},
254	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
255	"9000: IOA reserved area data check"},
256	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
257	"9001: IOA reserved area invalid data pattern"},
258	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
259	"9002: IOA reserved area LRC error"},
260	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
261	"102E: Out of alternate sectors for disk storage"},
262	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
263	"FFF4: Data transfer underlength error"},
264	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
265	"FFF4: Data transfer overlength error"},
266	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
267	"3400: Logical unit failure"},
268	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
269	"FFF4: Device microcode is corrupt"},
270	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
271	"8150: PCI bus error"},
272	{0x04430000, 1, 0,
273	"Unsupported device bus message received"},
274	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
275	"FFF4: Disk device problem"},
276	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
277	"8150: Permanent IOA failure"},
278	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
279	"3010: Disk device returned wrong response to IOA"},
280	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
281	"8151: IOA microcode error"},
282	{0x04448500, 0, 0,
283	"Device bus status error"},
284	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
285	"8157: IOA error requiring IOA reset to recover"},
286	{0x04448700, 0, 0,
287	"ATA device status error"},
288	{0x04490000, 0, 0,
289	"Message reject received from the device"},
290	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
291	"8008: A permanent cache battery pack failure occurred"},
292	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
293	"9090: Disk unit has been modified after the last known status"},
294	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
295	"9081: IOA detected device error"},
296	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
297	"9082: IOA detected device error"},
298	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
299	"3110: Device bus error, message or command phase"},
300	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
301	"3110: SAS Command / Task Management Function failed"},
302	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
303	"9091: Incorrect hardware configuration change has been detected"},
304	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
305	"9073: Invalid multi-adapter configuration"},
306	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
307	"4010: Incorrect connection between cascaded expanders"},
308	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
309	"4020: Connections exceed IOA design limits"},
310	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
311	"4030: Incorrect multipath connection"},
312	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
313	"4110: Unsupported enclosure function"},
314	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
315	"FFF4: Command to logical unit failed"},
316	{0x05240000, 1, 0,
317	"Illegal request, invalid request type or request packet"},
318	{0x05250000, 0, 0,
319	"Illegal request, invalid resource handle"},
320	{0x05258000, 0, 0,
321	"Illegal request, commands not allowed to this device"},
322	{0x05258100, 0, 0,
323	"Illegal request, command not allowed to a secondary adapter"},
324	{0x05260000, 0, 0,
325	"Illegal request, invalid field in parameter list"},
326	{0x05260100, 0, 0,
327	"Illegal request, parameter not supported"},
328	{0x05260200, 0, 0,
329	"Illegal request, parameter value invalid"},
330	{0x052C0000, 0, 0,
331	"Illegal request, command sequence error"},
332	{0x052C8000, 1, 0,
333	"Illegal request, dual adapter support not enabled"},
334	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
335	"9031: Array protection temporarily suspended, protection resuming"},
336	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
337	"9040: Array protection temporarily suspended, protection resuming"},
338	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
339	"3140: Device bus not ready to ready transition"},
340	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
341	"FFFB: SCSI bus was reset"},
342	{0x06290500, 0, 0,
343	"FFFE: SCSI bus transition to single ended"},
344	{0x06290600, 0, 0,
345	"FFFE: SCSI bus transition to LVD"},
346	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
347	"FFFB: SCSI bus was reset by another initiator"},
348	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
349	"3029: A device replacement has occurred"},
350	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
351	"9051: IOA cache data exists for a missing or failed device"},
352	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
353	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
354	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
355	"9025: Disk unit is not supported at its physical location"},
356	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
357	"3020: IOA detected a SCSI bus configuration error"},
358	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
359	"3150: SCSI bus configuration error"},
360	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
361	"9074: Asymmetric advanced function disk configuration"},
362	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
363	"4040: Incomplete multipath connection between IOA and enclosure"},
364	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
365	"4041: Incomplete multipath connection between enclosure and device"},
366	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
367	"9075: Incomplete multipath connection between IOA and remote IOA"},
368	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
369	"9076: Configuration error, missing remote IOA"},
370	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
371	"4050: Enclosure does not support a required multipath function"},
372	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
373	"4070: Logically bad block written on device"},
374	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
375	"9041: Array protection temporarily suspended"},
376	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
377	"9042: Corrupt array parity detected on specified device"},
378	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
379	"9030: Array no longer protected due to missing or failed disk unit"},
380	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
381	"9071: Link operational transition"},
382	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
383	"9072: Link not operational transition"},
384	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
385	"9032: Array exposed but still protected"},
386	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
387	"70DD: Device forced failed by disrupt device command"},
388	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
389	"4061: Multipath redundancy level got better"},
390	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
391	"4060: Multipath redundancy level got worse"},
392	{0x07270000, 0, 0,
393	"Failure due to other device"},
394	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
395	"9008: IOA does not support functions expected by devices"},
396	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
397	"9010: Cache data associated with attached devices cannot be found"},
398	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
399	"9011: Cache data belongs to devices other than those attached"},
400	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
401	"9020: Array missing 2 or more devices with only 1 device present"},
402	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
403	"9021: Array missing 2 or more devices with 2 or more devices present"},
404	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
405	"9022: Exposed array is missing a required device"},
406	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
407	"9023: Array member(s) not at required physical locations"},
408	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
409	"9024: Array not functional due to present hardware configuration"},
410	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
411	"9026: Array not functional due to present hardware configuration"},
412	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
413	"9027: Array is missing a device and parity is out of sync"},
414	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
415	"9028: Maximum number of arrays already exist"},
416	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
417	"9050: Required cache data cannot be located for a disk unit"},
418	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
419	"9052: Cache data exists for a device that has been modified"},
420	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
421	"9054: IOA resources not available due to previous problems"},
422	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
423	"9092: Disk unit requires initialization before use"},
424	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
425	"9029: Incorrect hardware configuration change has been detected"},
426	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
427	"9060: One or more disk pairs are missing from an array"},
428	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
429	"9061: One or more disks are missing from an array"},
430	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
431	"9062: One or more disks are missing from an array"},
432	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
433	"9063: Maximum number of functional arrays has been exceeded"},
434	{0x0B260000, 0, 0,
435	"Aborted command, invalid descriptor"},
436	{0x0B5A0000, 0, 0,
437	"Command terminated by host"}
438};
439
440static const struct ipr_ses_table_entry ipr_ses_table[] = {
441	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
442	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
443	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
444	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
445	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
446	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
447	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
448	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
449	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
450	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
451	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
452	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
453	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
454};
455
456/*
457 *  Function Prototypes
458 */
459static int ipr_reset_alert(struct ipr_cmnd *);
460static void ipr_process_ccn(struct ipr_cmnd *);
461static void ipr_process_error(struct ipr_cmnd *);
462static void ipr_reset_ioa_job(struct ipr_cmnd *);
463static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
464				   enum ipr_shutdown_type);
465
466#ifdef CONFIG_SCSI_IPR_TRACE
467/**
468 * ipr_trc_hook - Add a trace entry to the driver trace
469 * @ipr_cmd:	ipr command struct
470 * @type:		trace type
471 * @add_data:	additional data
472 *
473 * Return value:
474 * 	none
475 **/
476static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
477			 u8 type, u32 add_data)
478{
479	struct ipr_trace_entry *trace_entry;
480	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
481
482	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
483	trace_entry->time = jiffies;
484	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
485	trace_entry->type = type;
486	if (ipr_cmd->ioa_cfg->sis64)
487		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
488	else
489		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
490	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
491	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
492	trace_entry->u.add_data = add_data;
493}
494#else
495#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
496#endif
497
498/**
499 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
500 * @ipr_cmd:	ipr command struct
501 *
502 * Return value:
503 * 	none
504 **/
505static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
506{
507	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
508	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
509	dma_addr_t dma_addr = ipr_cmd->dma_addr;
510
511	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
512	ioarcb->data_transfer_length = 0;
513	ioarcb->read_data_transfer_length = 0;
514	ioarcb->ioadl_len = 0;
515	ioarcb->read_ioadl_len = 0;
516
517	if (ipr_cmd->ioa_cfg->sis64)
518		ioarcb->u.sis64_addr_data.data_ioadl_addr =
519			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
520	else {
521		ioarcb->write_ioadl_addr =
522			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
523		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
524	}
525
526	ioasa->ioasc = 0;
527	ioasa->residual_data_len = 0;
528	ioasa->u.gata.status = 0;
529
530	ipr_cmd->scsi_cmd = NULL;
531	ipr_cmd->qc = NULL;
532	ipr_cmd->sense_buffer[0] = 0;
533	ipr_cmd->dma_use_sg = 0;
534}
535
536/**
537 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
538 * @ipr_cmd:	ipr command struct
539 *
540 * Return value:
541 * 	none
542 **/
543static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
544{
545	ipr_reinit_ipr_cmnd(ipr_cmd);
546	ipr_cmd->u.scratch = 0;
547	ipr_cmd->sibling = NULL;
548	init_timer(&ipr_cmd->timer);
549}
550
551/**
552 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
553 * @ioa_cfg:	ioa config struct
554 *
555 * Return value:
556 * 	pointer to ipr command struct
557 **/
558static
559struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
560{
561	struct ipr_cmnd *ipr_cmd;
562
563	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
564	list_del(&ipr_cmd->queue);
565	ipr_init_ipr_cmnd(ipr_cmd);
566
567	return ipr_cmd;
568}
569
570/**
571 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
572 * @ioa_cfg:	ioa config struct
573 * @clr_ints:     interrupts to clear
574 *
575 * This function masks all interrupts on the adapter, then clears the
576 * interrupts specified in the mask
577 *
578 * Return value:
579 * 	none
580 **/
581static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
582					  u32 clr_ints)
583{
584	volatile u32 int_reg;
585
586	/* Stop new interrupts */
587	ioa_cfg->allow_interrupts = 0;
588
589	/* Set interrupt mask to stop all new interrupts */
590	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
591
592	/* Clear any pending interrupts */
593	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
594	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
595}
596
597/**
598 * ipr_save_pcix_cmd_reg - Save PCI-X command register
599 * @ioa_cfg:	ioa config struct
600 *
601 * Return value:
602 * 	0 on success / -EIO on failure
603 **/
604static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
605{
606	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
607
608	if (pcix_cmd_reg == 0)
609		return 0;
610
611	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
612				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
613		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
614		return -EIO;
615	}
616
617	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
618	return 0;
619}
620
621/**
622 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
623 * @ioa_cfg:	ioa config struct
624 *
625 * Return value:
626 * 	0 on success / -EIO on failure
627 **/
628static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
629{
630	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
631
632	if (pcix_cmd_reg) {
633		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
634					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
635			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
636			return -EIO;
637		}
638	}
639
640	return 0;
641}
642
643/**
644 * ipr_sata_eh_done - done function for aborted SATA commands
645 * @ipr_cmd:	ipr command struct
646 *
647 * This function is invoked for ops generated to SATA
648 * devices which are being aborted.
649 *
650 * Return value:
651 * 	none
652 **/
653static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
654{
655	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
656	struct ata_queued_cmd *qc = ipr_cmd->qc;
657	struct ipr_sata_port *sata_port = qc->ap->private_data;
658
659	qc->err_mask |= AC_ERR_OTHER;
660	sata_port->ioasa.status |= ATA_BUSY;
661	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
662	ata_qc_complete(qc);
663}
664
665/**
666 * ipr_scsi_eh_done - mid-layer done function for aborted ops
667 * @ipr_cmd:	ipr command struct
668 *
669 * This function is invoked by the interrupt handler for
670 * ops generated by the SCSI mid-layer which are being aborted.
671 *
672 * Return value:
673 * 	none
674 **/
675static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
676{
677	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
678	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
679
680	scsi_cmd->result |= (DID_ERROR << 16);
681
682	scsi_dma_unmap(ipr_cmd->scsi_cmd);
683	scsi_cmd->scsi_done(scsi_cmd);
684	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
685}
686
687/**
688 * ipr_fail_all_ops - Fails all outstanding ops.
689 * @ioa_cfg:	ioa config struct
690 *
691 * This function fails all outstanding ops.
692 *
693 * Return value:
694 * 	none
695 **/
696static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
697{
698	struct ipr_cmnd *ipr_cmd, *temp;
699
700	ENTER;
701	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
702		list_del(&ipr_cmd->queue);
703
704		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
705		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
706
707		if (ipr_cmd->scsi_cmd)
708			ipr_cmd->done = ipr_scsi_eh_done;
709		else if (ipr_cmd->qc)
710			ipr_cmd->done = ipr_sata_eh_done;
711
712		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
713		del_timer(&ipr_cmd->timer);
714		ipr_cmd->done(ipr_cmd);
715	}
716
717	LEAVE;
718}
719
720/**
721 * ipr_send_command -  Send driver initiated requests.
722 * @ipr_cmd:		ipr command struct
723 *
724 * This function sends a command to the adapter using the correct write call.
725 * In the case of sis64, calculate the ioarcb size required. Then or in the
726 * appropriate bits.
727 *
728 * Return value:
729 * 	none
730 **/
731static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
732{
733	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
734	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
735
736	if (ioa_cfg->sis64) {
737		/* The default size is 256 bytes */
738		send_dma_addr |= 0x1;
739
740		/* If the number of ioadls * size of ioadl > 128 bytes,
741		   then use a 512 byte ioarcb */
742		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
743			send_dma_addr |= 0x4;
744		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
745	} else
746		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
747}
748
749/**
750 * ipr_do_req -  Send driver initiated requests.
751 * @ipr_cmd:		ipr command struct
752 * @done:			done function
753 * @timeout_func:	timeout function
754 * @timeout:		timeout value
755 *
756 * This function sends the specified command to the adapter with the
757 * timeout given. The done function is invoked on command completion.
758 *
759 * Return value:
760 * 	none
761 **/
762static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
763		       void (*done) (struct ipr_cmnd *),
764		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
765{
766	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
767
768	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
769
770	ipr_cmd->done = done;
771
772	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
773	ipr_cmd->timer.expires = jiffies + timeout;
774	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
775
776	add_timer(&ipr_cmd->timer);
777
778	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
779
780	mb();
781
782	ipr_send_command(ipr_cmd);
783}
784
785/**
786 * ipr_internal_cmd_done - Op done function for an internally generated op.
787 * @ipr_cmd:	ipr command struct
788 *
789 * This function is the op done function for an internally generated,
790 * blocking op. It simply wakes the sleeping thread.
791 *
792 * Return value:
793 * 	none
794 **/
795static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
796{
797	if (ipr_cmd->sibling)
798		ipr_cmd->sibling = NULL;
799	else
800		complete(&ipr_cmd->completion);
801}
802
803/**
804 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
805 * @ipr_cmd:	ipr command struct
806 * @dma_addr:	dma address
807 * @len:	transfer length
808 * @flags:	ioadl flag value
809 *
810 * This function initializes an ioadl in the case where there is only a single
811 * descriptor.
812 *
813 * Return value:
814 * 	nothing
815 **/
816static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
817			   u32 len, int flags)
818{
819	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
820	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
821
822	ipr_cmd->dma_use_sg = 1;
823
824	if (ipr_cmd->ioa_cfg->sis64) {
825		ioadl64->flags = cpu_to_be32(flags);
826		ioadl64->data_len = cpu_to_be32(len);
827		ioadl64->address = cpu_to_be64(dma_addr);
828
829		ipr_cmd->ioarcb.ioadl_len =
830		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
831		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
832	} else {
833		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
834		ioadl->address = cpu_to_be32(dma_addr);
835
836		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
837			ipr_cmd->ioarcb.read_ioadl_len =
838				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
839			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
840		} else {
841			ipr_cmd->ioarcb.ioadl_len =
842			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
843			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
844		}
845	}
846}
847
848/**
849 * ipr_send_blocking_cmd - Send command and sleep on its completion.
850 * @ipr_cmd:	ipr command struct
851 * @timeout_func:	function to invoke if command times out
852 * @timeout:	timeout
853 *
854 * Return value:
855 * 	none
856 **/
857static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
858				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
859				  u32 timeout)
860{
861	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
862
863	init_completion(&ipr_cmd->completion);
864	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
865
866	spin_unlock_irq(ioa_cfg->host->host_lock);
867	wait_for_completion(&ipr_cmd->completion);
868	spin_lock_irq(ioa_cfg->host->host_lock);
869}
870
871/**
872 * ipr_send_hcam - Send an HCAM to the adapter.
873 * @ioa_cfg:	ioa config struct
874 * @type:		HCAM type
875 * @hostrcb:	hostrcb struct
876 *
877 * This function will send a Host Controlled Async command to the adapter.
878 * If HCAMs are currently not allowed to be issued to the adapter, it will
879 * place the hostrcb on the free queue.
880 *
881 * Return value:
882 * 	none
883 **/
884static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
885			  struct ipr_hostrcb *hostrcb)
886{
887	struct ipr_cmnd *ipr_cmd;
888	struct ipr_ioarcb *ioarcb;
889
890	if (ioa_cfg->allow_cmds) {
891		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
892		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
893		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
894
895		ipr_cmd->u.hostrcb = hostrcb;
896		ioarcb = &ipr_cmd->ioarcb;
897
898		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
899		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
900		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
901		ioarcb->cmd_pkt.cdb[1] = type;
902		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
903		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
904
905		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
906			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
907
908		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
909			ipr_cmd->done = ipr_process_ccn;
910		else
911			ipr_cmd->done = ipr_process_error;
912
913		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
914
915		mb();
916
917		ipr_send_command(ipr_cmd);
918	} else {
919		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
920	}
921}
922
923/**
924 * ipr_init_res_entry - Initialize a resource entry struct.
925 * @res:	resource entry struct
926 *
927 * Return value:
928 * 	none
929 **/
930static void ipr_init_res_entry(struct ipr_resource_entry *res)
931{
932	res->needs_sync_complete = 0;
933	res->in_erp = 0;
934	res->add_to_ml = 0;
935	res->del_from_ml = 0;
936	res->resetting_device = 0;
937	res->sdev = NULL;
938	res->sata_port = NULL;
939}
940
941/**
942 * ipr_handle_config_change - Handle a config change from the adapter
943 * @ioa_cfg:	ioa config struct
944 * @hostrcb:	hostrcb
945 *
946 * Return value:
947 * 	none
948 **/
949static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
950			      struct ipr_hostrcb *hostrcb)
951{
952	struct ipr_resource_entry *res = NULL;
953	struct ipr_config_table_entry *cfgte;
954	u32 is_ndn = 1;
955
956	cfgte = &hostrcb->hcam.u.ccn.cfgte;
957
958	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
959		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
960			    sizeof(cfgte->res_addr))) {
961			is_ndn = 0;
962			break;
963		}
964	}
965
966	if (is_ndn) {
967		if (list_empty(&ioa_cfg->free_res_q)) {
968			ipr_send_hcam(ioa_cfg,
969				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
970				      hostrcb);
971			return;
972		}
973
974		res = list_entry(ioa_cfg->free_res_q.next,
975				 struct ipr_resource_entry, queue);
976
977		list_del(&res->queue);
978		ipr_init_res_entry(res);
979		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
980	}
981
982	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
983
984	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
985		if (res->sdev) {
986			res->del_from_ml = 1;
987			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
988			if (ioa_cfg->allow_ml_add_del)
989				schedule_work(&ioa_cfg->work_q);
990		} else
991			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
992	} else if (!res->sdev) {
993		res->add_to_ml = 1;
994		if (ioa_cfg->allow_ml_add_del)
995			schedule_work(&ioa_cfg->work_q);
996	}
997
998	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
999}
1000
1001/**
1002 * ipr_process_ccn - Op done function for a CCN.
1003 * @ipr_cmd:	ipr command struct
1004 *
1005 * This function is the op done function for a configuration
1006 * change notification host controlled async from the adapter.
1007 *
1008 * Return value:
1009 * 	none
1010 **/
1011static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1012{
1013	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1014	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1015	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1016
1017	list_del(&hostrcb->queue);
1018	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1019
1020	if (ioasc) {
1021		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1022			dev_err(&ioa_cfg->pdev->dev,
1023				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1024
1025		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1026	} else {
1027		ipr_handle_config_change(ioa_cfg, hostrcb);
1028	}
1029}
1030
1031/**
1032 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1033 * @i:		index into buffer
1034 * @buf:		string to modify
1035 *
1036 * This function will strip all trailing whitespace, pad the end
1037 * of the string with a single space, and NULL terminate the string.
1038 *
1039 * Return value:
1040 * 	new length of string
1041 **/
1042static int strip_and_pad_whitespace(int i, char *buf)
1043{
1044	while (i && buf[i] == ' ')
1045		i--;
1046	buf[i+1] = ' ';
1047	buf[i+2] = '\0';
1048	return i + 2;
1049}
1050
1051/**
1052 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1053 * @prefix:		string to print at start of printk
1054 * @hostrcb:	hostrcb pointer
1055 * @vpd:		vendor/product id/sn struct
1056 *
1057 * Return value:
1058 * 	none
1059 **/
1060static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1061				struct ipr_vpd *vpd)
1062{
1063	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1064	int i = 0;
1065
1066	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1067	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1068
1069	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1070	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1071
1072	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1073	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1074
1075	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1076}
1077
1078/**
1079 * ipr_log_vpd - Log the passed VPD to the error log.
1080 * @vpd:		vendor/product id/sn struct
1081 *
1082 * Return value:
1083 * 	none
1084 **/
1085static void ipr_log_vpd(struct ipr_vpd *vpd)
1086{
1087	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1088		    + IPR_SERIAL_NUM_LEN];
1089
1090	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1091	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1092	       IPR_PROD_ID_LEN);
1093	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1094	ipr_err("Vendor/Product ID: %s\n", buffer);
1095
1096	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1097	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1098	ipr_err("    Serial Number: %s\n", buffer);
1099}
1100
1101/**
1102 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1103 * @prefix:		string to print at start of printk
1104 * @hostrcb:	hostrcb pointer
1105 * @vpd:		vendor/product id/sn/wwn struct
1106 *
1107 * Return value:
1108 * 	none
1109 **/
1110static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1111				    struct ipr_ext_vpd *vpd)
1112{
1113	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1114	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1115		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1116}
1117
1118/**
1119 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1120 * @vpd:		vendor/product id/sn/wwn struct
1121 *
1122 * Return value:
1123 * 	none
1124 **/
1125static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1126{
1127	ipr_log_vpd(&vpd->vpd);
1128	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1129		be32_to_cpu(vpd->wwid[1]));
1130}
1131
1132/**
1133 * ipr_log_enhanced_cache_error - Log a cache error.
1134 * @ioa_cfg:	ioa config struct
1135 * @hostrcb:	hostrcb struct
1136 *
1137 * Return value:
1138 * 	none
1139 **/
1140static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1141					 struct ipr_hostrcb *hostrcb)
1142{
1143	struct ipr_hostrcb_type_12_error *error =
1144		&hostrcb->hcam.u.error.u.type_12_error;
1145
1146	ipr_err("-----Current Configuration-----\n");
1147	ipr_err("Cache Directory Card Information:\n");
1148	ipr_log_ext_vpd(&error->ioa_vpd);
1149	ipr_err("Adapter Card Information:\n");
1150	ipr_log_ext_vpd(&error->cfc_vpd);
1151
1152	ipr_err("-----Expected Configuration-----\n");
1153	ipr_err("Cache Directory Card Information:\n");
1154	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1155	ipr_err("Adapter Card Information:\n");
1156	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1157
1158	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1159		     be32_to_cpu(error->ioa_data[0]),
1160		     be32_to_cpu(error->ioa_data[1]),
1161		     be32_to_cpu(error->ioa_data[2]));
1162}
1163
1164/**
1165 * ipr_log_cache_error - Log a cache error.
1166 * @ioa_cfg:	ioa config struct
1167 * @hostrcb:	hostrcb struct
1168 *
1169 * Return value:
1170 * 	none
1171 **/
1172static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1173				struct ipr_hostrcb *hostrcb)
1174{
1175	struct ipr_hostrcb_type_02_error *error =
1176		&hostrcb->hcam.u.error.u.type_02_error;
1177
1178	ipr_err("-----Current Configuration-----\n");
1179	ipr_err("Cache Directory Card Information:\n");
1180	ipr_log_vpd(&error->ioa_vpd);
1181	ipr_err("Adapter Card Information:\n");
1182	ipr_log_vpd(&error->cfc_vpd);
1183
1184	ipr_err("-----Expected Configuration-----\n");
1185	ipr_err("Cache Directory Card Information:\n");
1186	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1187	ipr_err("Adapter Card Information:\n");
1188	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1189
1190	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1191		     be32_to_cpu(error->ioa_data[0]),
1192		     be32_to_cpu(error->ioa_data[1]),
1193		     be32_to_cpu(error->ioa_data[2]));
1194}
1195
1196/**
1197 * ipr_log_enhanced_config_error - Log a configuration error.
1198 * @ioa_cfg:	ioa config struct
1199 * @hostrcb:	hostrcb struct
1200 *
1201 * Return value:
1202 * 	none
1203 **/
1204static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1205					  struct ipr_hostrcb *hostrcb)
1206{
1207	int errors_logged, i;
1208	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1209	struct ipr_hostrcb_type_13_error *error;
1210
1211	error = &hostrcb->hcam.u.error.u.type_13_error;
1212	errors_logged = be32_to_cpu(error->errors_logged);
1213
1214	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1215		be32_to_cpu(error->errors_detected), errors_logged);
1216
1217	dev_entry = error->dev;
1218
1219	for (i = 0; i < errors_logged; i++, dev_entry++) {
1220		ipr_err_separator;
1221
1222		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1223		ipr_log_ext_vpd(&dev_entry->vpd);
1224
1225		ipr_err("-----New Device Information-----\n");
1226		ipr_log_ext_vpd(&dev_entry->new_vpd);
1227
1228		ipr_err("Cache Directory Card Information:\n");
1229		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1230
1231		ipr_err("Adapter Card Information:\n");
1232		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1233	}
1234}
1235
1236/**
1237 * ipr_log_config_error - Log a configuration error.
1238 * @ioa_cfg:	ioa config struct
1239 * @hostrcb:	hostrcb struct
1240 *
1241 * Return value:
1242 * 	none
1243 **/
1244static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1245				 struct ipr_hostrcb *hostrcb)
1246{
1247	int errors_logged, i;
1248	struct ipr_hostrcb_device_data_entry *dev_entry;
1249	struct ipr_hostrcb_type_03_error *error;
1250
1251	error = &hostrcb->hcam.u.error.u.type_03_error;
1252	errors_logged = be32_to_cpu(error->errors_logged);
1253
1254	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1255		be32_to_cpu(error->errors_detected), errors_logged);
1256
1257	dev_entry = error->dev;
1258
1259	for (i = 0; i < errors_logged; i++, dev_entry++) {
1260		ipr_err_separator;
1261
1262		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1263		ipr_log_vpd(&dev_entry->vpd);
1264
1265		ipr_err("-----New Device Information-----\n");
1266		ipr_log_vpd(&dev_entry->new_vpd);
1267
1268		ipr_err("Cache Directory Card Information:\n");
1269		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1270
1271		ipr_err("Adapter Card Information:\n");
1272		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1273
1274		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1275			be32_to_cpu(dev_entry->ioa_data[0]),
1276			be32_to_cpu(dev_entry->ioa_data[1]),
1277			be32_to_cpu(dev_entry->ioa_data[2]),
1278			be32_to_cpu(dev_entry->ioa_data[3]),
1279			be32_to_cpu(dev_entry->ioa_data[4]));
1280	}
1281}
1282
1283/**
1284 * ipr_log_enhanced_array_error - Log an array configuration error.
1285 * @ioa_cfg:	ioa config struct
1286 * @hostrcb:	hostrcb struct
1287 *
1288 * Return value:
1289 * 	none
1290 **/
1291static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1292					 struct ipr_hostrcb *hostrcb)
1293{
1294	int i, num_entries;
1295	struct ipr_hostrcb_type_14_error *error;
1296	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1297	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1298
1299	error = &hostrcb->hcam.u.error.u.type_14_error;
1300
1301	ipr_err_separator;
1302
1303	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1304		error->protection_level,
1305		ioa_cfg->host->host_no,
1306		error->last_func_vset_res_addr.bus,
1307		error->last_func_vset_res_addr.target,
1308		error->last_func_vset_res_addr.lun);
1309
1310	ipr_err_separator;
1311
1312	array_entry = error->array_member;
1313	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1314			    sizeof(error->array_member));
1315
1316	for (i = 0; i < num_entries; i++, array_entry++) {
1317		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1318			continue;
1319
1320		if (be32_to_cpu(error->exposed_mode_adn) == i)
1321			ipr_err("Exposed Array Member %d:\n", i);
1322		else
1323			ipr_err("Array Member %d:\n", i);
1324
1325		ipr_log_ext_vpd(&array_entry->vpd);
1326		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1327		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1328				 "Expected Location");
1329
1330		ipr_err_separator;
1331	}
1332}
1333
1334/**
1335 * ipr_log_array_error - Log an array configuration error.
1336 * @ioa_cfg:	ioa config struct
1337 * @hostrcb:	hostrcb struct
1338 *
1339 * Return value:
1340 * 	none
1341 **/
1342static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1343				struct ipr_hostrcb *hostrcb)
1344{
1345	int i;
1346	struct ipr_hostrcb_type_04_error *error;
1347	struct ipr_hostrcb_array_data_entry *array_entry;
1348	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1349
1350	error = &hostrcb->hcam.u.error.u.type_04_error;
1351
1352	ipr_err_separator;
1353
1354	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1355		error->protection_level,
1356		ioa_cfg->host->host_no,
1357		error->last_func_vset_res_addr.bus,
1358		error->last_func_vset_res_addr.target,
1359		error->last_func_vset_res_addr.lun);
1360
1361	ipr_err_separator;
1362
1363	array_entry = error->array_member;
1364
1365	for (i = 0; i < 18; i++) {
1366		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1367			continue;
1368
1369		if (be32_to_cpu(error->exposed_mode_adn) == i)
1370			ipr_err("Exposed Array Member %d:\n", i);
1371		else
1372			ipr_err("Array Member %d:\n", i);
1373
1374		ipr_log_vpd(&array_entry->vpd);
1375
1376		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1377		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1378				 "Expected Location");
1379
1380		ipr_err_separator;
1381
1382		if (i == 9)
1383			array_entry = error->array_member2;
1384		else
1385			array_entry++;
1386	}
1387}
1388
1389/**
1390 * ipr_log_hex_data - Log additional hex IOA error data.
1391 * @ioa_cfg:	ioa config struct
1392 * @data:		IOA error data
1393 * @len:		data length
1394 *
1395 * Return value:
1396 * 	none
1397 **/
1398static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1399{
1400	int i;
1401
1402	if (len == 0)
1403		return;
1404
1405	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1406		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1407
1408	for (i = 0; i < len / 4; i += 4) {
1409		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1410			be32_to_cpu(data[i]),
1411			be32_to_cpu(data[i+1]),
1412			be32_to_cpu(data[i+2]),
1413			be32_to_cpu(data[i+3]));
1414	}
1415}
1416
1417/**
1418 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1419 * @ioa_cfg:	ioa config struct
1420 * @hostrcb:	hostrcb struct
1421 *
1422 * Return value:
1423 * 	none
1424 **/
1425static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1426					    struct ipr_hostrcb *hostrcb)
1427{
1428	struct ipr_hostrcb_type_17_error *error;
1429
1430	error = &hostrcb->hcam.u.error.u.type_17_error;
1431	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1432	strim(error->failure_reason);
1433
1434	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1435		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1436	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1437	ipr_log_hex_data(ioa_cfg, error->data,
1438			 be32_to_cpu(hostrcb->hcam.length) -
1439			 (offsetof(struct ipr_hostrcb_error, u) +
1440			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1441}
1442
1443/**
1444 * ipr_log_dual_ioa_error - Log a dual adapter error.
1445 * @ioa_cfg:	ioa config struct
1446 * @hostrcb:	hostrcb struct
1447 *
1448 * Return value:
1449 * 	none
1450 **/
1451static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1452				   struct ipr_hostrcb *hostrcb)
1453{
1454	struct ipr_hostrcb_type_07_error *error;
1455
1456	error = &hostrcb->hcam.u.error.u.type_07_error;
1457	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1458	strim(error->failure_reason);
1459
1460	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1461		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1462	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1463	ipr_log_hex_data(ioa_cfg, error->data,
1464			 be32_to_cpu(hostrcb->hcam.length) -
1465			 (offsetof(struct ipr_hostrcb_error, u) +
1466			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1467}
1468
1469static const struct {
1470	u8 active;
1471	char *desc;
1472} path_active_desc[] = {
1473	{ IPR_PATH_NO_INFO, "Path" },
1474	{ IPR_PATH_ACTIVE, "Active path" },
1475	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1476};
1477
1478static const struct {
1479	u8 state;
1480	char *desc;
1481} path_state_desc[] = {
1482	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1483	{ IPR_PATH_HEALTHY, "is healthy" },
1484	{ IPR_PATH_DEGRADED, "is degraded" },
1485	{ IPR_PATH_FAILED, "is failed" }
1486};
1487
1488/**
1489 * ipr_log_fabric_path - Log a fabric path error
1490 * @hostrcb:	hostrcb struct
1491 * @fabric:		fabric descriptor
1492 *
1493 * Return value:
1494 * 	none
1495 **/
1496static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1497				struct ipr_hostrcb_fabric_desc *fabric)
1498{
1499	int i, j;
1500	u8 path_state = fabric->path_state;
1501	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1502	u8 state = path_state & IPR_PATH_STATE_MASK;
1503
1504	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1505		if (path_active_desc[i].active != active)
1506			continue;
1507
1508		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1509			if (path_state_desc[j].state != state)
1510				continue;
1511
1512			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1513				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1514					     path_active_desc[i].desc, path_state_desc[j].desc,
1515					     fabric->ioa_port);
1516			} else if (fabric->cascaded_expander == 0xff) {
1517				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1518					     path_active_desc[i].desc, path_state_desc[j].desc,
1519					     fabric->ioa_port, fabric->phy);
1520			} else if (fabric->phy == 0xff) {
1521				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1522					     path_active_desc[i].desc, path_state_desc[j].desc,
1523					     fabric->ioa_port, fabric->cascaded_expander);
1524			} else {
1525				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1526					     path_active_desc[i].desc, path_state_desc[j].desc,
1527					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1528			}
1529			return;
1530		}
1531	}
1532
1533	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1534		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1535}
1536
1537static const struct {
1538	u8 type;
1539	char *desc;
1540} path_type_desc[] = {
1541	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1542	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1543	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1544	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1545};
1546
1547static const struct {
1548	u8 status;
1549	char *desc;
1550} path_status_desc[] = {
1551	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1552	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1553	{ IPR_PATH_CFG_FAILED, "Failed" },
1554	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1555	{ IPR_PATH_NOT_DETECTED, "Missing" },
1556	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1557};
1558
1559static const char *link_rate[] = {
1560	"unknown",
1561	"disabled",
1562	"phy reset problem",
1563	"spinup hold",
1564	"port selector",
1565	"unknown",
1566	"unknown",
1567	"unknown",
1568	"1.5Gbps",
1569	"3.0Gbps",
1570	"unknown",
1571	"unknown",
1572	"unknown",
1573	"unknown",
1574	"unknown",
1575	"unknown"
1576};
1577
1578/**
1579 * ipr_log_path_elem - Log a fabric path element.
1580 * @hostrcb:	hostrcb struct
1581 * @cfg:		fabric path element struct
1582 *
1583 * Return value:
1584 * 	none
1585 **/
1586static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1587			      struct ipr_hostrcb_config_element *cfg)
1588{
1589	int i, j;
1590	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1591	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1592
1593	if (type == IPR_PATH_CFG_NOT_EXIST)
1594		return;
1595
1596	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1597		if (path_type_desc[i].type != type)
1598			continue;
1599
1600		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1601			if (path_status_desc[j].status != status)
1602				continue;
1603
1604			if (type == IPR_PATH_CFG_IOA_PORT) {
1605				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1606					     path_status_desc[j].desc, path_type_desc[i].desc,
1607					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1608					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1609			} else {
1610				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1611					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1612						     path_status_desc[j].desc, path_type_desc[i].desc,
1613						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1614						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1615				} else if (cfg->cascaded_expander == 0xff) {
1616					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1617						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1618						     path_type_desc[i].desc, cfg->phy,
1619						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1620						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1621				} else if (cfg->phy == 0xff) {
1622					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1623						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1624						     path_type_desc[i].desc, cfg->cascaded_expander,
1625						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1626						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1627				} else {
1628					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1629						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1630						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1631						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1632						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1633				}
1634			}
1635			return;
1636		}
1637	}
1638
1639	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1640		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1641		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1642		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1643}
1644
1645/**
1646 * ipr_log_fabric_error - Log a fabric error.
1647 * @ioa_cfg:	ioa config struct
1648 * @hostrcb:	hostrcb struct
1649 *
1650 * Return value:
1651 * 	none
1652 **/
1653static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1654				 struct ipr_hostrcb *hostrcb)
1655{
1656	struct ipr_hostrcb_type_20_error *error;
1657	struct ipr_hostrcb_fabric_desc *fabric;
1658	struct ipr_hostrcb_config_element *cfg;
1659	int i, add_len;
1660
1661	error = &hostrcb->hcam.u.error.u.type_20_error;
1662	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1663	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1664
1665	add_len = be32_to_cpu(hostrcb->hcam.length) -
1666		(offsetof(struct ipr_hostrcb_error, u) +
1667		 offsetof(struct ipr_hostrcb_type_20_error, desc));
1668
1669	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1670		ipr_log_fabric_path(hostrcb, fabric);
1671		for_each_fabric_cfg(fabric, cfg)
1672			ipr_log_path_elem(hostrcb, cfg);
1673
1674		add_len -= be16_to_cpu(fabric->length);
1675		fabric = (struct ipr_hostrcb_fabric_desc *)
1676			((unsigned long)fabric + be16_to_cpu(fabric->length));
1677	}
1678
1679	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1680}
1681
1682/**
1683 * ipr_log_generic_error - Log an adapter error.
1684 * @ioa_cfg:	ioa config struct
1685 * @hostrcb:	hostrcb struct
1686 *
1687 * Return value:
1688 * 	none
1689 **/
1690static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1691				  struct ipr_hostrcb *hostrcb)
1692{
1693	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1694			 be32_to_cpu(hostrcb->hcam.length));
1695}
1696
1697/**
1698 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1699 * @ioasc:	IOASC
1700 *
1701 * This function will return the index of into the ipr_error_table
1702 * for the specified IOASC. If the IOASC is not in the table,
1703 * 0 will be returned, which points to the entry used for unknown errors.
1704 *
1705 * Return value:
1706 * 	index into the ipr_error_table
1707 **/
1708static u32 ipr_get_error(u32 ioasc)
1709{
1710	int i;
1711
1712	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1713		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1714			return i;
1715
1716	return 0;
1717}
1718
1719/**
1720 * ipr_handle_log_data - Log an adapter error.
1721 * @ioa_cfg:	ioa config struct
1722 * @hostrcb:	hostrcb struct
1723 *
1724 * This function logs an adapter error to the system.
1725 *
1726 * Return value:
1727 * 	none
1728 **/
1729static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1730				struct ipr_hostrcb *hostrcb)
1731{
1732	u32 ioasc;
1733	int error_index;
1734
1735	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1736		return;
1737
1738	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1739		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1740
1741	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1742
1743	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1744	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1745		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1746		scsi_report_bus_reset(ioa_cfg->host,
1747				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1748	}
1749
1750	error_index = ipr_get_error(ioasc);
1751
1752	if (!ipr_error_table[error_index].log_hcam)
1753		return;
1754
1755	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1756
1757	/* Set indication we have logged an error */
1758	ioa_cfg->errors_logged++;
1759
1760	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1761		return;
1762	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1763		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1764
1765	switch (hostrcb->hcam.overlay_id) {
1766	case IPR_HOST_RCB_OVERLAY_ID_2:
1767		ipr_log_cache_error(ioa_cfg, hostrcb);
1768		break;
1769	case IPR_HOST_RCB_OVERLAY_ID_3:
1770		ipr_log_config_error(ioa_cfg, hostrcb);
1771		break;
1772	case IPR_HOST_RCB_OVERLAY_ID_4:
1773	case IPR_HOST_RCB_OVERLAY_ID_6:
1774		ipr_log_array_error(ioa_cfg, hostrcb);
1775		break;
1776	case IPR_HOST_RCB_OVERLAY_ID_7:
1777		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1778		break;
1779	case IPR_HOST_RCB_OVERLAY_ID_12:
1780		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1781		break;
1782	case IPR_HOST_RCB_OVERLAY_ID_13:
1783		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1784		break;
1785	case IPR_HOST_RCB_OVERLAY_ID_14:
1786	case IPR_HOST_RCB_OVERLAY_ID_16:
1787		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1788		break;
1789	case IPR_HOST_RCB_OVERLAY_ID_17:
1790		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1791		break;
1792	case IPR_HOST_RCB_OVERLAY_ID_20:
1793		ipr_log_fabric_error(ioa_cfg, hostrcb);
1794		break;
1795	case IPR_HOST_RCB_OVERLAY_ID_1:
1796	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1797	default:
1798		ipr_log_generic_error(ioa_cfg, hostrcb);
1799		break;
1800	}
1801}
1802
1803/**
1804 * ipr_process_error - Op done function for an adapter error log.
1805 * @ipr_cmd:	ipr command struct
1806 *
1807 * This function is the op done function for an error log host
1808 * controlled async from the adapter. It will log the error and
1809 * send the HCAM back to the adapter.
1810 *
1811 * Return value:
1812 * 	none
1813 **/
1814static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1815{
1816	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1817	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1818	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1819	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1820
1821	list_del(&hostrcb->queue);
1822	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1823
1824	if (!ioasc) {
1825		ipr_handle_log_data(ioa_cfg, hostrcb);
1826		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
1827			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
1828	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1829		dev_err(&ioa_cfg->pdev->dev,
1830			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1831	}
1832
1833	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1834}
1835
1836/**
1837 * ipr_timeout -  An internally generated op has timed out.
1838 * @ipr_cmd:	ipr command struct
1839 *
1840 * This function blocks host requests and initiates an
1841 * adapter reset.
1842 *
1843 * Return value:
1844 * 	none
1845 **/
1846static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1847{
1848	unsigned long lock_flags = 0;
1849	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1850
1851	ENTER;
1852	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1853
1854	ioa_cfg->errors_logged++;
1855	dev_err(&ioa_cfg->pdev->dev,
1856		"Adapter being reset due to command timeout.\n");
1857
1858	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1859		ioa_cfg->sdt_state = GET_DUMP;
1860
1861	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1862		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1863
1864	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1865	LEAVE;
1866}
1867
1868/**
1869 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1870 * @ipr_cmd:	ipr command struct
1871 *
1872 * This function blocks host requests and initiates an
1873 * adapter reset.
1874 *
1875 * Return value:
1876 * 	none
1877 **/
1878static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1879{
1880	unsigned long lock_flags = 0;
1881	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1882
1883	ENTER;
1884	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1885
1886	ioa_cfg->errors_logged++;
1887	dev_err(&ioa_cfg->pdev->dev,
1888		"Adapter timed out transitioning to operational.\n");
1889
1890	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1891		ioa_cfg->sdt_state = GET_DUMP;
1892
1893	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1894		if (ipr_fastfail)
1895			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1896		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1897	}
1898
1899	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1900	LEAVE;
1901}
1902
1903/**
1904 * ipr_reset_reload - Reset/Reload the IOA
1905 * @ioa_cfg:		ioa config struct
1906 * @shutdown_type:	shutdown type
1907 *
1908 * This function resets the adapter and re-initializes it.
1909 * This function assumes that all new host commands have been stopped.
1910 * Return value:
1911 * 	SUCCESS / FAILED
1912 **/
1913static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1914			    enum ipr_shutdown_type shutdown_type)
1915{
1916	if (!ioa_cfg->in_reset_reload)
1917		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1918
1919	spin_unlock_irq(ioa_cfg->host->host_lock);
1920	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1921	spin_lock_irq(ioa_cfg->host->host_lock);
1922
1923	/* If we got hit with a host reset while we were already resetting
1924	 the adapter for some reason, and the reset failed. */
1925	if (ioa_cfg->ioa_is_dead) {
1926		ipr_trace;
1927		return FAILED;
1928	}
1929
1930	return SUCCESS;
1931}
1932
1933/**
1934 * ipr_find_ses_entry - Find matching SES in SES table
1935 * @res:	resource entry struct of SES
1936 *
1937 * Return value:
1938 * 	pointer to SES table entry / NULL on failure
1939 **/
1940static const struct ipr_ses_table_entry *
1941ipr_find_ses_entry(struct ipr_resource_entry *res)
1942{
1943	int i, j, matches;
1944	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1945
1946	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1947		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1948			if (ste->compare_product_id_byte[j] == 'X') {
1949				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1950					matches++;
1951				else
1952					break;
1953			} else
1954				matches++;
1955		}
1956
1957		if (matches == IPR_PROD_ID_LEN)
1958			return ste;
1959	}
1960
1961	return NULL;
1962}
1963
1964/**
1965 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1966 * @ioa_cfg:	ioa config struct
1967 * @bus:		SCSI bus
1968 * @bus_width:	bus width
1969 *
1970 * Return value:
1971 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1972 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
1973 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
1974 *	max 160MHz = max 320MB/sec).
1975 **/
1976static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1977{
1978	struct ipr_resource_entry *res;
1979	const struct ipr_ses_table_entry *ste;
1980	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1981
1982	/* Loop through each config table entry in the config table buffer */
1983	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1984		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1985			continue;
1986
1987		if (bus != res->cfgte.res_addr.bus)
1988			continue;
1989
1990		if (!(ste = ipr_find_ses_entry(res)))
1991			continue;
1992
1993		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1994	}
1995
1996	return max_xfer_rate;
1997}
1998
1999/**
2000 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2001 * @ioa_cfg:		ioa config struct
2002 * @max_delay:		max delay in micro-seconds to wait
2003 *
2004 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2005 *
2006 * Return value:
2007 * 	0 on success / other on failure
2008 **/
2009static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2010{
2011	volatile u32 pcii_reg;
2012	int delay = 1;
2013
2014	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2015	while (delay < max_delay) {
2016		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2017
2018		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2019			return 0;
2020
2021		/* udelay cannot be used if delay is more than a few milliseconds */
2022		if ((delay / 1000) > MAX_UDELAY_MS)
2023			mdelay(delay / 1000);
2024		else
2025			udelay(delay);
2026
2027		delay += delay;
2028	}
2029	return -EIO;
2030}
2031
2032/**
2033 * ipr_get_ldump_data_section - Dump IOA memory
2034 * @ioa_cfg:			ioa config struct
2035 * @start_addr:			adapter address to dump
2036 * @dest:				destination kernel buffer
2037 * @length_in_words:	length to dump in 4 byte words
2038 *
2039 * Return value:
2040 * 	0 on success / -EIO on failure
2041 **/
2042static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2043				      u32 start_addr,
2044				      __be32 *dest, u32 length_in_words)
2045{
2046	volatile u32 temp_pcii_reg;
2047	int i, delay = 0;
2048
2049	/* Write IOA interrupt reg starting LDUMP state  */
2050	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2051	       ioa_cfg->regs.set_uproc_interrupt_reg);
2052
2053	/* Wait for IO debug acknowledge */
2054	if (ipr_wait_iodbg_ack(ioa_cfg,
2055			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2056		dev_err(&ioa_cfg->pdev->dev,
2057			"IOA dump long data transfer timeout\n");
2058		return -EIO;
2059	}
2060
2061	/* Signal LDUMP interlocked - clear IO debug ack */
2062	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2063	       ioa_cfg->regs.clr_interrupt_reg);
2064
2065	/* Write Mailbox with starting address */
2066	writel(start_addr, ioa_cfg->ioa_mailbox);
2067
2068	/* Signal address valid - clear IOA Reset alert */
2069	writel(IPR_UPROCI_RESET_ALERT,
2070	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2071
2072	for (i = 0; i < length_in_words; i++) {
2073		/* Wait for IO debug acknowledge */
2074		if (ipr_wait_iodbg_ack(ioa_cfg,
2075				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2076			dev_err(&ioa_cfg->pdev->dev,
2077				"IOA dump short data transfer timeout\n");
2078			return -EIO;
2079		}
2080
2081		/* Read data from mailbox and increment destination pointer */
2082		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2083		dest++;
2084
2085		/* For all but the last word of data, signal data received */
2086		if (i < (length_in_words - 1)) {
2087			/* Signal dump data received - Clear IO debug Ack */
2088			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2089			       ioa_cfg->regs.clr_interrupt_reg);
2090		}
2091	}
2092
2093	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2094	writel(IPR_UPROCI_RESET_ALERT,
2095	       ioa_cfg->regs.set_uproc_interrupt_reg);
2096
2097	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2098	       ioa_cfg->regs.clr_uproc_interrupt_reg);
2099
2100	/* Signal dump data received - Clear IO debug Ack */
2101	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2102	       ioa_cfg->regs.clr_interrupt_reg);
2103
2104	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2105	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2106		temp_pcii_reg =
2107		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
2108
2109		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2110			return 0;
2111
2112		udelay(10);
2113		delay += 10;
2114	}
2115
2116	return 0;
2117}
2118
2119#ifdef CONFIG_SCSI_IPR_DUMP
2120/**
2121 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2122 * @ioa_cfg:		ioa config struct
2123 * @pci_address:	adapter address
2124 * @length:			length of data to copy
2125 *
2126 * Copy data from PCI adapter to kernel buffer.
2127 * Note: length MUST be a 4 byte multiple
2128 * Return value:
2129 * 	0 on success / other on failure
2130 **/
2131static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2132			unsigned long pci_address, u32 length)
2133{
2134	int bytes_copied = 0;
2135	int cur_len, rc, rem_len, rem_page_len;
2136	__be32 *page;
2137	unsigned long lock_flags = 0;
2138	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2139
2140	while (bytes_copied < length &&
2141	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2142		if (ioa_dump->page_offset >= PAGE_SIZE ||
2143		    ioa_dump->page_offset == 0) {
2144			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2145
2146			if (!page) {
2147				ipr_trace;
2148				return bytes_copied;
2149			}
2150
2151			ioa_dump->page_offset = 0;
2152			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2153			ioa_dump->next_page_index++;
2154		} else
2155			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2156
2157		rem_len = length - bytes_copied;
2158		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2159		cur_len = min(rem_len, rem_page_len);
2160
2161		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2162		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2163			rc = -EIO;
2164		} else {
2165			rc = ipr_get_ldump_data_section(ioa_cfg,
2166							pci_address + bytes_copied,
2167							&page[ioa_dump->page_offset / 4],
2168							(cur_len / sizeof(u32)));
2169		}
2170		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2171
2172		if (!rc) {
2173			ioa_dump->page_offset += cur_len;
2174			bytes_copied += cur_len;
2175		} else {
2176			ipr_trace;
2177			break;
2178		}
2179		schedule();
2180	}
2181
2182	return bytes_copied;
2183}
2184
2185/**
2186 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2187 * @hdr:	dump entry header struct
2188 *
2189 * Return value:
2190 * 	nothing
2191 **/
2192static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2193{
2194	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2195	hdr->num_elems = 1;
2196	hdr->offset = sizeof(*hdr);
2197	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2198}
2199
2200/**
2201 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2202 * @ioa_cfg:	ioa config struct
2203 * @driver_dump:	driver dump struct
2204 *
2205 * Return value:
2206 * 	nothing
2207 **/
2208static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2209				   struct ipr_driver_dump *driver_dump)
2210{
2211	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2212
2213	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2214	driver_dump->ioa_type_entry.hdr.len =
2215		sizeof(struct ipr_dump_ioa_type_entry) -
2216		sizeof(struct ipr_dump_entry_header);
2217	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2218	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2219	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2220	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2221		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2222		ucode_vpd->minor_release[1];
2223	driver_dump->hdr.num_entries++;
2224}
2225
2226/**
2227 * ipr_dump_version_data - Fill in the driver version in the dump.
2228 * @ioa_cfg:	ioa config struct
2229 * @driver_dump:	driver dump struct
2230 *
2231 * Return value:
2232 * 	nothing
2233 **/
2234static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2235				  struct ipr_driver_dump *driver_dump)
2236{
2237	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2238	driver_dump->version_entry.hdr.len =
2239		sizeof(struct ipr_dump_version_entry) -
2240		sizeof(struct ipr_dump_entry_header);
2241	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2242	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2243	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2244	driver_dump->hdr.num_entries++;
2245}
2246
2247/**
2248 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2249 * @ioa_cfg:	ioa config struct
2250 * @driver_dump:	driver dump struct
2251 *
2252 * Return value:
2253 * 	nothing
2254 **/
2255static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2256				   struct ipr_driver_dump *driver_dump)
2257{
2258	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2259	driver_dump->trace_entry.hdr.len =
2260		sizeof(struct ipr_dump_trace_entry) -
2261		sizeof(struct ipr_dump_entry_header);
2262	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2263	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2264	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2265	driver_dump->hdr.num_entries++;
2266}
2267
2268/**
2269 * ipr_dump_location_data - Fill in the IOA location in the dump.
2270 * @ioa_cfg:	ioa config struct
2271 * @driver_dump:	driver dump struct
2272 *
2273 * Return value:
2274 * 	nothing
2275 **/
2276static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2277				   struct ipr_driver_dump *driver_dump)
2278{
2279	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2280	driver_dump->location_entry.hdr.len =
2281		sizeof(struct ipr_dump_location_entry) -
2282		sizeof(struct ipr_dump_entry_header);
2283	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2284	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2285	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2286	driver_dump->hdr.num_entries++;
2287}
2288
2289/**
2290 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2291 * @ioa_cfg:	ioa config struct
2292 * @dump:		dump struct
2293 *
2294 * Return value:
2295 * 	nothing
2296 **/
2297static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2298{
2299	unsigned long start_addr, sdt_word;
2300	unsigned long lock_flags = 0;
2301	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2302	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2303	u32 num_entries, start_off, end_off;
2304	u32 bytes_to_copy, bytes_copied, rc;
2305	struct ipr_sdt *sdt;
2306	int i;
2307
2308	ENTER;
2309
2310	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2311
2312	if (ioa_cfg->sdt_state != GET_DUMP) {
2313		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2314		return;
2315	}
2316
2317	start_addr = readl(ioa_cfg->ioa_mailbox);
2318
2319	if (!ipr_sdt_is_fmt2(start_addr)) {
2320		dev_err(&ioa_cfg->pdev->dev,
2321			"Invalid dump table format: %lx\n", start_addr);
2322		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2323		return;
2324	}
2325
2326	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2327
2328	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2329
2330	/* Initialize the overall dump header */
2331	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2332	driver_dump->hdr.num_entries = 1;
2333	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2334	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2335	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2336	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2337
2338	ipr_dump_version_data(ioa_cfg, driver_dump);
2339	ipr_dump_location_data(ioa_cfg, driver_dump);
2340	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2341	ipr_dump_trace_data(ioa_cfg, driver_dump);
2342
2343	/* Update dump_header */
2344	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2345
2346	/* IOA Dump entry */
2347	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2348	ioa_dump->format = IPR_SDT_FMT2;
2349	ioa_dump->hdr.len = 0;
2350	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2351	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2352
2353	/* First entries in sdt are actually a list of dump addresses and
2354	 lengths to gather the real dump data.  sdt represents the pointer
2355	 to the ioa generated dump table.  Dump data will be extracted based
2356	 on entries in this table */
2357	sdt = &ioa_dump->sdt;
2358
2359	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2360					sizeof(struct ipr_sdt) / sizeof(__be32));
2361
2362	/* Smart Dump table is ready to use and the first entry is valid */
2363	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2364		dev_err(&ioa_cfg->pdev->dev,
2365			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2366			rc, be32_to_cpu(sdt->hdr.state));
2367		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2368		ioa_cfg->sdt_state = DUMP_OBTAINED;
2369		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2370		return;
2371	}
2372
2373	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2374
2375	if (num_entries > IPR_NUM_SDT_ENTRIES)
2376		num_entries = IPR_NUM_SDT_ENTRIES;
2377
2378	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2379
2380	for (i = 0; i < num_entries; i++) {
2381		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2382			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2383			break;
2384		}
2385
2386		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2387			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2388			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2389			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2390
2391			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2392				bytes_to_copy = end_off - start_off;
2393				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2394					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2395					continue;
2396				}
2397
2398				/* Copy data from adapter to driver buffers */
2399				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2400							    bytes_to_copy);
2401
2402				ioa_dump->hdr.len += bytes_copied;
2403
2404				if (bytes_copied != bytes_to_copy) {
2405					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2406					break;
2407				}
2408			}
2409		}
2410	}
2411
2412	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2413
2414	/* Update dump_header */
2415	driver_dump->hdr.len += ioa_dump->hdr.len;
2416	wmb();
2417	ioa_cfg->sdt_state = DUMP_OBTAINED;
2418	LEAVE;
2419}
2420
2421#else
2422#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2423#endif
2424
2425/**
2426 * ipr_release_dump - Free adapter dump memory
2427 * @kref:	kref struct
2428 *
2429 * Return value:
2430 *	nothing
2431 **/
2432static void ipr_release_dump(struct kref *kref)
2433{
2434	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2435	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2436	unsigned long lock_flags = 0;
2437	int i;
2438
2439	ENTER;
2440	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2441	ioa_cfg->dump = NULL;
2442	ioa_cfg->sdt_state = INACTIVE;
2443	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2444
2445	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2446		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2447
2448	kfree(dump);
2449	LEAVE;
2450}
2451
2452/**
2453 * ipr_worker_thread - Worker thread
2454 * @work:		ioa config struct
2455 *
2456 * Called at task level from a work thread. This function takes care
2457 * of adding and removing device from the mid-layer as configuration
2458 * changes are detected by the adapter.
2459 *
2460 * Return value:
2461 * 	nothing
2462 **/
2463static void ipr_worker_thread(struct work_struct *work)
2464{
2465	unsigned long lock_flags;
2466	struct ipr_resource_entry *res;
2467	struct scsi_device *sdev;
2468	struct ipr_dump *dump;
2469	struct ipr_ioa_cfg *ioa_cfg =
2470		container_of(work, struct ipr_ioa_cfg, work_q);
2471	u8 bus, target, lun;
2472	int did_work;
2473
2474	ENTER;
2475	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2476
2477	if (ioa_cfg->sdt_state == GET_DUMP) {
2478		dump = ioa_cfg->dump;
2479		if (!dump) {
2480			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2481			return;
2482		}
2483		kref_get(&dump->kref);
2484		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2485		ipr_get_ioa_dump(ioa_cfg, dump);
2486		kref_put(&dump->kref, ipr_release_dump);
2487
2488		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2489		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2490			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2491		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2492		return;
2493	}
2494
2495restart:
2496	do {
2497		did_work = 0;
2498		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2499			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2500			return;
2501		}
2502
2503		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2504			if (res->del_from_ml && res->sdev) {
2505				did_work = 1;
2506				sdev = res->sdev;
2507				if (!scsi_device_get(sdev)) {
2508					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2509					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2510					scsi_remove_device(sdev);
2511					scsi_device_put(sdev);
2512					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2513				}
2514				break;
2515			}
2516		}
2517	} while(did_work);
2518
2519	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2520		if (res->add_to_ml) {
2521			bus = res->cfgte.res_addr.bus;
2522			target = res->cfgte.res_addr.target;
2523			lun = res->cfgte.res_addr.lun;
2524			res->add_to_ml = 0;
2525			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2526			scsi_add_device(ioa_cfg->host, bus, target, lun);
2527			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2528			goto restart;
2529		}
2530	}
2531
2532	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2533	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
2534	LEAVE;
2535}
2536
2537#ifdef CONFIG_SCSI_IPR_TRACE
2538/**
2539 * ipr_read_trace - Dump the adapter trace
2540 * @kobj:		kobject struct
2541 * @bin_attr:		bin_attribute struct
2542 * @buf:		buffer
2543 * @off:		offset
2544 * @count:		buffer size
2545 *
2546 * Return value:
2547 *	number of bytes printed to buffer
2548 **/
2549static ssize_t ipr_read_trace(struct kobject *kobj,
2550			      struct bin_attribute *bin_attr,
2551			      char *buf, loff_t off, size_t count)
2552{
2553	struct device *dev = container_of(kobj, struct device, kobj);
2554	struct Scsi_Host *shost = class_to_shost(dev);
2555	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2556	unsigned long lock_flags = 0;
2557	ssize_t ret;
2558
2559	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2560	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
2561				IPR_TRACE_SIZE);
2562	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2563
2564	return ret;
2565}
2566
2567static struct bin_attribute ipr_trace_attr = {
2568	.attr =	{
2569		.name = "trace",
2570		.mode = S_IRUGO,
2571	},
2572	.size = 0,
2573	.read = ipr_read_trace,
2574};
2575#endif
2576
2577static const struct {
2578	enum ipr_cache_state state;
2579	char *name;
2580} cache_state [] = {
2581	{ CACHE_NONE, "none" },
2582	{ CACHE_DISABLED, "disabled" },
2583	{ CACHE_ENABLED, "enabled" }
2584};
2585
2586/**
2587 * ipr_show_write_caching - Show the write caching attribute
2588 * @dev:	device struct
2589 * @buf:	buffer
2590 *
2591 * Return value:
2592 *	number of bytes printed to buffer
2593 **/
2594static ssize_t ipr_show_write_caching(struct device *dev,
2595				      struct device_attribute *attr, char *buf)
2596{
2597	struct Scsi_Host *shost = class_to_shost(dev);
2598	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2599	unsigned long lock_flags = 0;
2600	int i, len = 0;
2601
2602	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2603	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2604		if (cache_state[i].state == ioa_cfg->cache_state) {
2605			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2606			break;
2607		}
2608	}
2609	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2610	return len;
2611}
2612
2613
2614/**
2615 * ipr_store_write_caching - Enable/disable adapter write cache
2616 * @dev:	device struct
2617 * @buf:	buffer
2618 * @count:	buffer size
2619 *
2620 * This function will enable/disable adapter write cache.
2621 *
2622 * Return value:
2623 * 	count on success / other on failure
2624 **/
2625static ssize_t ipr_store_write_caching(struct device *dev,
2626				       struct device_attribute *attr,
2627				       const char *buf, size_t count)
2628{
2629	struct Scsi_Host *shost = class_to_shost(dev);
2630	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2631	unsigned long lock_flags = 0;
2632	enum ipr_cache_state new_state = CACHE_INVALID;
2633	int i;
2634
2635	if (!capable(CAP_SYS_ADMIN))
2636		return -EACCES;
2637	if (ioa_cfg->cache_state == CACHE_NONE)
2638		return -EINVAL;
2639
2640	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2641		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2642			new_state = cache_state[i].state;
2643			break;
2644		}
2645	}
2646
2647	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2648		return -EINVAL;
2649
2650	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2651	if (ioa_cfg->cache_state == new_state) {
2652		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2653		return count;
2654	}
2655
2656	ioa_cfg->cache_state = new_state;
2657	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2658		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2659	if (!ioa_cfg->in_reset_reload)
2660		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2661	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2662	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2663
2664	return count;
2665}
2666
2667static struct device_attribute ipr_ioa_cache_attr = {
2668	.attr = {
2669		.name =		"write_cache",
2670		.mode =		S_IRUGO | S_IWUSR,
2671	},
2672	.show = ipr_show_write_caching,
2673	.store = ipr_store_write_caching
2674};
2675
2676/**
2677 * ipr_show_fw_version - Show the firmware version
2678 * @dev:	class device struct
2679 * @buf:	buffer
2680 *
2681 * Return value:
2682 *	number of bytes printed to buffer
2683 **/
2684static ssize_t ipr_show_fw_version(struct device *dev,
2685				   struct device_attribute *attr, char *buf)
2686{
2687	struct Scsi_Host *shost = class_to_shost(dev);
2688	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2689	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2690	unsigned long lock_flags = 0;
2691	int len;
2692
2693	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2694	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2695		       ucode_vpd->major_release, ucode_vpd->card_type,
2696		       ucode_vpd->minor_release[0],
2697		       ucode_vpd->minor_release[1]);
2698	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2699	return len;
2700}
2701
2702static struct device_attribute ipr_fw_version_attr = {
2703	.attr = {
2704		.name =		"fw_version",
2705		.mode =		S_IRUGO,
2706	},
2707	.show = ipr_show_fw_version,
2708};
2709
2710/**
2711 * ipr_show_log_level - Show the adapter's error logging level
2712 * @dev:	class device struct
2713 * @buf:	buffer
2714 *
2715 * Return value:
2716 * 	number of bytes printed to buffer
2717 **/
2718static ssize_t ipr_show_log_level(struct device *dev,
2719				   struct device_attribute *attr, char *buf)
2720{
2721	struct Scsi_Host *shost = class_to_shost(dev);
2722	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2723	unsigned long lock_flags = 0;
2724	int len;
2725
2726	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2727	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2728	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2729	return len;
2730}
2731
2732/**
2733 * ipr_store_log_level - Change the adapter's error logging level
2734 * @dev:	class device struct
2735 * @buf:	buffer
2736 *
2737 * Return value:
2738 * 	number of bytes printed to buffer
2739 **/
2740static ssize_t ipr_store_log_level(struct device *dev,
2741			           struct device_attribute *attr,
2742				   const char *buf, size_t count)
2743{
2744	struct Scsi_Host *shost = class_to_shost(dev);
2745	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2746	unsigned long lock_flags = 0;
2747
2748	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2749	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2750	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2751	return strlen(buf);
2752}
2753
2754static struct device_attribute ipr_log_level_attr = {
2755	.attr = {
2756		.name =		"log_level",
2757		.mode =		S_IRUGO | S_IWUSR,
2758	},
2759	.show = ipr_show_log_level,
2760	.store = ipr_store_log_level
2761};
2762
2763/**
2764 * ipr_store_diagnostics - IOA Diagnostics interface
2765 * @dev:	device struct
2766 * @buf:	buffer
2767 * @count:	buffer size
2768 *
2769 * This function will reset the adapter and wait a reasonable
2770 * amount of time for any errors that the adapter might log.
2771 *
2772 * Return value:
2773 * 	count on success / other on failure
2774 **/
2775static ssize_t ipr_store_diagnostics(struct device *dev,
2776				     struct device_attribute *attr,
2777				     const char *buf, size_t count)
2778{
2779	struct Scsi_Host *shost = class_to_shost(dev);
2780	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2781	unsigned long lock_flags = 0;
2782	int rc = count;
2783
2784	if (!capable(CAP_SYS_ADMIN))
2785		return -EACCES;
2786
2787	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2788	while(ioa_cfg->in_reset_reload) {
2789		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2790		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2791		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2792	}
2793
2794	ioa_cfg->errors_logged = 0;
2795	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2796
2797	if (ioa_cfg->in_reset_reload) {
2798		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2799		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2800
2801		/* Wait for a second for any errors to be logged */
2802		msleep(1000);
2803	} else {
2804		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2805		return -EIO;
2806	}
2807
2808	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2809	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2810		rc = -EIO;
2811	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2812
2813	return rc;
2814}
2815
2816static struct device_attribute ipr_diagnostics_attr = {
2817	.attr = {
2818		.name =		"run_diagnostics",
2819		.mode =		S_IWUSR,
2820	},
2821	.store = ipr_store_diagnostics
2822};
2823
2824/**
2825 * ipr_show_adapter_state - Show the adapter's state
2826 * @class_dev:	device struct
2827 * @buf:	buffer
2828 *
2829 * Return value:
2830 * 	number of bytes printed to buffer
2831 **/
2832static ssize_t ipr_show_adapter_state(struct device *dev,
2833				      struct device_attribute *attr, char *buf)
2834{
2835	struct Scsi_Host *shost = class_to_shost(dev);
2836	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2837	unsigned long lock_flags = 0;
2838	int len;
2839
2840	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2841	if (ioa_cfg->ioa_is_dead)
2842		len = snprintf(buf, PAGE_SIZE, "offline\n");
2843	else
2844		len = snprintf(buf, PAGE_SIZE, "online\n");
2845	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2846	return len;
2847}
2848
2849/**
2850 * ipr_store_adapter_state - Change adapter state
2851 * @dev:	device struct
2852 * @buf:	buffer
2853 * @count:	buffer size
2854 *
2855 * This function will change the adapter's state.
2856 *
2857 * Return value:
2858 * 	count on success / other on failure
2859 **/
2860static ssize_t ipr_store_adapter_state(struct device *dev,
2861				       struct device_attribute *attr,
2862				       const char *buf, size_t count)
2863{
2864	struct Scsi_Host *shost = class_to_shost(dev);
2865	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2866	unsigned long lock_flags;
2867	int result = count;
2868
2869	if (!capable(CAP_SYS_ADMIN))
2870		return -EACCES;
2871
2872	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2873	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2874		ioa_cfg->ioa_is_dead = 0;
2875		ioa_cfg->reset_retries = 0;
2876		ioa_cfg->in_ioa_bringdown = 0;
2877		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2878	}
2879	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2880	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2881
2882	return result;
2883}
2884
2885static struct device_attribute ipr_ioa_state_attr = {
2886	.attr = {
2887		.name =		"online_state",
2888		.mode =		S_IRUGO | S_IWUSR,
2889	},
2890	.show = ipr_show_adapter_state,
2891	.store = ipr_store_adapter_state
2892};
2893
2894/**
2895 * ipr_store_reset_adapter - Reset the adapter
2896 * @dev:	device struct
2897 * @buf:	buffer
2898 * @count:	buffer size
2899 *
2900 * This function will reset the adapter.
2901 *
2902 * Return value:
2903 * 	count on success / other on failure
2904 **/
2905static ssize_t ipr_store_reset_adapter(struct device *dev,
2906				       struct device_attribute *attr,
2907				       const char *buf, size_t count)
2908{
2909	struct Scsi_Host *shost = class_to_shost(dev);
2910	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2911	unsigned long lock_flags;
2912	int result = count;
2913
2914	if (!capable(CAP_SYS_ADMIN))
2915		return -EACCES;
2916
2917	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2918	if (!ioa_cfg->in_reset_reload)
2919		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2920	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2921	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2922
2923	return result;
2924}
2925
2926static struct device_attribute ipr_ioa_reset_attr = {
2927	.attr = {
2928		.name =		"reset_host",
2929		.mode =		S_IWUSR,
2930	},
2931	.store = ipr_store_reset_adapter
2932};
2933
2934/**
2935 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2936 * @buf_len:		buffer length
2937 *
2938 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2939 * list to use for microcode download
2940 *
2941 * Return value:
2942 * 	pointer to sglist / NULL on failure
2943 **/
2944static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2945{
2946	int sg_size, order, bsize_elem, num_elem, i, j;
2947	struct ipr_sglist *sglist;
2948	struct scatterlist *scatterlist;
2949	struct page *page;
2950
2951	/* Get the minimum size per scatter/gather element */
2952	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2953
2954	/* Get the actual size per element */
2955	order = get_order(sg_size);
2956
2957	/* Determine the actual number of bytes per element */
2958	bsize_elem = PAGE_SIZE * (1 << order);
2959
2960	/* Determine the actual number of sg entries needed */
2961	if (buf_len % bsize_elem)
2962		num_elem = (buf_len / bsize_elem) + 1;
2963	else
2964		num_elem = buf_len / bsize_elem;
2965
2966	/* Allocate a scatter/gather list for the DMA */
2967	sglist = kzalloc(sizeof(struct ipr_sglist) +
2968			 (sizeof(struct scatterlist) * (num_elem - 1)),
2969			 GFP_KERNEL);
2970
2971	if (sglist == NULL) {
2972		ipr_trace;
2973		return NULL;
2974	}
2975
2976	scatterlist = sglist->scatterlist;
2977	sg_init_table(scatterlist, num_elem);
2978
2979	sglist->order = order;
2980	sglist->num_sg = num_elem;
2981
2982	/* Allocate a bunch of sg elements */
2983	for (i = 0; i < num_elem; i++) {
2984		page = alloc_pages(GFP_KERNEL, order);
2985		if (!page) {
2986			ipr_trace;
2987
2988			/* Free up what we already allocated */
2989			for (j = i - 1; j >= 0; j--)
2990				__free_pages(sg_page(&scatterlist[j]), order);
2991			kfree(sglist);
2992			return NULL;
2993		}
2994
2995		sg_set_page(&scatterlist[i], page, 0, 0);
2996	}
2997
2998	return sglist;
2999}
3000
3001/**
3002 * ipr_free_ucode_buffer - Frees a microcode download buffer
3003 * @p_dnld:		scatter/gather list pointer
3004 *
3005 * Free a DMA'able ucode download buffer previously allocated with
3006 * ipr_alloc_ucode_buffer
3007 *
3008 * Return value:
3009 * 	nothing
3010 **/
3011static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3012{
3013	int i;
3014
3015	for (i = 0; i < sglist->num_sg; i++)
3016		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3017
3018	kfree(sglist);
3019}
3020
3021/**
3022 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3023 * @sglist:		scatter/gather list pointer
3024 * @buffer:		buffer pointer
3025 * @len:		buffer length
3026 *
3027 * Copy a microcode image from a user buffer into a buffer allocated by
3028 * ipr_alloc_ucode_buffer
3029 *
3030 * Return value:
3031 * 	0 on success / other on failure
3032 **/
3033static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3034				 u8 *buffer, u32 len)
3035{
3036	int bsize_elem, i, result = 0;
3037	struct scatterlist *scatterlist;
3038	void *kaddr;
3039
3040	/* Determine the actual number of bytes per element */
3041	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3042
3043	scatterlist = sglist->scatterlist;
3044
3045	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3046		struct page *page = sg_page(&scatterlist[i]);
3047
3048		kaddr = kmap(page);
3049		memcpy(kaddr, buffer, bsize_elem);
3050		kunmap(page);
3051
3052		scatterlist[i].length = bsize_elem;
3053
3054		if (result != 0) {
3055			ipr_trace;
3056			return result;
3057		}
3058	}
3059
3060	if (len % bsize_elem) {
3061		struct page *page = sg_page(&scatterlist[i]);
3062
3063		kaddr = kmap(page);
3064		memcpy(kaddr, buffer, len % bsize_elem);
3065		kunmap(page);
3066
3067		scatterlist[i].length = len % bsize_elem;
3068	}
3069
3070	sglist->buffer_len = len;
3071	return result;
3072}
3073
3074/**
3075 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3076 * @ipr_cmd:		ipr command struct
3077 * @sglist:		scatter/gather list
3078 *
3079 * Builds a microcode download IOA data list (IOADL).
3080 *
3081 **/
3082static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3083				    struct ipr_sglist *sglist)
3084{
3085	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3086	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3087	struct scatterlist *scatterlist = sglist->scatterlist;
3088	int i;
3089
3090	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3091	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3092	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3093
3094	ioarcb->ioadl_len =
3095		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3096	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3097		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3098		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3099		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3100	}
3101
3102	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3103}
3104
3105/**
3106 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3107 * @ipr_cmd:	ipr command struct
3108 * @sglist:		scatter/gather list
3109 *
3110 * Builds a microcode download IOA data list (IOADL).
3111 *
3112 **/
3113static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3114				  struct ipr_sglist *sglist)
3115{
3116	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3117	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3118	struct scatterlist *scatterlist = sglist->scatterlist;
3119	int i;
3120
3121	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3122	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3123	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3124
3125	ioarcb->ioadl_len =
3126		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3127
3128	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3129		ioadl[i].flags_and_data_len =
3130			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3131		ioadl[i].address =
3132			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3133	}
3134
3135	ioadl[i-1].flags_and_data_len |=
3136		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3137}
3138
3139/**
3140 * ipr_update_ioa_ucode - Update IOA's microcode
3141 * @ioa_cfg:	ioa config struct
3142 * @sglist:		scatter/gather list
3143 *
3144 * Initiate an adapter reset to update the IOA's microcode
3145 *
3146 * Return value:
3147 * 	0 on success / -EIO on failure
3148 **/
3149static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3150				struct ipr_sglist *sglist)
3151{
3152	unsigned long lock_flags;
3153
3154	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3155	while(ioa_cfg->in_reset_reload) {
3156		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3158		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3159	}
3160
3161	if (ioa_cfg->ucode_sglist) {
3162		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3163		dev_err(&ioa_cfg->pdev->dev,
3164			"Microcode download already in progress\n");
3165		return -EIO;
3166	}
3167
3168	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3169					sglist->num_sg, DMA_TO_DEVICE);
3170
3171	if (!sglist->num_dma_sg) {
3172		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3173		dev_err(&ioa_cfg->pdev->dev,
3174			"Failed to map microcode download buffer!\n");
3175		return -EIO;
3176	}
3177
3178	ioa_cfg->ucode_sglist = sglist;
3179	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3180	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3181	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3182
3183	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3184	ioa_cfg->ucode_sglist = NULL;
3185	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3186	return 0;
3187}
3188
3189/**
3190 * ipr_store_update_fw - Update the firmware on the adapter
3191 * @class_dev:	device struct
3192 * @buf:	buffer
3193 * @count:	buffer size
3194 *
3195 * This function will update the firmware on the adapter.
3196 *
3197 * Return value:
3198 * 	count on success / other on failure
3199 **/
3200static ssize_t ipr_store_update_fw(struct device *dev,
3201				   struct device_attribute *attr,
3202				   const char *buf, size_t count)
3203{
3204	struct Scsi_Host *shost = class_to_shost(dev);
3205	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3206	struct ipr_ucode_image_header *image_hdr;
3207	const struct firmware *fw_entry;
3208	struct ipr_sglist *sglist;
3209	char fname[100];
3210	char *src;
3211	int len, result, dnld_size;
3212
3213	if (!capable(CAP_SYS_ADMIN))
3214		return -EACCES;
3215
3216	len = snprintf(fname, 99, "%s", buf);
3217	fname[len-1] = '\0';
3218
3219	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3220		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3221		return -EIO;
3222	}
3223
3224	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3225
3226	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3227	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3228	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3229		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3230		release_firmware(fw_entry);
3231		return -EINVAL;
3232	}
3233
3234	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3235	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3236	sglist = ipr_alloc_ucode_buffer(dnld_size);
3237
3238	if (!sglist) {
3239		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3240		release_firmware(fw_entry);
3241		return -ENOMEM;
3242	}
3243
3244	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3245
3246	if (result) {
3247		dev_err(&ioa_cfg->pdev->dev,
3248			"Microcode buffer copy to DMA buffer failed\n");
3249		goto out;
3250	}
3251
3252	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3253
3254	if (!result)
3255		result = count;
3256out:
3257	ipr_free_ucode_buffer(sglist);
3258	release_firmware(fw_entry);
3259	return result;
3260}
3261
3262static struct device_attribute ipr_update_fw_attr = {
3263	.attr = {
3264		.name =		"update_fw",
3265		.mode =		S_IWUSR,
3266	},
3267	.store = ipr_store_update_fw
3268};
3269
3270static struct device_attribute *ipr_ioa_attrs[] = {
3271	&ipr_fw_version_attr,
3272	&ipr_log_level_attr,
3273	&ipr_diagnostics_attr,
3274	&ipr_ioa_state_attr,
3275	&ipr_ioa_reset_attr,
3276	&ipr_update_fw_attr,
3277	&ipr_ioa_cache_attr,
3278	NULL,
3279};
3280
3281#ifdef CONFIG_SCSI_IPR_DUMP
3282/**
3283 * ipr_read_dump - Dump the adapter
3284 * @kobj:		kobject struct
3285 * @bin_attr:		bin_attribute struct
3286 * @buf:		buffer
3287 * @off:		offset
3288 * @count:		buffer size
3289 *
3290 * Return value:
3291 *	number of bytes printed to buffer
3292 **/
3293static ssize_t ipr_read_dump(struct kobject *kobj,
3294			     struct bin_attribute *bin_attr,
3295			     char *buf, loff_t off, size_t count)
3296{
3297	struct device *cdev = container_of(kobj, struct device, kobj);
3298	struct Scsi_Host *shost = class_to_shost(cdev);
3299	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3300	struct ipr_dump *dump;
3301	unsigned long lock_flags = 0;
3302	char *src;
3303	int len;
3304	size_t rc = count;
3305
3306	if (!capable(CAP_SYS_ADMIN))
3307		return -EACCES;
3308
3309	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310	dump = ioa_cfg->dump;
3311
3312	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3313		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3314		return 0;
3315	}
3316	kref_get(&dump->kref);
3317	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3318
3319	if (off > dump->driver_dump.hdr.len) {
3320		kref_put(&dump->kref, ipr_release_dump);
3321		return 0;
3322	}
3323
3324	if (off + count > dump->driver_dump.hdr.len) {
3325		count = dump->driver_dump.hdr.len - off;
3326		rc = count;
3327	}
3328
3329	if (count && off < sizeof(dump->driver_dump)) {
3330		if (off + count > sizeof(dump->driver_dump))
3331			len = sizeof(dump->driver_dump) - off;
3332		else
3333			len = count;
3334		src = (u8 *)&dump->driver_dump + off;
3335		memcpy(buf, src, len);
3336		buf += len;
3337		off += len;
3338		count -= len;
3339	}
3340
3341	off -= sizeof(dump->driver_dump);
3342
3343	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3344		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3345			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3346		else
3347			len = count;
3348		src = (u8 *)&dump->ioa_dump + off;
3349		memcpy(buf, src, len);
3350		buf += len;
3351		off += len;
3352		count -= len;
3353	}
3354
3355	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3356
3357	while (count) {
3358		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3359			len = PAGE_ALIGN(off) - off;
3360		else
3361			len = count;
3362		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3363		src += off & ~PAGE_MASK;
3364		memcpy(buf, src, len);
3365		buf += len;
3366		off += len;
3367		count -= len;
3368	}
3369
3370	kref_put(&dump->kref, ipr_release_dump);
3371	return rc;
3372}
3373
3374/**
3375 * ipr_alloc_dump - Prepare for adapter dump
3376 * @ioa_cfg:	ioa config struct
3377 *
3378 * Return value:
3379 *	0 on success / other on failure
3380 **/
3381static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3382{
3383	struct ipr_dump *dump;
3384	unsigned long lock_flags = 0;
3385
3386	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3387
3388	if (!dump) {
3389		ipr_err("Dump memory allocation failed\n");
3390		return -ENOMEM;
3391	}
3392
3393	kref_init(&dump->kref);
3394	dump->ioa_cfg = ioa_cfg;
3395
3396	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3397
3398	if (INACTIVE != ioa_cfg->sdt_state) {
3399		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3400		kfree(dump);
3401		return 0;
3402	}
3403
3404	ioa_cfg->dump = dump;
3405	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3406	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3407		ioa_cfg->dump_taken = 1;
3408		schedule_work(&ioa_cfg->work_q);
3409	}
3410	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411
3412	return 0;
3413}
3414
3415/**
3416 * ipr_free_dump - Free adapter dump memory
3417 * @ioa_cfg:	ioa config struct
3418 *
3419 * Return value:
3420 *	0 on success / other on failure
3421 **/
3422static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3423{
3424	struct ipr_dump *dump;
3425	unsigned long lock_flags = 0;
3426
3427	ENTER;
3428
3429	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3430	dump = ioa_cfg->dump;
3431	if (!dump) {
3432		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433		return 0;
3434	}
3435
3436	ioa_cfg->dump = NULL;
3437	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438
3439	kref_put(&dump->kref, ipr_release_dump);
3440
3441	LEAVE;
3442	return 0;
3443}
3444
3445/**
3446 * ipr_write_dump - Setup dump state of adapter
3447 * @kobj:		kobject struct
3448 * @bin_attr:		bin_attribute struct
3449 * @buf:		buffer
3450 * @off:		offset
3451 * @count:		buffer size
3452 *
3453 * Return value:
3454 *	number of bytes printed to buffer
3455 **/
3456static ssize_t ipr_write_dump(struct kobject *kobj,
3457			      struct bin_attribute *bin_attr,
3458			      char *buf, loff_t off, size_t count)
3459{
3460	struct device *cdev = container_of(kobj, struct device, kobj);
3461	struct Scsi_Host *shost = class_to_shost(cdev);
3462	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463	int rc;
3464
3465	if (!capable(CAP_SYS_ADMIN))
3466		return -EACCES;
3467
3468	if (buf[0] == '1')
3469		rc = ipr_alloc_dump(ioa_cfg);
3470	else if (buf[0] == '0')
3471		rc = ipr_free_dump(ioa_cfg);
3472	else
3473		return -EINVAL;
3474
3475	if (rc)
3476		return rc;
3477	else
3478		return count;
3479}
3480
3481static struct bin_attribute ipr_dump_attr = {
3482	.attr =	{
3483		.name = "dump",
3484		.mode = S_IRUSR | S_IWUSR,
3485	},
3486	.size = 0,
3487	.read = ipr_read_dump,
3488	.write = ipr_write_dump
3489};
3490#else
3491static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3492#endif
3493
3494/**
3495 * ipr_change_queue_depth - Change the device's queue depth
3496 * @sdev:	scsi device struct
3497 * @qdepth:	depth to set
3498 * @reason:	calling context
3499 *
3500 * Return value:
3501 * 	actual depth set
3502 **/
3503static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3504				  int reason)
3505{
3506	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3507	struct ipr_resource_entry *res;
3508	unsigned long lock_flags = 0;
3509
3510	if (reason != SCSI_QDEPTH_DEFAULT)
3511		return -EOPNOTSUPP;
3512
3513	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3514	res = (struct ipr_resource_entry *)sdev->hostdata;
3515
3516	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3517		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3518	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3519
3520	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3521	return sdev->queue_depth;
3522}
3523
3524/**
3525 * ipr_change_queue_type - Change the device's queue type
3526 * @dsev:		scsi device struct
3527 * @tag_type:	type of tags to use
3528 *
3529 * Return value:
3530 * 	actual queue type set
3531 **/
3532static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3533{
3534	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3535	struct ipr_resource_entry *res;
3536	unsigned long lock_flags = 0;
3537
3538	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3539	res = (struct ipr_resource_entry *)sdev->hostdata;
3540
3541	if (res) {
3542		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3543			/*
3544			 * We don't bother quiescing the device here since the
3545			 * adapter firmware does it for us.
3546			 */
3547			scsi_set_tag_type(sdev, tag_type);
3548
3549			if (tag_type)
3550				scsi_activate_tcq(sdev, sdev->queue_depth);
3551			else
3552				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3553		} else
3554			tag_type = 0;
3555	} else
3556		tag_type = 0;
3557
3558	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3559	return tag_type;
3560}
3561
3562/**
3563 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3564 * @dev:	device struct
3565 * @buf:	buffer
3566 *
3567 * Return value:
3568 * 	number of bytes printed to buffer
3569 **/
3570static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3571{
3572	struct scsi_device *sdev = to_scsi_device(dev);
3573	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3574	struct ipr_resource_entry *res;
3575	unsigned long lock_flags = 0;
3576	ssize_t len = -ENXIO;
3577
3578	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3579	res = (struct ipr_resource_entry *)sdev->hostdata;
3580	if (res)
3581		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3582	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3583	return len;
3584}
3585
3586static struct device_attribute ipr_adapter_handle_attr = {
3587	.attr = {
3588		.name = 	"adapter_handle",
3589		.mode =		S_IRUSR,
3590	},
3591	.show = ipr_show_adapter_handle
3592};
3593
3594static struct device_attribute *ipr_dev_attrs[] = {
3595	&ipr_adapter_handle_attr,
3596	NULL,
3597};
3598
3599/**
3600 * ipr_biosparam - Return the HSC mapping
3601 * @sdev:			scsi device struct
3602 * @block_device:	block device pointer
3603 * @capacity:		capacity of the device
3604 * @parm:			Array containing returned HSC values.
3605 *
3606 * This function generates the HSC parms that fdisk uses.
3607 * We want to make sure we return something that places partitions
3608 * on 4k boundaries for best performance with the IOA.
3609 *
3610 * Return value:
3611 * 	0 on success
3612 **/
3613static int ipr_biosparam(struct scsi_device *sdev,
3614			 struct block_device *block_device,
3615			 sector_t capacity, int *parm)
3616{
3617	int heads, sectors;
3618	sector_t cylinders;
3619
3620	heads = 128;
3621	sectors = 32;
3622
3623	cylinders = capacity;
3624	sector_div(cylinders, (128 * 32));
3625
3626	/* return result */
3627	parm[0] = heads;
3628	parm[1] = sectors;
3629	parm[2] = cylinders;
3630
3631	return 0;
3632}
3633
3634/**
3635 * ipr_find_starget - Find target based on bus/target.
3636 * @starget:	scsi target struct
3637 *
3638 * Return value:
3639 * 	resource entry pointer if found / NULL if not found
3640 **/
3641static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3642{
3643	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3644	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3645	struct ipr_resource_entry *res;
3646
3647	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3648		if ((res->cfgte.res_addr.bus == starget->channel) &&
3649		    (res->cfgte.res_addr.target == starget->id) &&
3650		    (res->cfgte.res_addr.lun == 0)) {
3651			return res;
3652		}
3653	}
3654
3655	return NULL;
3656}
3657
3658static struct ata_port_info sata_port_info;
3659
3660/**
3661 * ipr_target_alloc - Prepare for commands to a SCSI target
3662 * @starget:	scsi target struct
3663 *
3664 * If the device is a SATA device, this function allocates an
3665 * ATA port with libata, else it does nothing.
3666 *
3667 * Return value:
3668 * 	0 on success / non-0 on failure
3669 **/
3670static int ipr_target_alloc(struct scsi_target *starget)
3671{
3672	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3673	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3674	struct ipr_sata_port *sata_port;
3675	struct ata_port *ap;
3676	struct ipr_resource_entry *res;
3677	unsigned long lock_flags;
3678
3679	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3680	res = ipr_find_starget(starget);
3681	starget->hostdata = NULL;
3682
3683	if (res && ipr_is_gata(res)) {
3684		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3685		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3686		if (!sata_port)
3687			return -ENOMEM;
3688
3689		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3690		if (ap) {
3691			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3692			sata_port->ioa_cfg = ioa_cfg;
3693			sata_port->ap = ap;
3694			sata_port->res = res;
3695
3696			res->sata_port = sata_port;
3697			ap->private_data = sata_port;
3698			starget->hostdata = sata_port;
3699		} else {
3700			kfree(sata_port);
3701			return -ENOMEM;
3702		}
3703	}
3704	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3705
3706	return 0;
3707}
3708
3709/**
3710 * ipr_target_destroy - Destroy a SCSI target
3711 * @starget:	scsi target struct
3712 *
3713 * If the device was a SATA device, this function frees the libata
3714 * ATA port, else it does nothing.
3715 *
3716 **/
3717static void ipr_target_destroy(struct scsi_target *starget)
3718{
3719	struct ipr_sata_port *sata_port = starget->hostdata;
3720
3721	if (sata_port) {
3722		starget->hostdata = NULL;
3723		ata_sas_port_destroy(sata_port->ap);
3724		kfree(sata_port);
3725	}
3726}
3727
3728/**
3729 * ipr_find_sdev - Find device based on bus/target/lun.
3730 * @sdev:	scsi device struct
3731 *
3732 * Return value:
3733 * 	resource entry pointer if found / NULL if not found
3734 **/
3735static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3736{
3737	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3738	struct ipr_resource_entry *res;
3739
3740	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3741		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3742		    (res->cfgte.res_addr.target == sdev->id) &&
3743		    (res->cfgte.res_addr.lun == sdev->lun))
3744			return res;
3745	}
3746
3747	return NULL;
3748}
3749
3750/**
3751 * ipr_slave_destroy - Unconfigure a SCSI device
3752 * @sdev:	scsi device struct
3753 *
3754 * Return value:
3755 * 	nothing
3756 **/
3757static void ipr_slave_destroy(struct scsi_device *sdev)
3758{
3759	struct ipr_resource_entry *res;
3760	struct ipr_ioa_cfg *ioa_cfg;
3761	unsigned long lock_flags = 0;
3762
3763	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3764
3765	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3766	res = (struct ipr_resource_entry *) sdev->hostdata;
3767	if (res) {
3768		if (res->sata_port)
3769			ata_port_disable(res->sata_port->ap);
3770		sdev->hostdata = NULL;
3771		res->sdev = NULL;
3772		res->sata_port = NULL;
3773	}
3774	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3775}
3776
3777/**
3778 * ipr_slave_configure - Configure a SCSI device
3779 * @sdev:	scsi device struct
3780 *
3781 * This function configures the specified scsi device.
3782 *
3783 * Return value:
3784 * 	0 on success
3785 **/
3786static int ipr_slave_configure(struct scsi_device *sdev)
3787{
3788	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3789	struct ipr_resource_entry *res;
3790	struct ata_port *ap = NULL;
3791	unsigned long lock_flags = 0;
3792
3793	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3794	res = sdev->hostdata;
3795	if (res) {
3796		if (ipr_is_af_dasd_device(res))
3797			sdev->type = TYPE_RAID;
3798		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3799			sdev->scsi_level = 4;
3800			sdev->no_uld_attach = 1;
3801		}
3802		if (ipr_is_vset_device(res)) {
3803			blk_queue_rq_timeout(sdev->request_queue,
3804					     IPR_VSET_RW_TIMEOUT);
3805			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3806		}
3807		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3808			sdev->allow_restart = 1;
3809		if (ipr_is_gata(res) && res->sata_port)
3810			ap = res->sata_port->ap;
3811		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3812
3813		if (ap) {
3814			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3815			ata_sas_slave_configure(sdev, ap);
3816		} else
3817			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3818		return 0;
3819	}
3820	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3821	return 0;
3822}
3823
3824/**
3825 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3826 * @sdev:	scsi device struct
3827 *
3828 * This function initializes an ATA port so that future commands
3829 * sent through queuecommand will work.
3830 *
3831 * Return value:
3832 * 	0 on success
3833 **/
3834static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3835{
3836	struct ipr_sata_port *sata_port = NULL;
3837	int rc = -ENXIO;
3838
3839	ENTER;
3840	if (sdev->sdev_target)
3841		sata_port = sdev->sdev_target->hostdata;
3842	if (sata_port)
3843		rc = ata_sas_port_init(sata_port->ap);
3844	if (rc)
3845		ipr_slave_destroy(sdev);
3846
3847	LEAVE;
3848	return rc;
3849}
3850
3851/**
3852 * ipr_slave_alloc - Prepare for commands to a device.
3853 * @sdev:	scsi device struct
3854 *
3855 * This function saves a pointer to the resource entry
3856 * in the scsi device struct if the device exists. We
3857 * can then use this pointer in ipr_queuecommand when
3858 * handling new commands.
3859 *
3860 * Return value:
3861 * 	0 on success / -ENXIO if device does not exist
3862 **/
3863static int ipr_slave_alloc(struct scsi_device *sdev)
3864{
3865	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3866	struct ipr_resource_entry *res;
3867	unsigned long lock_flags;
3868	int rc = -ENXIO;
3869
3870	sdev->hostdata = NULL;
3871
3872	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3873
3874	res = ipr_find_sdev(sdev);
3875	if (res) {
3876		res->sdev = sdev;
3877		res->add_to_ml = 0;
3878		res->in_erp = 0;
3879		sdev->hostdata = res;
3880		if (!ipr_is_naca_model(res))
3881			res->needs_sync_complete = 1;
3882		rc = 0;
3883		if (ipr_is_gata(res)) {
3884			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3885			return ipr_ata_slave_alloc(sdev);
3886		}
3887	}
3888
3889	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3890
3891	return rc;
3892}
3893
3894/**
3895 * ipr_eh_host_reset - Reset the host adapter
3896 * @scsi_cmd:	scsi command struct
3897 *
3898 * Return value:
3899 * 	SUCCESS / FAILED
3900 **/
3901static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3902{
3903	struct ipr_ioa_cfg *ioa_cfg;
3904	int rc;
3905
3906	ENTER;
3907	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3908
3909	dev_err(&ioa_cfg->pdev->dev,
3910		"Adapter being reset as a result of error recovery.\n");
3911
3912	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3913		ioa_cfg->sdt_state = GET_DUMP;
3914
3915	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3916
3917	LEAVE;
3918	return rc;
3919}
3920
3921static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3922{
3923	int rc;
3924
3925	spin_lock_irq(cmd->device->host->host_lock);
3926	rc = __ipr_eh_host_reset(cmd);
3927	spin_unlock_irq(cmd->device->host->host_lock);
3928
3929	return rc;
3930}
3931
3932/**
3933 * ipr_device_reset - Reset the device
3934 * @ioa_cfg:	ioa config struct
3935 * @res:		resource entry struct
3936 *
3937 * This function issues a device reset to the affected device.
3938 * If the device is a SCSI device, a LUN reset will be sent
3939 * to the device first. If that does not work, a target reset
3940 * will be sent. If the device is a SATA device, a PHY reset will
3941 * be sent.
3942 *
3943 * Return value:
3944 *	0 on success / non-zero on failure
3945 **/
3946static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3947			    struct ipr_resource_entry *res)
3948{
3949	struct ipr_cmnd *ipr_cmd;
3950	struct ipr_ioarcb *ioarcb;
3951	struct ipr_cmd_pkt *cmd_pkt;
3952	struct ipr_ioarcb_ata_regs *regs;
3953	u32 ioasc;
3954
3955	ENTER;
3956	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3957	ioarcb = &ipr_cmd->ioarcb;
3958	cmd_pkt = &ioarcb->cmd_pkt;
3959
3960	if (ipr_cmd->ioa_cfg->sis64) {
3961		regs = &ipr_cmd->i.ata_ioadl.regs;
3962		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
3963	} else
3964		regs = &ioarcb->u.add_data.u.regs;
3965
3966	ioarcb->res_handle = res->cfgte.res_handle;
3967	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3968	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3969	if (ipr_is_gata(res)) {
3970		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3971		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
3972		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3973	}
3974
3975	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3976	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3977	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3978	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3979		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3980		       sizeof(struct ipr_ioasa_gata));
3981
3982	LEAVE;
3983	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3984}
3985
3986/**
3987 * ipr_sata_reset - Reset the SATA port
3988 * @link:	SATA link to reset
3989 * @classes:	class of the attached device
3990 *
3991 * This function issues a SATA phy reset to the affected ATA link.
3992 *
3993 * Return value:
3994 *	0 on success / non-zero on failure
3995 **/
3996static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3997				unsigned long deadline)
3998{
3999	struct ipr_sata_port *sata_port = link->ap->private_data;
4000	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4001	struct ipr_resource_entry *res;
4002	unsigned long lock_flags = 0;
4003	int rc = -ENXIO;
4004
4005	ENTER;
4006	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4007	while(ioa_cfg->in_reset_reload) {
4008		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4009		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4010		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4011	}
4012
4013	res = sata_port->res;
4014	if (res) {
4015		rc = ipr_device_reset(ioa_cfg, res);
4016		switch(res->cfgte.proto) {
4017		case IPR_PROTO_SATA:
4018		case IPR_PROTO_SAS_STP:
4019			*classes = ATA_DEV_ATA;
4020			break;
4021		case IPR_PROTO_SATA_ATAPI:
4022		case IPR_PROTO_SAS_STP_ATAPI:
4023			*classes = ATA_DEV_ATAPI;
4024			break;
4025		default:
4026			*classes = ATA_DEV_UNKNOWN;
4027			break;
4028		};
4029	}
4030
4031	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4032	LEAVE;
4033	return rc;
4034}
4035
4036/**
4037 * ipr_eh_dev_reset - Reset the device
4038 * @scsi_cmd:	scsi command struct
4039 *
4040 * This function issues a device reset to the affected device.
4041 * A LUN reset will be sent to the device first. If that does
4042 * not work, a target reset will be sent.
4043 *
4044 * Return value:
4045 *	SUCCESS / FAILED
4046 **/
4047static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4048{
4049	struct ipr_cmnd *ipr_cmd;
4050	struct ipr_ioa_cfg *ioa_cfg;
4051	struct ipr_resource_entry *res;
4052	struct ata_port *ap;
4053	int rc = 0;
4054
4055	ENTER;
4056	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4057	res = scsi_cmd->device->hostdata;
4058
4059	if (!res)
4060		return FAILED;
4061
4062	/*
4063	 * If we are currently going through reset/reload, return failed. This will force the
4064	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4065	 * reset to complete
4066	 */
4067	if (ioa_cfg->in_reset_reload)
4068		return FAILED;
4069	if (ioa_cfg->ioa_is_dead)
4070		return FAILED;
4071
4072	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4073		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
4074			if (ipr_cmd->scsi_cmd)
4075				ipr_cmd->done = ipr_scsi_eh_done;
4076			if (ipr_cmd->qc)
4077				ipr_cmd->done = ipr_sata_eh_done;
4078			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4079				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4080				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4081			}
4082		}
4083	}
4084
4085	res->resetting_device = 1;
4086	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4087
4088	if (ipr_is_gata(res) && res->sata_port) {
4089		ap = res->sata_port->ap;
4090		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4091		ata_std_error_handler(ap);
4092		spin_lock_irq(scsi_cmd->device->host->host_lock);
4093
4094		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4095			if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
4096				rc = -EIO;
4097				break;
4098			}
4099		}
4100	} else
4101		rc = ipr_device_reset(ioa_cfg, res);
4102	res->resetting_device = 0;
4103
4104	LEAVE;
4105	return (rc ? FAILED : SUCCESS);
4106}
4107
4108static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4109{
4110	int rc;
4111
4112	spin_lock_irq(cmd->device->host->host_lock);
4113	rc = __ipr_eh_dev_reset(cmd);
4114	spin_unlock_irq(cmd->device->host->host_lock);
4115
4116	return rc;
4117}
4118
4119/**
4120 * ipr_bus_reset_done - Op done function for bus reset.
4121 * @ipr_cmd:	ipr command struct
4122 *
4123 * This function is the op done function for a bus reset
4124 *
4125 * Return value:
4126 * 	none
4127 **/
4128static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4129{
4130	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4131	struct ipr_resource_entry *res;
4132
4133	ENTER;
4134	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4135		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
4136			    sizeof(res->cfgte.res_handle))) {
4137			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
4138			break;
4139		}
4140	}
4141
4142	/*
4143	 * If abort has not completed, indicate the reset has, else call the
4144	 * abort's done function to wake the sleeping eh thread
4145	 */
4146	if (ipr_cmd->sibling->sibling)
4147		ipr_cmd->sibling->sibling = NULL;
4148	else
4149		ipr_cmd->sibling->done(ipr_cmd->sibling);
4150
4151	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4152	LEAVE;
4153}
4154
4155/**
4156 * ipr_abort_timeout - An abort task has timed out
4157 * @ipr_cmd:	ipr command struct
4158 *
4159 * This function handles when an abort task times out. If this
4160 * happens we issue a bus reset since we have resources tied
4161 * up that must be freed before returning to the midlayer.
4162 *
4163 * Return value:
4164 *	none
4165 **/
4166static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4167{
4168	struct ipr_cmnd *reset_cmd;
4169	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4170	struct ipr_cmd_pkt *cmd_pkt;
4171	unsigned long lock_flags = 0;
4172
4173	ENTER;
4174	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4175	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4176		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4177		return;
4178	}
4179
4180	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4181	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4182	ipr_cmd->sibling = reset_cmd;
4183	reset_cmd->sibling = ipr_cmd;
4184	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4185	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4186	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4187	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4188	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4189
4190	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4191	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4192	LEAVE;
4193}
4194
4195/**
4196 * ipr_cancel_op - Cancel specified op
4197 * @scsi_cmd:	scsi command struct
4198 *
4199 * This function cancels specified op.
4200 *
4201 * Return value:
4202 *	SUCCESS / FAILED
4203 **/
4204static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4205{
4206	struct ipr_cmnd *ipr_cmd;
4207	struct ipr_ioa_cfg *ioa_cfg;
4208	struct ipr_resource_entry *res;
4209	struct ipr_cmd_pkt *cmd_pkt;
4210	u32 ioasc;
4211	int op_found = 0;
4212
4213	ENTER;
4214	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4215	res = scsi_cmd->device->hostdata;
4216
4217	/* If we are currently going through reset/reload, return failed.
4218	 * This will force the mid-layer to call ipr_eh_host_reset,
4219	 * which will then go to sleep and wait for the reset to complete
4220	 */
4221	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4222		return FAILED;
4223	if (!res || !ipr_is_gscsi(res))
4224		return FAILED;
4225
4226	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4227		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4228			ipr_cmd->done = ipr_scsi_eh_done;
4229			op_found = 1;
4230			break;
4231		}
4232	}
4233
4234	if (!op_found)
4235		return SUCCESS;
4236
4237	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4238	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4239	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4240	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4241	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4242	ipr_cmd->u.sdev = scsi_cmd->device;
4243
4244	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4245		    scsi_cmd->cmnd[0]);
4246	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4247	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4248
4249	/*
4250	 * If the abort task timed out and we sent a bus reset, we will get
4251	 * one the following responses to the abort
4252	 */
4253	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4254		ioasc = 0;
4255		ipr_trace;
4256	}
4257
4258	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4259	if (!ipr_is_naca_model(res))
4260		res->needs_sync_complete = 1;
4261
4262	LEAVE;
4263	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4264}
4265
4266/**
4267 * ipr_eh_abort - Abort a single op
4268 * @scsi_cmd:	scsi command struct
4269 *
4270 * Return value:
4271 * 	SUCCESS / FAILED
4272 **/
4273static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4274{
4275	unsigned long flags;
4276	int rc;
4277
4278	ENTER;
4279
4280	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4281	rc = ipr_cancel_op(scsi_cmd);
4282	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4283
4284	LEAVE;
4285	return rc;
4286}
4287
4288/**
4289 * ipr_handle_other_interrupt - Handle "other" interrupts
4290 * @ioa_cfg:	ioa config struct
4291 * @int_reg:	interrupt register
4292 *
4293 * Return value:
4294 * 	IRQ_NONE / IRQ_HANDLED
4295 **/
4296static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4297					      volatile u32 int_reg)
4298{
4299	irqreturn_t rc = IRQ_HANDLED;
4300
4301	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4302		/* Mask the interrupt */
4303		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4304
4305		/* Clear the interrupt */
4306		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4307		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4308
4309		list_del(&ioa_cfg->reset_cmd->queue);
4310		del_timer(&ioa_cfg->reset_cmd->timer);
4311		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4312	} else {
4313		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4314			ioa_cfg->ioa_unit_checked = 1;
4315		else
4316			dev_err(&ioa_cfg->pdev->dev,
4317				"Permanent IOA failure. 0x%08X\n", int_reg);
4318
4319		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4320			ioa_cfg->sdt_state = GET_DUMP;
4321
4322		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4323		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4324	}
4325
4326	return rc;
4327}
4328
4329/**
4330 * ipr_isr_eh - Interrupt service routine error handler
4331 * @ioa_cfg:	ioa config struct
4332 * @msg:	message to log
4333 *
4334 * Return value:
4335 * 	none
4336 **/
4337static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4338{
4339	ioa_cfg->errors_logged++;
4340	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4341
4342	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4343		ioa_cfg->sdt_state = GET_DUMP;
4344
4345	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4346}
4347
4348/**
4349 * ipr_isr - Interrupt service routine
4350 * @irq:	irq number
4351 * @devp:	pointer to ioa config struct
4352 *
4353 * Return value:
4354 * 	IRQ_NONE / IRQ_HANDLED
4355 **/
4356static irqreturn_t ipr_isr(int irq, void *devp)
4357{
4358	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4359	unsigned long lock_flags = 0;
4360	volatile u32 int_reg, int_mask_reg;
4361	u32 ioasc;
4362	u16 cmd_index;
4363	int num_hrrq = 0;
4364	struct ipr_cmnd *ipr_cmd;
4365	irqreturn_t rc = IRQ_NONE;
4366
4367	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4368
4369	/* If interrupts are disabled, ignore the interrupt */
4370	if (!ioa_cfg->allow_interrupts) {
4371		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4372		return IRQ_NONE;
4373	}
4374
4375	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4376	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4377
4378	/* If an interrupt on the adapter did not occur, ignore it */
4379	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4380		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4381		return IRQ_NONE;
4382	}
4383
4384	while (1) {
4385		ipr_cmd = NULL;
4386
4387		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4388		       ioa_cfg->toggle_bit) {
4389
4390			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4391				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4392
4393			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4394				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4395				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4396				return IRQ_HANDLED;
4397			}
4398
4399			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4400
4401			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4402
4403			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4404
4405			list_del(&ipr_cmd->queue);
4406			del_timer(&ipr_cmd->timer);
4407			ipr_cmd->done(ipr_cmd);
4408
4409			rc = IRQ_HANDLED;
4410
4411			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4412				ioa_cfg->hrrq_curr++;
4413			} else {
4414				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4415				ioa_cfg->toggle_bit ^= 1u;
4416			}
4417		}
4418
4419		if (ipr_cmd != NULL) {
4420			/* Clear the PCI interrupt */
4421			do {
4422				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4423				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4424			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4425					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4426
4427			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4428				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4429				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4430				return IRQ_HANDLED;
4431			}
4432
4433		} else
4434			break;
4435	}
4436
4437	if (unlikely(rc == IRQ_NONE))
4438		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4439
4440	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4441	return rc;
4442}
4443
4444/**
4445 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4446 * @ioa_cfg:	ioa config struct
4447 * @ipr_cmd:	ipr command struct
4448 *
4449 * Return value:
4450 * 	0 on success / -1 on failure
4451 **/
4452static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4453			     struct ipr_cmnd *ipr_cmd)
4454{
4455	int i, nseg;
4456	struct scatterlist *sg;
4457	u32 length;
4458	u32 ioadl_flags = 0;
4459	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4460	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4461	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
4462
4463	length = scsi_bufflen(scsi_cmd);
4464	if (!length)
4465		return 0;
4466
4467	nseg = scsi_dma_map(scsi_cmd);
4468	if (nseg < 0) {
4469		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4470		return -1;
4471	}
4472
4473	ipr_cmd->dma_use_sg = nseg;
4474
4475	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4476		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4477		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4478	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
4479		ioadl_flags = IPR_IOADL_FLAGS_READ;
4480
4481	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4482		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
4483		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
4484		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
4485	}
4486
4487	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4488	return 0;
4489}
4490
4491/**
4492 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4493 * @ioa_cfg:	ioa config struct
4494 * @ipr_cmd:	ipr command struct
4495 *
4496 * Return value:
4497 * 	0 on success / -1 on failure
4498 **/
4499static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4500			   struct ipr_cmnd *ipr_cmd)
4501{
4502	int i, nseg;
4503	struct scatterlist *sg;
4504	u32 length;
4505	u32 ioadl_flags = 0;
4506	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4507	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4508	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4509
4510	length = scsi_bufflen(scsi_cmd);
4511	if (!length)
4512		return 0;
4513
4514	nseg = scsi_dma_map(scsi_cmd);
4515	if (nseg < 0) {
4516		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4517		return -1;
4518	}
4519
4520	ipr_cmd->dma_use_sg = nseg;
4521
4522	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4523		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4524		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4525		ioarcb->data_transfer_length = cpu_to_be32(length);
4526		ioarcb->ioadl_len =
4527			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4528	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4529		ioadl_flags = IPR_IOADL_FLAGS_READ;
4530		ioarcb->read_data_transfer_length = cpu_to_be32(length);
4531		ioarcb->read_ioadl_len =
4532			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4533	}
4534
4535	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
4536		ioadl = ioarcb->u.add_data.u.ioadl;
4537		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
4538				    offsetof(struct ipr_ioarcb, u.add_data));
4539		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4540	}
4541
4542	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4543		ioadl[i].flags_and_data_len =
4544			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4545		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4546	}
4547
4548	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4549	return 0;
4550}
4551
4552/**
4553 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4554 * @scsi_cmd:	scsi command struct
4555 *
4556 * Return value:
4557 * 	task attributes
4558 **/
4559static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4560{
4561	u8 tag[2];
4562	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4563
4564	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4565		switch (tag[0]) {
4566		case MSG_SIMPLE_TAG:
4567			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4568			break;
4569		case MSG_HEAD_TAG:
4570			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4571			break;
4572		case MSG_ORDERED_TAG:
4573			rc = IPR_FLAGS_LO_ORDERED_TASK;
4574			break;
4575		};
4576	}
4577
4578	return rc;
4579}
4580
4581/**
4582 * ipr_erp_done - Process completion of ERP for a device
4583 * @ipr_cmd:		ipr command struct
4584 *
4585 * This function copies the sense buffer into the scsi_cmd
4586 * struct and pushes the scsi_done function.
4587 *
4588 * Return value:
4589 * 	nothing
4590 **/
4591static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4592{
4593	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4594	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4595	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4596	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4597
4598	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4599		scsi_cmd->result |= (DID_ERROR << 16);
4600		scmd_printk(KERN_ERR, scsi_cmd,
4601			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4602	} else {
4603		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4604		       SCSI_SENSE_BUFFERSIZE);
4605	}
4606
4607	if (res) {
4608		if (!ipr_is_naca_model(res))
4609			res->needs_sync_complete = 1;
4610		res->in_erp = 0;
4611	}
4612	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4613	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4614	scsi_cmd->scsi_done(scsi_cmd);
4615}
4616
4617/**
4618 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4619 * @ipr_cmd:	ipr command struct
4620 *
4621 * Return value:
4622 * 	none
4623 **/
4624static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4625{
4626	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4627	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4628	dma_addr_t dma_addr = ipr_cmd->dma_addr;
4629
4630	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4631	ioarcb->data_transfer_length = 0;
4632	ioarcb->read_data_transfer_length = 0;
4633	ioarcb->ioadl_len = 0;
4634	ioarcb->read_ioadl_len = 0;
4635	ioasa->ioasc = 0;
4636	ioasa->residual_data_len = 0;
4637
4638	if (ipr_cmd->ioa_cfg->sis64)
4639		ioarcb->u.sis64_addr_data.data_ioadl_addr =
4640			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
4641	else {
4642		ioarcb->write_ioadl_addr =
4643			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
4644		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4645	}
4646}
4647
4648/**
4649 * ipr_erp_request_sense - Send request sense to a device
4650 * @ipr_cmd:	ipr command struct
4651 *
4652 * This function sends a request sense to a device as a result
4653 * of a check condition.
4654 *
4655 * Return value:
4656 * 	nothing
4657 **/
4658static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4659{
4660	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4661	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4662
4663	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4664		ipr_erp_done(ipr_cmd);
4665		return;
4666	}
4667
4668	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4669
4670	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4671	cmd_pkt->cdb[0] = REQUEST_SENSE;
4672	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4673	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4674	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4675	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4676
4677	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
4678		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
4679
4680	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4681		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4682}
4683
4684/**
4685 * ipr_erp_cancel_all - Send cancel all to a device
4686 * @ipr_cmd:	ipr command struct
4687 *
4688 * This function sends a cancel all to a device to clear the
4689 * queue. If we are running TCQ on the device, QERR is set to 1,
4690 * which means all outstanding ops have been dropped on the floor.
4691 * Cancel all will return them to us.
4692 *
4693 * Return value:
4694 * 	nothing
4695 **/
4696static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4697{
4698	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4699	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4700	struct ipr_cmd_pkt *cmd_pkt;
4701
4702	res->in_erp = 1;
4703
4704	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4705
4706	if (!scsi_get_tag_type(scsi_cmd->device)) {
4707		ipr_erp_request_sense(ipr_cmd);
4708		return;
4709	}
4710
4711	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4712	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4713	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4714
4715	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4716		   IPR_CANCEL_ALL_TIMEOUT);
4717}
4718
4719/**
4720 * ipr_dump_ioasa - Dump contents of IOASA
4721 * @ioa_cfg:	ioa config struct
4722 * @ipr_cmd:	ipr command struct
4723 * @res:		resource entry struct
4724 *
4725 * This function is invoked by the interrupt handler when ops
4726 * fail. It will log the IOASA if appropriate. Only called
4727 * for GPDD ops.
4728 *
4729 * Return value:
4730 * 	none
4731 **/
4732static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4733			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4734{
4735	int i;
4736	u16 data_len;
4737	u32 ioasc, fd_ioasc;
4738	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4739	__be32 *ioasa_data = (__be32 *)ioasa;
4740	int error_index;
4741
4742	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4743	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4744
4745	if (0 == ioasc)
4746		return;
4747
4748	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4749		return;
4750
4751	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4752		error_index = ipr_get_error(fd_ioasc);
4753	else
4754		error_index = ipr_get_error(ioasc);
4755
4756	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4757		/* Don't log an error if the IOA already logged one */
4758		if (ioasa->ilid != 0)
4759			return;
4760
4761		if (!ipr_is_gscsi(res))
4762			return;
4763
4764		if (ipr_error_table[error_index].log_ioasa == 0)
4765			return;
4766	}
4767
4768	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4769
4770	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4771		data_len = sizeof(struct ipr_ioasa);
4772	else
4773		data_len = be16_to_cpu(ioasa->ret_stat_len);
4774
4775	ipr_err("IOASA Dump:\n");
4776
4777	for (i = 0; i < data_len / 4; i += 4) {
4778		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4779			be32_to_cpu(ioasa_data[i]),
4780			be32_to_cpu(ioasa_data[i+1]),
4781			be32_to_cpu(ioasa_data[i+2]),
4782			be32_to_cpu(ioasa_data[i+3]));
4783	}
4784}
4785
4786/**
4787 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4788 * @ioasa:		IOASA
4789 * @sense_buf:	sense data buffer
4790 *
4791 * Return value:
4792 * 	none
4793 **/
4794static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4795{
4796	u32 failing_lba;
4797	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4798	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4799	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4800	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4801
4802	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4803
4804	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4805		return;
4806
4807	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4808
4809	if (ipr_is_vset_device(res) &&
4810	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4811	    ioasa->u.vset.failing_lba_hi != 0) {
4812		sense_buf[0] = 0x72;
4813		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4814		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4815		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4816
4817		sense_buf[7] = 12;
4818		sense_buf[8] = 0;
4819		sense_buf[9] = 0x0A;
4820		sense_buf[10] = 0x80;
4821
4822		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4823
4824		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4825		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4826		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4827		sense_buf[15] = failing_lba & 0x000000ff;
4828
4829		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4830
4831		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4832		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4833		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4834		sense_buf[19] = failing_lba & 0x000000ff;
4835	} else {
4836		sense_buf[0] = 0x70;
4837		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4838		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4839		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4840
4841		/* Illegal request */
4842		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4843		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4844			sense_buf[7] = 10;	/* additional length */
4845
4846			/* IOARCB was in error */
4847			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4848				sense_buf[15] = 0xC0;
4849			else	/* Parameter data was invalid */
4850				sense_buf[15] = 0x80;
4851
4852			sense_buf[16] =
4853			    ((IPR_FIELD_POINTER_MASK &
4854			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4855			sense_buf[17] =
4856			    (IPR_FIELD_POINTER_MASK &
4857			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4858		} else {
4859			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4860				if (ipr_is_vset_device(res))
4861					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4862				else
4863					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4864
4865				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4866				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4867				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4868				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4869				sense_buf[6] = failing_lba & 0x000000ff;
4870			}
4871
4872			sense_buf[7] = 6;	/* additional length */
4873		}
4874	}
4875}
4876
4877/**
4878 * ipr_get_autosense - Copy autosense data to sense buffer
4879 * @ipr_cmd:	ipr command struct
4880 *
4881 * This function copies the autosense buffer to the buffer
4882 * in the scsi_cmd, if there is autosense available.
4883 *
4884 * Return value:
4885 *	1 if autosense was available / 0 if not
4886 **/
4887static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4888{
4889	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4890
4891	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4892		return 0;
4893
4894	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4895	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4896		   SCSI_SENSE_BUFFERSIZE));
4897	return 1;
4898}
4899
4900/**
4901 * ipr_erp_start - Process an error response for a SCSI op
4902 * @ioa_cfg:	ioa config struct
4903 * @ipr_cmd:	ipr command struct
4904 *
4905 * This function determines whether or not to initiate ERP
4906 * on the affected device.
4907 *
4908 * Return value:
4909 * 	nothing
4910 **/
4911static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4912			      struct ipr_cmnd *ipr_cmd)
4913{
4914	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4915	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4916	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4917	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
4918
4919	if (!res) {
4920		ipr_scsi_eh_done(ipr_cmd);
4921		return;
4922	}
4923
4924	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
4925		ipr_gen_sense(ipr_cmd);
4926
4927	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4928
4929	switch (masked_ioasc) {
4930	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4931		if (ipr_is_naca_model(res))
4932			scsi_cmd->result |= (DID_ABORT << 16);
4933		else
4934			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4935		break;
4936	case IPR_IOASC_IR_RESOURCE_HANDLE:
4937	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4938		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4939		break;
4940	case IPR_IOASC_HW_SEL_TIMEOUT:
4941		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4942		if (!ipr_is_naca_model(res))
4943			res->needs_sync_complete = 1;
4944		break;
4945	case IPR_IOASC_SYNC_REQUIRED:
4946		if (!res->in_erp)
4947			res->needs_sync_complete = 1;
4948		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4949		break;
4950	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4951	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4952		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4953		break;
4954	case IPR_IOASC_BUS_WAS_RESET:
4955	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4956		/*
4957		 * Report the bus reset and ask for a retry. The device
4958		 * will give CC/UA the next command.
4959		 */
4960		if (!res->resetting_device)
4961			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4962		scsi_cmd->result |= (DID_ERROR << 16);
4963		if (!ipr_is_naca_model(res))
4964			res->needs_sync_complete = 1;
4965		break;
4966	case IPR_IOASC_HW_DEV_BUS_STATUS:
4967		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4968		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4969			if (!ipr_get_autosense(ipr_cmd)) {
4970				if (!ipr_is_naca_model(res)) {
4971					ipr_erp_cancel_all(ipr_cmd);
4972					return;
4973				}
4974			}
4975		}
4976		if (!ipr_is_naca_model(res))
4977			res->needs_sync_complete = 1;
4978		break;
4979	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4980		break;
4981	default:
4982		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4983			scsi_cmd->result |= (DID_ERROR << 16);
4984		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4985			res->needs_sync_complete = 1;
4986		break;
4987	}
4988
4989	scsi_dma_unmap(ipr_cmd->scsi_cmd);
4990	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4991	scsi_cmd->scsi_done(scsi_cmd);
4992}
4993
4994/**
4995 * ipr_scsi_done - mid-layer done function
4996 * @ipr_cmd:	ipr command struct
4997 *
4998 * This function is invoked by the interrupt handler for
4999 * ops generated by the SCSI mid-layer
5000 *
5001 * Return value:
5002 * 	none
5003 **/
5004static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5005{
5006	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5007	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5008	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5009
5010	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
5011
5012	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5013		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5014		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5015		scsi_cmd->scsi_done(scsi_cmd);
5016	} else
5017		ipr_erp_start(ioa_cfg, ipr_cmd);
5018}
5019
5020/**
5021 * ipr_queuecommand - Queue a mid-layer request
5022 * @scsi_cmd:	scsi command struct
5023 * @done:		done function
5024 *
5025 * This function queues a request generated by the mid-layer.
5026 *
5027 * Return value:
5028 *	0 on success
5029 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5030 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5031 **/
5032static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5033			    void (*done) (struct scsi_cmnd *))
5034{
5035	struct ipr_ioa_cfg *ioa_cfg;
5036	struct ipr_resource_entry *res;
5037	struct ipr_ioarcb *ioarcb;
5038	struct ipr_cmnd *ipr_cmd;
5039	int rc = 0;
5040
5041	scsi_cmd->scsi_done = done;
5042	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5043	res = scsi_cmd->device->hostdata;
5044	scsi_cmd->result = (DID_OK << 16);
5045
5046	/*
5047	 * We are currently blocking all devices due to a host reset
5048	 * We have told the host to stop giving us new requests, but
5049	 * ERP ops don't count. FIXME
5050	 */
5051	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5052		return SCSI_MLQUEUE_HOST_BUSY;
5053
5054	/*
5055	 * FIXME - Create scsi_set_host_offline interface
5056	 *  and the ioa_is_dead check can be removed
5057	 */
5058	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5059		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5060		scsi_cmd->result = (DID_NO_CONNECT << 16);
5061		scsi_cmd->scsi_done(scsi_cmd);
5062		return 0;
5063	}
5064
5065	if (ipr_is_gata(res) && res->sata_port)
5066		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5067
5068	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5069	ioarcb = &ipr_cmd->ioarcb;
5070	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5071
5072	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5073	ipr_cmd->scsi_cmd = scsi_cmd;
5074	ioarcb->res_handle = res->cfgte.res_handle;
5075	ipr_cmd->done = ipr_scsi_done;
5076	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5077
5078	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5079		if (scsi_cmd->underflow == 0)
5080			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5081
5082		if (res->needs_sync_complete) {
5083			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5084			res->needs_sync_complete = 0;
5085		}
5086
5087		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5088		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5089		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5090		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5091	}
5092
5093	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5094	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5095		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5096
5097	if (likely(rc == 0)) {
5098		if (ioa_cfg->sis64)
5099			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5100		else
5101			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5102	}
5103
5104	if (likely(rc == 0)) {
5105		mb();
5106		ipr_send_command(ipr_cmd);
5107	} else {
5108		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5109		 return SCSI_MLQUEUE_HOST_BUSY;
5110	}
5111
5112	return 0;
5113}
5114
5115/**
5116 * ipr_ioctl - IOCTL handler
5117 * @sdev:	scsi device struct
5118 * @cmd:	IOCTL cmd
5119 * @arg:	IOCTL arg
5120 *
5121 * Return value:
5122 * 	0 on success / other on failure
5123 **/
5124static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5125{
5126	struct ipr_resource_entry *res;
5127
5128	res = (struct ipr_resource_entry *)sdev->hostdata;
5129	if (res && ipr_is_gata(res)) {
5130		if (cmd == HDIO_GET_IDENTITY)
5131			return -ENOTTY;
5132		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5133	}
5134
5135	return -EINVAL;
5136}
5137
5138/**
5139 * ipr_info - Get information about the card/driver
5140 * @scsi_host:	scsi host struct
5141 *
5142 * Return value:
5143 * 	pointer to buffer with description string
5144 **/
5145static const char * ipr_ioa_info(struct Scsi_Host *host)
5146{
5147	static char buffer[512];
5148	struct ipr_ioa_cfg *ioa_cfg;
5149	unsigned long lock_flags = 0;
5150
5151	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5152
5153	spin_lock_irqsave(host->host_lock, lock_flags);
5154	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5155	spin_unlock_irqrestore(host->host_lock, lock_flags);
5156
5157	return buffer;
5158}
5159
5160static struct scsi_host_template driver_template = {
5161	.module = THIS_MODULE,
5162	.name = "IPR",
5163	.info = ipr_ioa_info,
5164	.ioctl = ipr_ioctl,
5165	.queuecommand = ipr_queuecommand,
5166	.eh_abort_handler = ipr_eh_abort,
5167	.eh_device_reset_handler = ipr_eh_dev_reset,
5168	.eh_host_reset_handler = ipr_eh_host_reset,
5169	.slave_alloc = ipr_slave_alloc,
5170	.slave_configure = ipr_slave_configure,
5171	.slave_destroy = ipr_slave_destroy,
5172	.target_alloc = ipr_target_alloc,
5173	.target_destroy = ipr_target_destroy,
5174	.change_queue_depth = ipr_change_queue_depth,
5175	.change_queue_type = ipr_change_queue_type,
5176	.bios_param = ipr_biosparam,
5177	.can_queue = IPR_MAX_COMMANDS,
5178	.this_id = -1,
5179	.sg_tablesize = IPR_MAX_SGLIST,
5180	.max_sectors = IPR_IOA_MAX_SECTORS,
5181	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5182	.use_clustering = ENABLE_CLUSTERING,
5183	.shost_attrs = ipr_ioa_attrs,
5184	.sdev_attrs = ipr_dev_attrs,
5185	.proc_name = IPR_NAME
5186};
5187
5188/**
5189 * ipr_ata_phy_reset - libata phy_reset handler
5190 * @ap:		ata port to reset
5191 *
5192 **/
5193static void ipr_ata_phy_reset(struct ata_port *ap)
5194{
5195	unsigned long flags;
5196	struct ipr_sata_port *sata_port = ap->private_data;
5197	struct ipr_resource_entry *res = sata_port->res;
5198	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5199	int rc;
5200
5201	ENTER;
5202	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5203	while(ioa_cfg->in_reset_reload) {
5204		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5205		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5206		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5207	}
5208
5209	if (!ioa_cfg->allow_cmds)
5210		goto out_unlock;
5211
5212	rc = ipr_device_reset(ioa_cfg, res);
5213
5214	if (rc) {
5215		ata_port_disable(ap);
5216		goto out_unlock;
5217	}
5218
5219	switch(res->cfgte.proto) {
5220	case IPR_PROTO_SATA:
5221	case IPR_PROTO_SAS_STP:
5222		ap->link.device[0].class = ATA_DEV_ATA;
5223		break;
5224	case IPR_PROTO_SATA_ATAPI:
5225	case IPR_PROTO_SAS_STP_ATAPI:
5226		ap->link.device[0].class = ATA_DEV_ATAPI;
5227		break;
5228	default:
5229		ap->link.device[0].class = ATA_DEV_UNKNOWN;
5230		ata_port_disable(ap);
5231		break;
5232	};
5233
5234out_unlock:
5235	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5236	LEAVE;
5237}
5238
5239/**
5240 * ipr_ata_post_internal - Cleanup after an internal command
5241 * @qc:	ATA queued command
5242 *
5243 * Return value:
5244 * 	none
5245 **/
5246static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5247{
5248	struct ipr_sata_port *sata_port = qc->ap->private_data;
5249	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5250	struct ipr_cmnd *ipr_cmd;
5251	unsigned long flags;
5252
5253	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5254	while(ioa_cfg->in_reset_reload) {
5255		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5256		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5257		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5258	}
5259
5260	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5261		if (ipr_cmd->qc == qc) {
5262			ipr_device_reset(ioa_cfg, sata_port->res);
5263			break;
5264		}
5265	}
5266	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5267}
5268
5269/**
5270 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5271 * @regs:	destination
5272 * @tf:	source ATA taskfile
5273 *
5274 * Return value:
5275 * 	none
5276 **/
5277static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5278			     struct ata_taskfile *tf)
5279{
5280	regs->feature = tf->feature;
5281	regs->nsect = tf->nsect;
5282	regs->lbal = tf->lbal;
5283	regs->lbam = tf->lbam;
5284	regs->lbah = tf->lbah;
5285	regs->device = tf->device;
5286	regs->command = tf->command;
5287	regs->hob_feature = tf->hob_feature;
5288	regs->hob_nsect = tf->hob_nsect;
5289	regs->hob_lbal = tf->hob_lbal;
5290	regs->hob_lbam = tf->hob_lbam;
5291	regs->hob_lbah = tf->hob_lbah;
5292	regs->ctl = tf->ctl;
5293}
5294
5295/**
5296 * ipr_sata_done - done function for SATA commands
5297 * @ipr_cmd:	ipr command struct
5298 *
5299 * This function is invoked by the interrupt handler for
5300 * ops generated by the SCSI mid-layer to SATA devices
5301 *
5302 * Return value:
5303 * 	none
5304 **/
5305static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5306{
5307	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5308	struct ata_queued_cmd *qc = ipr_cmd->qc;
5309	struct ipr_sata_port *sata_port = qc->ap->private_data;
5310	struct ipr_resource_entry *res = sata_port->res;
5311	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5312
5313	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5314	       sizeof(struct ipr_ioasa_gata));
5315	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5316
5317	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5318		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5319					 res->cfgte.res_addr.target);
5320
5321	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5322		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5323	else
5324		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5325	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5326	ata_qc_complete(qc);
5327}
5328
5329/**
5330 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5331 * @ipr_cmd:	ipr command struct
5332 * @qc:		ATA queued command
5333 *
5334 **/
5335static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5336				  struct ata_queued_cmd *qc)
5337{
5338	u32 ioadl_flags = 0;
5339	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5340	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5341	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5342	int len = qc->nbytes;
5343	struct scatterlist *sg;
5344	unsigned int si;
5345	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5346
5347	if (len == 0)
5348		return;
5349
5350	if (qc->dma_dir == DMA_TO_DEVICE) {
5351		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5352		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5353	} else if (qc->dma_dir == DMA_FROM_DEVICE)
5354		ioadl_flags = IPR_IOADL_FLAGS_READ;
5355
5356	ioarcb->data_transfer_length = cpu_to_be32(len);
5357	ioarcb->ioadl_len =
5358		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5359	ioarcb->u.sis64_addr_data.data_ioadl_addr =
5360		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5361
5362	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5363		ioadl64->flags = cpu_to_be32(ioadl_flags);
5364		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5365		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5366
5367		last_ioadl64 = ioadl64;
5368		ioadl64++;
5369	}
5370
5371	if (likely(last_ioadl64))
5372		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5373}
5374
5375/**
5376 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5377 * @ipr_cmd:	ipr command struct
5378 * @qc:		ATA queued command
5379 *
5380 **/
5381static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5382				struct ata_queued_cmd *qc)
5383{
5384	u32 ioadl_flags = 0;
5385	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5386	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5387	struct ipr_ioadl_desc *last_ioadl = NULL;
5388	int len = qc->nbytes;
5389	struct scatterlist *sg;
5390	unsigned int si;
5391
5392	if (len == 0)
5393		return;
5394
5395	if (qc->dma_dir == DMA_TO_DEVICE) {
5396		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5397		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5398		ioarcb->data_transfer_length = cpu_to_be32(len);
5399		ioarcb->ioadl_len =
5400			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5401	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5402		ioadl_flags = IPR_IOADL_FLAGS_READ;
5403		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5404		ioarcb->read_ioadl_len =
5405			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5406	}
5407
5408	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5409		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5410		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5411
5412		last_ioadl = ioadl;
5413		ioadl++;
5414	}
5415
5416	if (likely(last_ioadl))
5417		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5418}
5419
5420/**
5421 * ipr_qc_issue - Issue a SATA qc to a device
5422 * @qc:	queued command
5423 *
5424 * Return value:
5425 * 	0 if success
5426 **/
5427static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5428{
5429	struct ata_port *ap = qc->ap;
5430	struct ipr_sata_port *sata_port = ap->private_data;
5431	struct ipr_resource_entry *res = sata_port->res;
5432	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5433	struct ipr_cmnd *ipr_cmd;
5434	struct ipr_ioarcb *ioarcb;
5435	struct ipr_ioarcb_ata_regs *regs;
5436
5437	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5438		return AC_ERR_SYSTEM;
5439
5440	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5441	ioarcb = &ipr_cmd->ioarcb;
5442
5443	if (ioa_cfg->sis64) {
5444		regs = &ipr_cmd->i.ata_ioadl.regs;
5445		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5446	} else
5447		regs = &ioarcb->u.add_data.u.regs;
5448
5449	memset(regs, 0, sizeof(*regs));
5450	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5451
5452	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5453	ipr_cmd->qc = qc;
5454	ipr_cmd->done = ipr_sata_done;
5455	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5456	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5457	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5458	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5459	ipr_cmd->dma_use_sg = qc->n_elem;
5460
5461	if (ioa_cfg->sis64)
5462		ipr_build_ata_ioadl64(ipr_cmd, qc);
5463	else
5464		ipr_build_ata_ioadl(ipr_cmd, qc);
5465
5466	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5467	ipr_copy_sata_tf(regs, &qc->tf);
5468	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5469	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5470
5471	switch (qc->tf.protocol) {
5472	case ATA_PROT_NODATA:
5473	case ATA_PROT_PIO:
5474		break;
5475
5476	case ATA_PROT_DMA:
5477		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5478		break;
5479
5480	case ATAPI_PROT_PIO:
5481	case ATAPI_PROT_NODATA:
5482		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5483		break;
5484
5485	case ATAPI_PROT_DMA:
5486		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5487		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5488		break;
5489
5490	default:
5491		WARN_ON(1);
5492		return AC_ERR_INVALID;
5493	}
5494
5495	mb();
5496
5497	ipr_send_command(ipr_cmd);
5498
5499	return 0;
5500}
5501
5502/**
5503 * ipr_qc_fill_rtf - Read result TF
5504 * @qc: ATA queued command
5505 *
5506 * Return value:
5507 * 	true
5508 **/
5509static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
5510{
5511	struct ipr_sata_port *sata_port = qc->ap->private_data;
5512	struct ipr_ioasa_gata *g = &sata_port->ioasa;
5513	struct ata_taskfile *tf = &qc->result_tf;
5514
5515	tf->feature = g->error;
5516	tf->nsect = g->nsect;
5517	tf->lbal = g->lbal;
5518	tf->lbam = g->lbam;
5519	tf->lbah = g->lbah;
5520	tf->device = g->device;
5521	tf->command = g->status;
5522	tf->hob_nsect = g->hob_nsect;
5523	tf->hob_lbal = g->hob_lbal;
5524	tf->hob_lbam = g->hob_lbam;
5525	tf->hob_lbah = g->hob_lbah;
5526	tf->ctl = g->alt_status;
5527
5528	return true;
5529}
5530
5531static struct ata_port_operations ipr_sata_ops = {
5532	.phy_reset = ipr_ata_phy_reset,
5533	.hardreset = ipr_sata_reset,
5534	.post_internal_cmd = ipr_ata_post_internal,
5535	.qc_prep = ata_noop_qc_prep,
5536	.qc_issue = ipr_qc_issue,
5537	.qc_fill_rtf = ipr_qc_fill_rtf,
5538	.port_start = ata_sas_port_start,
5539	.port_stop = ata_sas_port_stop
5540};
5541
5542static struct ata_port_info sata_port_info = {
5543	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5544	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5545	.pio_mask	= 0x10, /* pio4 */
5546	.mwdma_mask = 0x07,
5547	.udma_mask	= 0x7f, /* udma0-6 */
5548	.port_ops	= &ipr_sata_ops
5549};
5550
5551#ifdef CONFIG_PPC_PSERIES
5552static const u16 ipr_blocked_processors[] = {
5553	PV_NORTHSTAR,
5554	PV_PULSAR,
5555	PV_POWER4,
5556	PV_ICESTAR,
5557	PV_SSTAR,
5558	PV_POWER4p,
5559	PV_630,
5560	PV_630p
5561};
5562
5563/**
5564 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5565 * @ioa_cfg:	ioa cfg struct
5566 *
5567 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5568 * certain pSeries hardware. This function determines if the given
5569 * adapter is in one of these confgurations or not.
5570 *
5571 * Return value:
5572 * 	1 if adapter is not supported / 0 if adapter is supported
5573 **/
5574static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5575{
5576	int i;
5577
5578	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
5579		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5580			if (__is_processor(ipr_blocked_processors[i]))
5581				return 1;
5582		}
5583	}
5584	return 0;
5585}
5586#else
5587#define ipr_invalid_adapter(ioa_cfg) 0
5588#endif
5589
5590/**
5591 * ipr_ioa_bringdown_done - IOA bring down completion.
5592 * @ipr_cmd:	ipr command struct
5593 *
5594 * This function processes the completion of an adapter bring down.
5595 * It wakes any reset sleepers.
5596 *
5597 * Return value:
5598 * 	IPR_RC_JOB_RETURN
5599 **/
5600static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5601{
5602	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5603
5604	ENTER;
5605	ioa_cfg->in_reset_reload = 0;
5606	ioa_cfg->reset_retries = 0;
5607	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5608	wake_up_all(&ioa_cfg->reset_wait_q);
5609
5610	spin_unlock_irq(ioa_cfg->host->host_lock);
5611	scsi_unblock_requests(ioa_cfg->host);
5612	spin_lock_irq(ioa_cfg->host->host_lock);
5613	LEAVE;
5614
5615	return IPR_RC_JOB_RETURN;
5616}
5617
5618/**
5619 * ipr_ioa_reset_done - IOA reset completion.
5620 * @ipr_cmd:	ipr command struct
5621 *
5622 * This function processes the completion of an adapter reset.
5623 * It schedules any necessary mid-layer add/removes and
5624 * wakes any reset sleepers.
5625 *
5626 * Return value:
5627 * 	IPR_RC_JOB_RETURN
5628 **/
5629static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5630{
5631	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5632	struct ipr_resource_entry *res;
5633	struct ipr_hostrcb *hostrcb, *temp;
5634	int i = 0;
5635
5636	ENTER;
5637	ioa_cfg->in_reset_reload = 0;
5638	ioa_cfg->allow_cmds = 1;
5639	ioa_cfg->reset_cmd = NULL;
5640	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5641
5642	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5643		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5644			ipr_trace;
5645			break;
5646		}
5647	}
5648	schedule_work(&ioa_cfg->work_q);
5649
5650	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5651		list_del(&hostrcb->queue);
5652		if (i++ < IPR_NUM_LOG_HCAMS)
5653			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5654		else
5655			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5656	}
5657
5658	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
5659	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5660
5661	ioa_cfg->reset_retries = 0;
5662	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5663	wake_up_all(&ioa_cfg->reset_wait_q);
5664
5665	spin_unlock(ioa_cfg->host->host_lock);
5666	scsi_unblock_requests(ioa_cfg->host);
5667	spin_lock(ioa_cfg->host->host_lock);
5668
5669	if (!ioa_cfg->allow_cmds)
5670		scsi_block_requests(ioa_cfg->host);
5671
5672	LEAVE;
5673	return IPR_RC_JOB_RETURN;
5674}
5675
5676/**
5677 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5678 * @supported_dev:	supported device struct
5679 * @vpids:			vendor product id struct
5680 *
5681 * Return value:
5682 * 	none
5683 **/
5684static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5685				 struct ipr_std_inq_vpids *vpids)
5686{
5687	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5688	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5689	supported_dev->num_records = 1;
5690	supported_dev->data_length =
5691		cpu_to_be16(sizeof(struct ipr_supported_device));
5692	supported_dev->reserved = 0;
5693}
5694
5695/**
5696 * ipr_set_supported_devs - Send Set Supported Devices for a device
5697 * @ipr_cmd:	ipr command struct
5698 *
5699 * This function sends a Set Supported Devices to the adapter
5700 *
5701 * Return value:
5702 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5703 **/
5704static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5705{
5706	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5707	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5708	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5709	struct ipr_resource_entry *res = ipr_cmd->u.res;
5710
5711	ipr_cmd->job_step = ipr_ioa_reset_done;
5712
5713	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5714		if (!ipr_is_scsi_disk(res))
5715			continue;
5716
5717		ipr_cmd->u.res = res;
5718		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5719
5720		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5721		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5722		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5723
5724		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5725		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5726		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5727
5728		ipr_init_ioadl(ipr_cmd,
5729			       ioa_cfg->vpd_cbs_dma +
5730				 offsetof(struct ipr_misc_cbs, supp_dev),
5731			       sizeof(struct ipr_supported_device),
5732			       IPR_IOADL_FLAGS_WRITE_LAST);
5733
5734		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5735			   IPR_SET_SUP_DEVICE_TIMEOUT);
5736
5737		ipr_cmd->job_step = ipr_set_supported_devs;
5738		return IPR_RC_JOB_RETURN;
5739	}
5740
5741	return IPR_RC_JOB_CONTINUE;
5742}
5743
5744/**
5745 * ipr_setup_write_cache - Disable write cache if needed
5746 * @ipr_cmd:	ipr command struct
5747 *
5748 * This function sets up adapters write cache to desired setting
5749 *
5750 * Return value:
5751 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5752 **/
5753static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5754{
5755	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5756
5757	ipr_cmd->job_step = ipr_set_supported_devs;
5758	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5759				    struct ipr_resource_entry, queue);
5760
5761	if (ioa_cfg->cache_state != CACHE_DISABLED)
5762		return IPR_RC_JOB_CONTINUE;
5763
5764	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5765	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5766	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5767	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5768
5769	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5770
5771	return IPR_RC_JOB_RETURN;
5772}
5773
5774/**
5775 * ipr_get_mode_page - Locate specified mode page
5776 * @mode_pages:	mode page buffer
5777 * @page_code:	page code to find
5778 * @len:		minimum required length for mode page
5779 *
5780 * Return value:
5781 * 	pointer to mode page / NULL on failure
5782 **/
5783static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5784			       u32 page_code, u32 len)
5785{
5786	struct ipr_mode_page_hdr *mode_hdr;
5787	u32 page_length;
5788	u32 length;
5789
5790	if (!mode_pages || (mode_pages->hdr.length == 0))
5791		return NULL;
5792
5793	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5794	mode_hdr = (struct ipr_mode_page_hdr *)
5795		(mode_pages->data + mode_pages->hdr.block_desc_len);
5796
5797	while (length) {
5798		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5799			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5800				return mode_hdr;
5801			break;
5802		} else {
5803			page_length = (sizeof(struct ipr_mode_page_hdr) +
5804				       mode_hdr->page_length);
5805			length -= page_length;
5806			mode_hdr = (struct ipr_mode_page_hdr *)
5807				((unsigned long)mode_hdr + page_length);
5808		}
5809	}
5810	return NULL;
5811}
5812
5813/**
5814 * ipr_check_term_power - Check for term power errors
5815 * @ioa_cfg:	ioa config struct
5816 * @mode_pages:	IOAFP mode pages buffer
5817 *
5818 * Check the IOAFP's mode page 28 for term power errors
5819 *
5820 * Return value:
5821 * 	nothing
5822 **/
5823static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5824				 struct ipr_mode_pages *mode_pages)
5825{
5826	int i;
5827	int entry_length;
5828	struct ipr_dev_bus_entry *bus;
5829	struct ipr_mode_page28 *mode_page;
5830
5831	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5832				      sizeof(struct ipr_mode_page28));
5833
5834	entry_length = mode_page->entry_length;
5835
5836	bus = mode_page->bus;
5837
5838	for (i = 0; i < mode_page->num_entries; i++) {
5839		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5840			dev_err(&ioa_cfg->pdev->dev,
5841				"Term power is absent on scsi bus %d\n",
5842				bus->res_addr.bus);
5843		}
5844
5845		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5846	}
5847}
5848
5849/**
5850 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5851 * @ioa_cfg:	ioa config struct
5852 *
5853 * Looks through the config table checking for SES devices. If
5854 * the SES device is in the SES table indicating a maximum SCSI
5855 * bus speed, the speed is limited for the bus.
5856 *
5857 * Return value:
5858 * 	none
5859 **/
5860static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5861{
5862	u32 max_xfer_rate;
5863	int i;
5864
5865	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5866		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5867						       ioa_cfg->bus_attr[i].bus_width);
5868
5869		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5870			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5871	}
5872}
5873
5874/**
5875 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5876 * @ioa_cfg:	ioa config struct
5877 * @mode_pages:	mode page 28 buffer
5878 *
5879 * Updates mode page 28 based on driver configuration
5880 *
5881 * Return value:
5882 * 	none
5883 **/
5884static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5885					  	struct ipr_mode_pages *mode_pages)
5886{
5887	int i, entry_length;
5888	struct ipr_dev_bus_entry *bus;
5889	struct ipr_bus_attributes *bus_attr;
5890	struct ipr_mode_page28 *mode_page;
5891
5892	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5893				      sizeof(struct ipr_mode_page28));
5894
5895	entry_length = mode_page->entry_length;
5896
5897	/* Loop for each device bus entry */
5898	for (i = 0, bus = mode_page->bus;
5899	     i < mode_page->num_entries;
5900	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5901		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5902			dev_err(&ioa_cfg->pdev->dev,
5903				"Invalid resource address reported: 0x%08X\n",
5904				IPR_GET_PHYS_LOC(bus->res_addr));
5905			continue;
5906		}
5907
5908		bus_attr = &ioa_cfg->bus_attr[i];
5909		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5910		bus->bus_width = bus_attr->bus_width;
5911		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5912		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5913		if (bus_attr->qas_enabled)
5914			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5915		else
5916			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5917	}
5918}
5919
5920/**
5921 * ipr_build_mode_select - Build a mode select command
5922 * @ipr_cmd:	ipr command struct
5923 * @res_handle:	resource handle to send command to
5924 * @parm:		Byte 2 of Mode Sense command
5925 * @dma_addr:	DMA buffer address
5926 * @xfer_len:	data transfer length
5927 *
5928 * Return value:
5929 * 	none
5930 **/
5931static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5932				  __be32 res_handle, u8 parm,
5933				  dma_addr_t dma_addr, u8 xfer_len)
5934{
5935	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5936
5937	ioarcb->res_handle = res_handle;
5938	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5939	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5940	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5941	ioarcb->cmd_pkt.cdb[1] = parm;
5942	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5943
5944	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
5945}
5946
5947/**
5948 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5949 * @ipr_cmd:	ipr command struct
5950 *
5951 * This function sets up the SCSI bus attributes and sends
5952 * a Mode Select for Page 28 to activate them.
5953 *
5954 * Return value:
5955 * 	IPR_RC_JOB_RETURN
5956 **/
5957static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5958{
5959	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5960	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5961	int length;
5962
5963	ENTER;
5964	ipr_scsi_bus_speed_limit(ioa_cfg);
5965	ipr_check_term_power(ioa_cfg, mode_pages);
5966	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
5967	length = mode_pages->hdr.length + 1;
5968	mode_pages->hdr.length = 0;
5969
5970	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5971			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5972			      length);
5973
5974	ipr_cmd->job_step = ipr_setup_write_cache;
5975	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5976
5977	LEAVE;
5978	return IPR_RC_JOB_RETURN;
5979}
5980
5981/**
5982 * ipr_build_mode_sense - Builds a mode sense command
5983 * @ipr_cmd:	ipr command struct
5984 * @res:		resource entry struct
5985 * @parm:		Byte 2 of mode sense command
5986 * @dma_addr:	DMA address of mode sense buffer
5987 * @xfer_len:	Size of DMA buffer
5988 *
5989 * Return value:
5990 * 	none
5991 **/
5992static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5993				 __be32 res_handle,
5994				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
5995{
5996	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5997
5998	ioarcb->res_handle = res_handle;
5999	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6000	ioarcb->cmd_pkt.cdb[2] = parm;
6001	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6002	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6003
6004	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6005}
6006
6007/**
6008 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6009 * @ipr_cmd:	ipr command struct
6010 *
6011 * This function handles the failure of an IOA bringup command.
6012 *
6013 * Return value:
6014 * 	IPR_RC_JOB_RETURN
6015 **/
6016static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6017{
6018	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6019	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6020
6021	dev_err(&ioa_cfg->pdev->dev,
6022		"0x%02X failed with IOASC: 0x%08X\n",
6023		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6024
6025	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6026	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6027	return IPR_RC_JOB_RETURN;
6028}
6029
6030/**
6031 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6032 * @ipr_cmd:	ipr command struct
6033 *
6034 * This function handles the failure of a Mode Sense to the IOAFP.
6035 * Some adapters do not handle all mode pages.
6036 *
6037 * Return value:
6038 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6039 **/
6040static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6041{
6042	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6043
6044	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6045		ipr_cmd->job_step = ipr_setup_write_cache;
6046		return IPR_RC_JOB_CONTINUE;
6047	}
6048
6049	return ipr_reset_cmd_failed(ipr_cmd);
6050}
6051
6052/**
6053 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6054 * @ipr_cmd:	ipr command struct
6055 *
6056 * This function send a Page 28 mode sense to the IOA to
6057 * retrieve SCSI bus attributes.
6058 *
6059 * Return value:
6060 * 	IPR_RC_JOB_RETURN
6061 **/
6062static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6063{
6064	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6065
6066	ENTER;
6067	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6068			     0x28, ioa_cfg->vpd_cbs_dma +
6069			     offsetof(struct ipr_misc_cbs, mode_pages),
6070			     sizeof(struct ipr_mode_pages));
6071
6072	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6073	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6074
6075	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6076
6077	LEAVE;
6078	return IPR_RC_JOB_RETURN;
6079}
6080
6081/**
6082 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6083 * @ipr_cmd:	ipr command struct
6084 *
6085 * This function enables dual IOA RAID support if possible.
6086 *
6087 * Return value:
6088 * 	IPR_RC_JOB_RETURN
6089 **/
6090static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6091{
6092	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6093	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6094	struct ipr_mode_page24 *mode_page;
6095	int length;
6096
6097	ENTER;
6098	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6099				      sizeof(struct ipr_mode_page24));
6100
6101	if (mode_page)
6102		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6103
6104	length = mode_pages->hdr.length + 1;
6105	mode_pages->hdr.length = 0;
6106
6107	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6108			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6109			      length);
6110
6111	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6112	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6113
6114	LEAVE;
6115	return IPR_RC_JOB_RETURN;
6116}
6117
6118/**
6119 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6120 * @ipr_cmd:	ipr command struct
6121 *
6122 * This function handles the failure of a Mode Sense to the IOAFP.
6123 * Some adapters do not handle all mode pages.
6124 *
6125 * Return value:
6126 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6127 **/
6128static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6129{
6130	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6131
6132	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6133		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6134		return IPR_RC_JOB_CONTINUE;
6135	}
6136
6137	return ipr_reset_cmd_failed(ipr_cmd);
6138}
6139
6140/**
6141 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6142 * @ipr_cmd:	ipr command struct
6143 *
6144 * This function send a mode sense to the IOA to retrieve
6145 * the IOA Advanced Function Control mode page.
6146 *
6147 * Return value:
6148 * 	IPR_RC_JOB_RETURN
6149 **/
6150static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6151{
6152	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6153
6154	ENTER;
6155	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6156			     0x24, ioa_cfg->vpd_cbs_dma +
6157			     offsetof(struct ipr_misc_cbs, mode_pages),
6158			     sizeof(struct ipr_mode_pages));
6159
6160	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6161	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6162
6163	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6164
6165	LEAVE;
6166	return IPR_RC_JOB_RETURN;
6167}
6168
6169/**
6170 * ipr_init_res_table - Initialize the resource table
6171 * @ipr_cmd:	ipr command struct
6172 *
6173 * This function looks through the existing resource table, comparing
6174 * it with the config table. This function will take care of old/new
6175 * devices and schedule adding/removing them from the mid-layer
6176 * as appropriate.
6177 *
6178 * Return value:
6179 * 	IPR_RC_JOB_CONTINUE
6180 **/
6181static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6182{
6183	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6184	struct ipr_resource_entry *res, *temp;
6185	struct ipr_config_table_entry *cfgte;
6186	int found, i;
6187	LIST_HEAD(old_res);
6188
6189	ENTER;
6190	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
6191		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6192
6193	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6194		list_move_tail(&res->queue, &old_res);
6195
6196	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
6197		cfgte = &ioa_cfg->cfg_table->dev[i];
6198		found = 0;
6199
6200		list_for_each_entry_safe(res, temp, &old_res, queue) {
6201			if (!memcmp(&res->cfgte.res_addr,
6202				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
6203				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6204				found = 1;
6205				break;
6206			}
6207		}
6208
6209		if (!found) {
6210			if (list_empty(&ioa_cfg->free_res_q)) {
6211				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6212				break;
6213			}
6214
6215			found = 1;
6216			res = list_entry(ioa_cfg->free_res_q.next,
6217					 struct ipr_resource_entry, queue);
6218			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6219			ipr_init_res_entry(res);
6220			res->add_to_ml = 1;
6221		}
6222
6223		if (found)
6224			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
6225	}
6226
6227	list_for_each_entry_safe(res, temp, &old_res, queue) {
6228		if (res->sdev) {
6229			res->del_from_ml = 1;
6230			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
6231			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6232		} else {
6233			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6234		}
6235	}
6236
6237	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6238		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6239	else
6240		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6241
6242	LEAVE;
6243	return IPR_RC_JOB_CONTINUE;
6244}
6245
6246/**
6247 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6248 * @ipr_cmd:	ipr command struct
6249 *
6250 * This function sends a Query IOA Configuration command
6251 * to the adapter to retrieve the IOA configuration table.
6252 *
6253 * Return value:
6254 * 	IPR_RC_JOB_RETURN
6255 **/
6256static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6257{
6258	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6259	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6260	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6261	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6262
6263	ENTER;
6264	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6265		ioa_cfg->dual_raid = 1;
6266	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6267		 ucode_vpd->major_release, ucode_vpd->card_type,
6268		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6269	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6270	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6271
6272	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6273	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
6274	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
6275
6276	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma,
6277		       sizeof(struct ipr_config_table),
6278		       IPR_IOADL_FLAGS_READ_LAST);
6279
6280	ipr_cmd->job_step = ipr_init_res_table;
6281
6282	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6283
6284	LEAVE;
6285	return IPR_RC_JOB_RETURN;
6286}
6287
6288/**
6289 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6290 * @ipr_cmd:	ipr command struct
6291 *
6292 * This utility function sends an inquiry to the adapter.
6293 *
6294 * Return value:
6295 * 	none
6296 **/
6297static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6298			      dma_addr_t dma_addr, u8 xfer_len)
6299{
6300	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6301
6302	ENTER;
6303	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6304	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6305
6306	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6307	ioarcb->cmd_pkt.cdb[1] = flags;
6308	ioarcb->cmd_pkt.cdb[2] = page;
6309	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6310
6311	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6312
6313	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6314	LEAVE;
6315}
6316
6317/**
6318 * ipr_inquiry_page_supported - Is the given inquiry page supported
6319 * @page0:		inquiry page 0 buffer
6320 * @page:		page code.
6321 *
6322 * This function determines if the specified inquiry page is supported.
6323 *
6324 * Return value:
6325 *	1 if page is supported / 0 if not
6326 **/
6327static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6328{
6329	int i;
6330
6331	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6332		if (page0->page[i] == page)
6333			return 1;
6334
6335	return 0;
6336}
6337
6338/**
6339 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6340 * @ipr_cmd:	ipr command struct
6341 *
6342 * This function sends a Page 0xD0 inquiry to the adapter
6343 * to retrieve adapter capabilities.
6344 *
6345 * Return value:
6346 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6347 **/
6348static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6349{
6350	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6351	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6352	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6353
6354	ENTER;
6355	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6356	memset(cap, 0, sizeof(*cap));
6357
6358	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6359		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6360				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6361				  sizeof(struct ipr_inquiry_cap));
6362		return IPR_RC_JOB_RETURN;
6363	}
6364
6365	LEAVE;
6366	return IPR_RC_JOB_CONTINUE;
6367}
6368
6369/**
6370 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6371 * @ipr_cmd:	ipr command struct
6372 *
6373 * This function sends a Page 3 inquiry to the adapter
6374 * to retrieve software VPD information.
6375 *
6376 * Return value:
6377 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6378 **/
6379static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6380{
6381	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6382	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6383
6384	ENTER;
6385
6386	if (!ipr_inquiry_page_supported(page0, 1))
6387		ioa_cfg->cache_state = CACHE_NONE;
6388
6389	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6390
6391	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6392			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6393			  sizeof(struct ipr_inquiry_page3));
6394
6395	LEAVE;
6396	return IPR_RC_JOB_RETURN;
6397}
6398
6399/**
6400 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6401 * @ipr_cmd:	ipr command struct
6402 *
6403 * This function sends a Page 0 inquiry to the adapter
6404 * to retrieve supported inquiry pages.
6405 *
6406 * Return value:
6407 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6408 **/
6409static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6410{
6411	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6412	char type[5];
6413
6414	ENTER;
6415
6416	/* Grab the type out of the VPD and store it away */
6417	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6418	type[4] = '\0';
6419	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6420
6421	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6422
6423	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6424			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6425			  sizeof(struct ipr_inquiry_page0));
6426
6427	LEAVE;
6428	return IPR_RC_JOB_RETURN;
6429}
6430
6431/**
6432 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6433 * @ipr_cmd:	ipr command struct
6434 *
6435 * This function sends a standard inquiry to the adapter.
6436 *
6437 * Return value:
6438 * 	IPR_RC_JOB_RETURN
6439 **/
6440static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6441{
6442	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6443
6444	ENTER;
6445	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6446
6447	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6448			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6449			  sizeof(struct ipr_ioa_vpd));
6450
6451	LEAVE;
6452	return IPR_RC_JOB_RETURN;
6453}
6454
6455/**
6456 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6457 * @ipr_cmd:	ipr command struct
6458 *
6459 * This function send an Identify Host Request Response Queue
6460 * command to establish the HRRQ with the adapter.
6461 *
6462 * Return value:
6463 * 	IPR_RC_JOB_RETURN
6464 **/
6465static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6466{
6467	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6468	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6469
6470	ENTER;
6471	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6472
6473	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6474	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6475
6476	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6477	ioarcb->cmd_pkt.cdb[2] =
6478		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6479	ioarcb->cmd_pkt.cdb[3] =
6480		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6481	ioarcb->cmd_pkt.cdb[4] =
6482		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6483	ioarcb->cmd_pkt.cdb[5] =
6484		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6485	ioarcb->cmd_pkt.cdb[7] =
6486		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6487	ioarcb->cmd_pkt.cdb[8] =
6488		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6489
6490	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6491
6492	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6493
6494	LEAVE;
6495	return IPR_RC_JOB_RETURN;
6496}
6497
6498/**
6499 * ipr_reset_timer_done - Adapter reset timer function
6500 * @ipr_cmd:	ipr command struct
6501 *
6502 * Description: This function is used in adapter reset processing
6503 * for timing events. If the reset_cmd pointer in the IOA
6504 * config struct is not this adapter's we are doing nested
6505 * resets and fail_all_ops will take care of freeing the
6506 * command block.
6507 *
6508 * Return value:
6509 * 	none
6510 **/
6511static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6512{
6513	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6514	unsigned long lock_flags = 0;
6515
6516	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6517
6518	if (ioa_cfg->reset_cmd == ipr_cmd) {
6519		list_del(&ipr_cmd->queue);
6520		ipr_cmd->done(ipr_cmd);
6521	}
6522
6523	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6524}
6525
6526/**
6527 * ipr_reset_start_timer - Start a timer for adapter reset job
6528 * @ipr_cmd:	ipr command struct
6529 * @timeout:	timeout value
6530 *
6531 * Description: This function is used in adapter reset processing
6532 * for timing events. If the reset_cmd pointer in the IOA
6533 * config struct is not this adapter's we are doing nested
6534 * resets and fail_all_ops will take care of freeing the
6535 * command block.
6536 *
6537 * Return value:
6538 * 	none
6539 **/
6540static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6541				  unsigned long timeout)
6542{
6543	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6544	ipr_cmd->done = ipr_reset_ioa_job;
6545
6546	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6547	ipr_cmd->timer.expires = jiffies + timeout;
6548	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6549	add_timer(&ipr_cmd->timer);
6550}
6551
6552/**
6553 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6554 * @ioa_cfg:	ioa cfg struct
6555 *
6556 * Return value:
6557 * 	nothing
6558 **/
6559static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6560{
6561	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6562
6563	/* Initialize Host RRQ pointers */
6564	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6565	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6566	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6567	ioa_cfg->toggle_bit = 1;
6568
6569	/* Zero out config table */
6570	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6571}
6572
6573/**
6574 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6575 * @ipr_cmd:	ipr command struct
6576 *
6577 * This function reinitializes some control blocks and
6578 * enables destructive diagnostics on the adapter.
6579 *
6580 * Return value:
6581 * 	IPR_RC_JOB_RETURN
6582 **/
6583static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6584{
6585	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6586	volatile u32 int_reg;
6587
6588	ENTER;
6589	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6590	ipr_init_ioa_mem(ioa_cfg);
6591
6592	ioa_cfg->allow_interrupts = 1;
6593	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6594
6595	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6596		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6597		       ioa_cfg->regs.clr_interrupt_mask_reg);
6598		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6599		return IPR_RC_JOB_CONTINUE;
6600	}
6601
6602	/* Enable destructive diagnostics on IOA */
6603	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6604
6605	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6606	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6607
6608	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6609
6610	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6611	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6612	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6613	ipr_cmd->done = ipr_reset_ioa_job;
6614	add_timer(&ipr_cmd->timer);
6615	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6616
6617	LEAVE;
6618	return IPR_RC_JOB_RETURN;
6619}
6620
6621/**
6622 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6623 * @ipr_cmd:	ipr command struct
6624 *
6625 * This function is invoked when an adapter dump has run out
6626 * of processing time.
6627 *
6628 * Return value:
6629 * 	IPR_RC_JOB_CONTINUE
6630 **/
6631static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6632{
6633	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6634
6635	if (ioa_cfg->sdt_state == GET_DUMP)
6636		ioa_cfg->sdt_state = ABORT_DUMP;
6637
6638	ipr_cmd->job_step = ipr_reset_alert;
6639
6640	return IPR_RC_JOB_CONTINUE;
6641}
6642
6643/**
6644 * ipr_unit_check_no_data - Log a unit check/no data error log
6645 * @ioa_cfg:		ioa config struct
6646 *
6647 * Logs an error indicating the adapter unit checked, but for some
6648 * reason, we were unable to fetch the unit check buffer.
6649 *
6650 * Return value:
6651 * 	nothing
6652 **/
6653static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6654{
6655	ioa_cfg->errors_logged++;
6656	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6657}
6658
6659/**
6660 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6661 * @ioa_cfg:		ioa config struct
6662 *
6663 * Fetches the unit check buffer from the adapter by clocking the data
6664 * through the mailbox register.
6665 *
6666 * Return value:
6667 * 	nothing
6668 **/
6669static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6670{
6671	unsigned long mailbox;
6672	struct ipr_hostrcb *hostrcb;
6673	struct ipr_uc_sdt sdt;
6674	int rc, length;
6675	u32 ioasc;
6676
6677	mailbox = readl(ioa_cfg->ioa_mailbox);
6678
6679	if (!ipr_sdt_is_fmt2(mailbox)) {
6680		ipr_unit_check_no_data(ioa_cfg);
6681		return;
6682	}
6683
6684	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6685	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6686					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6687
6688	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6689	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6690		ipr_unit_check_no_data(ioa_cfg);
6691		return;
6692	}
6693
6694	/* Find length of the first sdt entry (UC buffer) */
6695	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6696		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6697
6698	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6699			     struct ipr_hostrcb, queue);
6700	list_del(&hostrcb->queue);
6701	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6702
6703	rc = ipr_get_ldump_data_section(ioa_cfg,
6704					be32_to_cpu(sdt.entry[0].bar_str_offset),
6705					(__be32 *)&hostrcb->hcam,
6706					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6707
6708	if (!rc) {
6709		ipr_handle_log_data(ioa_cfg, hostrcb);
6710		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
6711		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6712		    ioa_cfg->sdt_state == GET_DUMP)
6713			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6714	} else
6715		ipr_unit_check_no_data(ioa_cfg);
6716
6717	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6718}
6719
6720/**
6721 * ipr_reset_restore_cfg_space - Restore PCI config space.
6722 * @ipr_cmd:	ipr command struct
6723 *
6724 * Description: This function restores the saved PCI config space of
6725 * the adapter, fails all outstanding ops back to the callers, and
6726 * fetches the dump/unit check if applicable to this reset.
6727 *
6728 * Return value:
6729 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6730 **/
6731static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6732{
6733	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6734	int rc;
6735
6736	ENTER;
6737	ioa_cfg->pdev->state_saved = true;
6738	rc = pci_restore_state(ioa_cfg->pdev);
6739
6740	if (rc != PCIBIOS_SUCCESSFUL) {
6741		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6742		return IPR_RC_JOB_CONTINUE;
6743	}
6744
6745	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6746		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6747		return IPR_RC_JOB_CONTINUE;
6748	}
6749
6750	ipr_fail_all_ops(ioa_cfg);
6751
6752	if (ioa_cfg->ioa_unit_checked) {
6753		ioa_cfg->ioa_unit_checked = 0;
6754		ipr_get_unit_check_buffer(ioa_cfg);
6755		ipr_cmd->job_step = ipr_reset_alert;
6756		ipr_reset_start_timer(ipr_cmd, 0);
6757		return IPR_RC_JOB_RETURN;
6758	}
6759
6760	if (ioa_cfg->in_ioa_bringdown) {
6761		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6762	} else {
6763		ipr_cmd->job_step = ipr_reset_enable_ioa;
6764
6765		if (GET_DUMP == ioa_cfg->sdt_state) {
6766			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6767			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6768			schedule_work(&ioa_cfg->work_q);
6769			return IPR_RC_JOB_RETURN;
6770		}
6771	}
6772
6773	ENTER;
6774	return IPR_RC_JOB_CONTINUE;
6775}
6776
6777/**
6778 * ipr_reset_bist_done - BIST has completed on the adapter.
6779 * @ipr_cmd:	ipr command struct
6780 *
6781 * Description: Unblock config space and resume the reset process.
6782 *
6783 * Return value:
6784 * 	IPR_RC_JOB_CONTINUE
6785 **/
6786static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6787{
6788	ENTER;
6789	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6790	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6791	LEAVE;
6792	return IPR_RC_JOB_CONTINUE;
6793}
6794
6795/**
6796 * ipr_reset_start_bist - Run BIST on the adapter.
6797 * @ipr_cmd:	ipr command struct
6798 *
6799 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6800 *
6801 * Return value:
6802 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6803 **/
6804static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6805{
6806	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6807	int rc;
6808
6809	ENTER;
6810	pci_block_user_cfg_access(ioa_cfg->pdev);
6811	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6812
6813	if (rc != PCIBIOS_SUCCESSFUL) {
6814		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6815		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6816		rc = IPR_RC_JOB_CONTINUE;
6817	} else {
6818		ipr_cmd->job_step = ipr_reset_bist_done;
6819		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6820		rc = IPR_RC_JOB_RETURN;
6821	}
6822
6823	LEAVE;
6824	return rc;
6825}
6826
6827/**
6828 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
6829 * @ipr_cmd:	ipr command struct
6830 *
6831 * Description: This clears PCI reset to the adapter and delays two seconds.
6832 *
6833 * Return value:
6834 * 	IPR_RC_JOB_RETURN
6835 **/
6836static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
6837{
6838	ENTER;
6839	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
6840	ipr_cmd->job_step = ipr_reset_bist_done;
6841	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6842	LEAVE;
6843	return IPR_RC_JOB_RETURN;
6844}
6845
6846/**
6847 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
6848 * @ipr_cmd:	ipr command struct
6849 *
6850 * Description: This asserts PCI reset to the adapter.
6851 *
6852 * Return value:
6853 * 	IPR_RC_JOB_RETURN
6854 **/
6855static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
6856{
6857	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6858	struct pci_dev *pdev = ioa_cfg->pdev;
6859
6860	ENTER;
6861	pci_block_user_cfg_access(pdev);
6862	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
6863	ipr_cmd->job_step = ipr_reset_slot_reset_done;
6864	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
6865	LEAVE;
6866	return IPR_RC_JOB_RETURN;
6867}
6868
6869/**
6870 * ipr_reset_allowed - Query whether or not IOA can be reset
6871 * @ioa_cfg:	ioa config struct
6872 *
6873 * Return value:
6874 * 	0 if reset not allowed / non-zero if reset is allowed
6875 **/
6876static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6877{
6878	volatile u32 temp_reg;
6879
6880	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6881	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6882}
6883
6884/**
6885 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6886 * @ipr_cmd:	ipr command struct
6887 *
6888 * Description: This function waits for adapter permission to run BIST,
6889 * then runs BIST. If the adapter does not give permission after a
6890 * reasonable time, we will reset the adapter anyway. The impact of
6891 * resetting the adapter without warning the adapter is the risk of
6892 * losing the persistent error log on the adapter. If the adapter is
6893 * reset while it is writing to the flash on the adapter, the flash
6894 * segment will have bad ECC and be zeroed.
6895 *
6896 * Return value:
6897 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6898 **/
6899static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6900{
6901	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6902	int rc = IPR_RC_JOB_RETURN;
6903
6904	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6905		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6906		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6907	} else {
6908		ipr_cmd->job_step = ioa_cfg->reset;
6909		rc = IPR_RC_JOB_CONTINUE;
6910	}
6911
6912	return rc;
6913}
6914
6915/**
6916 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6917 * @ipr_cmd:	ipr command struct
6918 *
6919 * Description: This function alerts the adapter that it will be reset.
6920 * If memory space is not currently enabled, proceed directly
6921 * to running BIST on the adapter. The timer must always be started
6922 * so we guarantee we do not run BIST from ipr_isr.
6923 *
6924 * Return value:
6925 * 	IPR_RC_JOB_RETURN
6926 **/
6927static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6928{
6929	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6930	u16 cmd_reg;
6931	int rc;
6932
6933	ENTER;
6934	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6935
6936	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6937		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6938		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6939		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6940	} else {
6941		ipr_cmd->job_step = ioa_cfg->reset;
6942	}
6943
6944	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6945	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6946
6947	LEAVE;
6948	return IPR_RC_JOB_RETURN;
6949}
6950
6951/**
6952 * ipr_reset_ucode_download_done - Microcode download completion
6953 * @ipr_cmd:	ipr command struct
6954 *
6955 * Description: This function unmaps the microcode download buffer.
6956 *
6957 * Return value:
6958 * 	IPR_RC_JOB_CONTINUE
6959 **/
6960static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6961{
6962	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6964
6965	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6966		     sglist->num_sg, DMA_TO_DEVICE);
6967
6968	ipr_cmd->job_step = ipr_reset_alert;
6969	return IPR_RC_JOB_CONTINUE;
6970}
6971
6972/**
6973 * ipr_reset_ucode_download - Download microcode to the adapter
6974 * @ipr_cmd:	ipr command struct
6975 *
6976 * Description: This function checks to see if it there is microcode
6977 * to download to the adapter. If there is, a download is performed.
6978 *
6979 * Return value:
6980 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6981 **/
6982static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6983{
6984	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6985	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6986
6987	ENTER;
6988	ipr_cmd->job_step = ipr_reset_alert;
6989
6990	if (!sglist)
6991		return IPR_RC_JOB_CONTINUE;
6992
6993	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6994	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6995	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6996	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6997	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6998	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6999	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7000
7001	if (ioa_cfg->sis64)
7002		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7003	else
7004		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7005	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7006
7007	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7008		   IPR_WRITE_BUFFER_TIMEOUT);
7009
7010	LEAVE;
7011	return IPR_RC_JOB_RETURN;
7012}
7013
7014/**
7015 * ipr_reset_shutdown_ioa - Shutdown the adapter
7016 * @ipr_cmd:	ipr command struct
7017 *
7018 * Description: This function issues an adapter shutdown of the
7019 * specified type to the specified adapter as part of the
7020 * adapter reset job.
7021 *
7022 * Return value:
7023 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7024 **/
7025static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7026{
7027	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7028	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7029	unsigned long timeout;
7030	int rc = IPR_RC_JOB_CONTINUE;
7031
7032	ENTER;
7033	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7034		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7035		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7036		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7037		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7038
7039		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7040			timeout = IPR_SHUTDOWN_TIMEOUT;
7041		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7042			timeout = IPR_INTERNAL_TIMEOUT;
7043		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7044			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7045		else
7046			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7047
7048		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7049
7050		rc = IPR_RC_JOB_RETURN;
7051		ipr_cmd->job_step = ipr_reset_ucode_download;
7052	} else
7053		ipr_cmd->job_step = ipr_reset_alert;
7054
7055	LEAVE;
7056	return rc;
7057}
7058
7059/**
7060 * ipr_reset_ioa_job - Adapter reset job
7061 * @ipr_cmd:	ipr command struct
7062 *
7063 * Description: This function is the job router for the adapter reset job.
7064 *
7065 * Return value:
7066 * 	none
7067 **/
7068static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7069{
7070	u32 rc, ioasc;
7071	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7072
7073	do {
7074		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7075
7076		if (ioa_cfg->reset_cmd != ipr_cmd) {
7077			/*
7078			 * We are doing nested adapter resets and this is
7079			 * not the current reset job.
7080			 */
7081			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7082			return;
7083		}
7084
7085		if (IPR_IOASC_SENSE_KEY(ioasc)) {
7086			rc = ipr_cmd->job_step_failed(ipr_cmd);
7087			if (rc == IPR_RC_JOB_RETURN)
7088				return;
7089		}
7090
7091		ipr_reinit_ipr_cmnd(ipr_cmd);
7092		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7093		rc = ipr_cmd->job_step(ipr_cmd);
7094	} while(rc == IPR_RC_JOB_CONTINUE);
7095}
7096
7097/**
7098 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7099 * @ioa_cfg:		ioa config struct
7100 * @job_step:		first job step of reset job
7101 * @shutdown_type:	shutdown type
7102 *
7103 * Description: This function will initiate the reset of the given adapter
7104 * starting at the selected job step.
7105 * If the caller needs to wait on the completion of the reset,
7106 * the caller must sleep on the reset_wait_q.
7107 *
7108 * Return value:
7109 * 	none
7110 **/
7111static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7112				    int (*job_step) (struct ipr_cmnd *),
7113				    enum ipr_shutdown_type shutdown_type)
7114{
7115	struct ipr_cmnd *ipr_cmd;
7116
7117	ioa_cfg->in_reset_reload = 1;
7118	ioa_cfg->allow_cmds = 0;
7119	scsi_block_requests(ioa_cfg->host);
7120
7121	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7122	ioa_cfg->reset_cmd = ipr_cmd;
7123	ipr_cmd->job_step = job_step;
7124	ipr_cmd->u.shutdown_type = shutdown_type;
7125
7126	ipr_reset_ioa_job(ipr_cmd);
7127}
7128
7129/**
7130 * ipr_initiate_ioa_reset - Initiate an adapter reset
7131 * @ioa_cfg:		ioa config struct
7132 * @shutdown_type:	shutdown type
7133 *
7134 * Description: This function will initiate the reset of the given adapter.
7135 * If the caller needs to wait on the completion of the reset,
7136 * the caller must sleep on the reset_wait_q.
7137 *
7138 * Return value:
7139 * 	none
7140 **/
7141static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7142				   enum ipr_shutdown_type shutdown_type)
7143{
7144	if (ioa_cfg->ioa_is_dead)
7145		return;
7146
7147	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7148		ioa_cfg->sdt_state = ABORT_DUMP;
7149
7150	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7151		dev_err(&ioa_cfg->pdev->dev,
7152			"IOA taken offline - error recovery failed\n");
7153
7154		ioa_cfg->reset_retries = 0;
7155		ioa_cfg->ioa_is_dead = 1;
7156
7157		if (ioa_cfg->in_ioa_bringdown) {
7158			ioa_cfg->reset_cmd = NULL;
7159			ioa_cfg->in_reset_reload = 0;
7160			ipr_fail_all_ops(ioa_cfg);
7161			wake_up_all(&ioa_cfg->reset_wait_q);
7162
7163			spin_unlock_irq(ioa_cfg->host->host_lock);
7164			scsi_unblock_requests(ioa_cfg->host);
7165			spin_lock_irq(ioa_cfg->host->host_lock);
7166			return;
7167		} else {
7168			ioa_cfg->in_ioa_bringdown = 1;
7169			shutdown_type = IPR_SHUTDOWN_NONE;
7170		}
7171	}
7172
7173	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7174				shutdown_type);
7175}
7176
7177/**
7178 * ipr_reset_freeze - Hold off all I/O activity
7179 * @ipr_cmd:	ipr command struct
7180 *
7181 * Description: If the PCI slot is frozen, hold off all I/O
7182 * activity; then, as soon as the slot is available again,
7183 * initiate an adapter reset.
7184 */
7185static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7186{
7187	/* Disallow new interrupts, avoid loop */
7188	ipr_cmd->ioa_cfg->allow_interrupts = 0;
7189	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7190	ipr_cmd->done = ipr_reset_ioa_job;
7191	return IPR_RC_JOB_RETURN;
7192}
7193
7194/**
7195 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7196 * @pdev:	PCI device struct
7197 *
7198 * Description: This routine is called to tell us that the PCI bus
7199 * is down. Can't do anything here, except put the device driver
7200 * into a holding pattern, waiting for the PCI bus to come back.
7201 */
7202static void ipr_pci_frozen(struct pci_dev *pdev)
7203{
7204	unsigned long flags = 0;
7205	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7206
7207	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7208	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7209	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7210}
7211
7212/**
7213 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7214 * @pdev:	PCI device struct
7215 *
7216 * Description: This routine is called by the pci error recovery
7217 * code after the PCI slot has been reset, just before we
7218 * should resume normal operations.
7219 */
7220static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7221{
7222	unsigned long flags = 0;
7223	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7224
7225	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7226	if (ioa_cfg->needs_warm_reset)
7227		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7228	else
7229		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7230					IPR_SHUTDOWN_NONE);
7231	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7232	return PCI_ERS_RESULT_RECOVERED;
7233}
7234
7235/**
7236 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7237 * @pdev:	PCI device struct
7238 *
7239 * Description: This routine is called when the PCI bus has
7240 * permanently failed.
7241 */
7242static void ipr_pci_perm_failure(struct pci_dev *pdev)
7243{
7244	unsigned long flags = 0;
7245	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7246
7247	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7248	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7249		ioa_cfg->sdt_state = ABORT_DUMP;
7250	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7251	ioa_cfg->in_ioa_bringdown = 1;
7252	ioa_cfg->allow_cmds = 0;
7253	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7254	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7255}
7256
7257/**
7258 * ipr_pci_error_detected - Called when a PCI error is detected.
7259 * @pdev:	PCI device struct
7260 * @state:	PCI channel state
7261 *
7262 * Description: Called when a PCI error is detected.
7263 *
7264 * Return value:
7265 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7266 */
7267static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7268					       pci_channel_state_t state)
7269{
7270	switch (state) {
7271	case pci_channel_io_frozen:
7272		ipr_pci_frozen(pdev);
7273		return PCI_ERS_RESULT_NEED_RESET;
7274	case pci_channel_io_perm_failure:
7275		ipr_pci_perm_failure(pdev);
7276		return PCI_ERS_RESULT_DISCONNECT;
7277		break;
7278	default:
7279		break;
7280	}
7281	return PCI_ERS_RESULT_NEED_RESET;
7282}
7283
7284/**
7285 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7286 * @ioa_cfg:	ioa cfg struct
7287 *
7288 * Description: This is the second phase of adapter intialization
7289 * This function takes care of initilizing the adapter to the point
7290 * where it can accept new commands.
7291
7292 * Return value:
7293 * 	0 on success / -EIO on failure
7294 **/
7295static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7296{
7297	int rc = 0;
7298	unsigned long host_lock_flags = 0;
7299
7300	ENTER;
7301	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7302	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7303	if (ioa_cfg->needs_hard_reset) {
7304		ioa_cfg->needs_hard_reset = 0;
7305		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7306	} else
7307		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7308					IPR_SHUTDOWN_NONE);
7309
7310	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7311	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7312	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7313
7314	if (ioa_cfg->ioa_is_dead) {
7315		rc = -EIO;
7316	} else if (ipr_invalid_adapter(ioa_cfg)) {
7317		if (!ipr_testmode)
7318			rc = -EIO;
7319
7320		dev_err(&ioa_cfg->pdev->dev,
7321			"Adapter not supported in this hardware configuration.\n");
7322	}
7323
7324	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7325
7326	LEAVE;
7327	return rc;
7328}
7329
7330/**
7331 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7332 * @ioa_cfg:	ioa config struct
7333 *
7334 * Return value:
7335 * 	none
7336 **/
7337static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7338{
7339	int i;
7340
7341	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7342		if (ioa_cfg->ipr_cmnd_list[i])
7343			pci_pool_free(ioa_cfg->ipr_cmd_pool,
7344				      ioa_cfg->ipr_cmnd_list[i],
7345				      ioa_cfg->ipr_cmnd_list_dma[i]);
7346
7347		ioa_cfg->ipr_cmnd_list[i] = NULL;
7348	}
7349
7350	if (ioa_cfg->ipr_cmd_pool)
7351		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7352
7353	ioa_cfg->ipr_cmd_pool = NULL;
7354}
7355
7356/**
7357 * ipr_free_mem - Frees memory allocated for an adapter
7358 * @ioa_cfg:	ioa cfg struct
7359 *
7360 * Return value:
7361 * 	nothing
7362 **/
7363static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7364{
7365	int i;
7366
7367	kfree(ioa_cfg->res_entries);
7368	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7369			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7370	ipr_free_cmd_blks(ioa_cfg);
7371	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7372			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7373	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
7374			    ioa_cfg->cfg_table,
7375			    ioa_cfg->cfg_table_dma);
7376
7377	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7378		pci_free_consistent(ioa_cfg->pdev,
7379				    sizeof(struct ipr_hostrcb),
7380				    ioa_cfg->hostrcb[i],
7381				    ioa_cfg->hostrcb_dma[i]);
7382	}
7383
7384	ipr_free_dump(ioa_cfg);
7385	kfree(ioa_cfg->trace);
7386}
7387
7388/**
7389 * ipr_free_all_resources - Free all allocated resources for an adapter.
7390 * @ipr_cmd:	ipr command struct
7391 *
7392 * This function frees all allocated resources for the
7393 * specified adapter.
7394 *
7395 * Return value:
7396 * 	none
7397 **/
7398static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7399{
7400	struct pci_dev *pdev = ioa_cfg->pdev;
7401
7402	ENTER;
7403	free_irq(pdev->irq, ioa_cfg);
7404	pci_disable_msi(pdev);
7405	iounmap(ioa_cfg->hdw_dma_regs);
7406	pci_release_regions(pdev);
7407	ipr_free_mem(ioa_cfg);
7408	scsi_host_put(ioa_cfg->host);
7409	pci_disable_device(pdev);
7410	LEAVE;
7411}
7412
7413/**
7414 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7415 * @ioa_cfg:	ioa config struct
7416 *
7417 * Return value:
7418 * 	0 on success / -ENOMEM on allocation failure
7419 **/
7420static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7421{
7422	struct ipr_cmnd *ipr_cmd;
7423	struct ipr_ioarcb *ioarcb;
7424	dma_addr_t dma_addr;
7425	int i;
7426
7427	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7428						 sizeof(struct ipr_cmnd), 16, 0);
7429
7430	if (!ioa_cfg->ipr_cmd_pool)
7431		return -ENOMEM;
7432
7433	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7434		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7435
7436		if (!ipr_cmd) {
7437			ipr_free_cmd_blks(ioa_cfg);
7438			return -ENOMEM;
7439		}
7440
7441		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7442		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7443		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7444
7445		ioarcb = &ipr_cmd->ioarcb;
7446		ipr_cmd->dma_addr = dma_addr;
7447		if (ioa_cfg->sis64)
7448			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
7449		else
7450			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
7451
7452		ioarcb->host_response_handle = cpu_to_be32(i << 2);
7453		if (ioa_cfg->sis64) {
7454			ioarcb->u.sis64_addr_data.data_ioadl_addr =
7455				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
7456			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
7457				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7458		} else {
7459			ioarcb->write_ioadl_addr =
7460				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
7461			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
7462			ioarcb->ioasa_host_pci_addr =
7463				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
7464		}
7465		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7466		ipr_cmd->cmd_index = i;
7467		ipr_cmd->ioa_cfg = ioa_cfg;
7468		ipr_cmd->sense_buffer_dma = dma_addr +
7469			offsetof(struct ipr_cmnd, sense_buffer);
7470
7471		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7472	}
7473
7474	return 0;
7475}
7476
7477/**
7478 * ipr_alloc_mem - Allocate memory for an adapter
7479 * @ioa_cfg:	ioa config struct
7480 *
7481 * Return value:
7482 * 	0 on success / non-zero for error
7483 **/
7484static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7485{
7486	struct pci_dev *pdev = ioa_cfg->pdev;
7487	int i, rc = -ENOMEM;
7488
7489	ENTER;
7490	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7491				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7492
7493	if (!ioa_cfg->res_entries)
7494		goto out;
7495
7496	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7497		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7498
7499	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7500						sizeof(struct ipr_misc_cbs),
7501						&ioa_cfg->vpd_cbs_dma);
7502
7503	if (!ioa_cfg->vpd_cbs)
7504		goto out_free_res_entries;
7505
7506	if (ipr_alloc_cmd_blks(ioa_cfg))
7507		goto out_free_vpd_cbs;
7508
7509	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7510						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7511						 &ioa_cfg->host_rrq_dma);
7512
7513	if (!ioa_cfg->host_rrq)
7514		goto out_ipr_free_cmd_blocks;
7515
7516	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7517						  sizeof(struct ipr_config_table),
7518						  &ioa_cfg->cfg_table_dma);
7519
7520	if (!ioa_cfg->cfg_table)
7521		goto out_free_host_rrq;
7522
7523	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7524		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7525							   sizeof(struct ipr_hostrcb),
7526							   &ioa_cfg->hostrcb_dma[i]);
7527
7528		if (!ioa_cfg->hostrcb[i])
7529			goto out_free_hostrcb_dma;
7530
7531		ioa_cfg->hostrcb[i]->hostrcb_dma =
7532			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7533		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7534		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7535	}
7536
7537	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7538				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7539
7540	if (!ioa_cfg->trace)
7541		goto out_free_hostrcb_dma;
7542
7543	rc = 0;
7544out:
7545	LEAVE;
7546	return rc;
7547
7548out_free_hostrcb_dma:
7549	while (i-- > 0) {
7550		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7551				    ioa_cfg->hostrcb[i],
7552				    ioa_cfg->hostrcb_dma[i]);
7553	}
7554	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7555			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7556out_free_host_rrq:
7557	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7558			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7559out_ipr_free_cmd_blocks:
7560	ipr_free_cmd_blks(ioa_cfg);
7561out_free_vpd_cbs:
7562	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7563			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7564out_free_res_entries:
7565	kfree(ioa_cfg->res_entries);
7566	goto out;
7567}
7568
7569/**
7570 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7571 * @ioa_cfg:	ioa config struct
7572 *
7573 * Return value:
7574 * 	none
7575 **/
7576static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7577{
7578	int i;
7579
7580	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7581		ioa_cfg->bus_attr[i].bus = i;
7582		ioa_cfg->bus_attr[i].qas_enabled = 0;
7583		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7584		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7585			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7586		else
7587			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7588	}
7589}
7590
7591/**
7592 * ipr_init_ioa_cfg - Initialize IOA config struct
7593 * @ioa_cfg:	ioa config struct
7594 * @host:		scsi host struct
7595 * @pdev:		PCI dev struct
7596 *
7597 * Return value:
7598 * 	none
7599 **/
7600static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7601				       struct Scsi_Host *host, struct pci_dev *pdev)
7602{
7603	const struct ipr_interrupt_offsets *p;
7604	struct ipr_interrupts *t;
7605	void __iomem *base;
7606
7607	ioa_cfg->host = host;
7608	ioa_cfg->pdev = pdev;
7609	ioa_cfg->log_level = ipr_log_level;
7610	ioa_cfg->doorbell = IPR_DOORBELL;
7611	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7612	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7613	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7614	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7615	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7616	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7617	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7618	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7619
7620	INIT_LIST_HEAD(&ioa_cfg->free_q);
7621	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7622	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7623	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7624	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7625	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7626	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7627	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7628	init_waitqueue_head(&ioa_cfg->msi_wait_q);
7629	ioa_cfg->sdt_state = INACTIVE;
7630	if (ipr_enable_cache)
7631		ioa_cfg->cache_state = CACHE_ENABLED;
7632	else
7633		ioa_cfg->cache_state = CACHE_DISABLED;
7634
7635	ipr_initialize_bus_attr(ioa_cfg);
7636
7637	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7638	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7639	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7640	host->unique_id = host->host_no;
7641	host->max_cmd_len = IPR_MAX_CDB_LEN;
7642	pci_set_drvdata(pdev, ioa_cfg);
7643
7644	p = &ioa_cfg->chip_cfg->regs;
7645	t = &ioa_cfg->regs;
7646	base = ioa_cfg->hdw_dma_regs;
7647
7648	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7649	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7650	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7651	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7652	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7653	t->ioarrin_reg = base + p->ioarrin_reg;
7654	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7655	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7656	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7657}
7658
7659/**
7660 * ipr_get_chip_info - Find adapter chip information
7661 * @dev_id:		PCI device id struct
7662 *
7663 * Return value:
7664 * 	ptr to chip information on success / NULL on failure
7665 **/
7666static const struct ipr_chip_t * __devinit
7667ipr_get_chip_info(const struct pci_device_id *dev_id)
7668{
7669	int i;
7670
7671	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7672		if (ipr_chip[i].vendor == dev_id->vendor &&
7673		    ipr_chip[i].device == dev_id->device)
7674			return &ipr_chip[i];
7675	return NULL;
7676}
7677
7678/**
7679 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
7680 * @pdev:		PCI device struct
7681 *
7682 * Description: Simply set the msi_received flag to 1 indicating that
7683 * Message Signaled Interrupts are supported.
7684 *
7685 * Return value:
7686 * 	0 on success / non-zero on failure
7687 **/
7688static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
7689{
7690	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
7691	unsigned long lock_flags = 0;
7692	irqreturn_t rc = IRQ_HANDLED;
7693
7694	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7695
7696	ioa_cfg->msi_received = 1;
7697	wake_up(&ioa_cfg->msi_wait_q);
7698
7699	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7700	return rc;
7701}
7702
7703/**
7704 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
7705 * @pdev:		PCI device struct
7706 *
7707 * Description: The return value from pci_enable_msi() can not always be
7708 * trusted.  This routine sets up and initiates a test interrupt to determine
7709 * if the interrupt is received via the ipr_test_intr() service routine.
7710 * If the tests fails, the driver will fall back to LSI.
7711 *
7712 * Return value:
7713 * 	0 on success / non-zero on failure
7714 **/
7715static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7716				  struct pci_dev *pdev)
7717{
7718	int rc;
7719	volatile u32 int_reg;
7720	unsigned long lock_flags = 0;
7721
7722	ENTER;
7723
7724	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7725	init_waitqueue_head(&ioa_cfg->msi_wait_q);
7726	ioa_cfg->msi_received = 0;
7727	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7728	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7729	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7730	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7731
7732	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
7733	if (rc) {
7734		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
7735		return rc;
7736	} else if (ipr_debug)
7737		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
7738
7739	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
7740	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7741	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
7742	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7743
7744	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7745	if (!ioa_cfg->msi_received) {
7746		/* MSI test failed */
7747		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
7748		rc = -EOPNOTSUPP;
7749	} else if (ipr_debug)
7750		dev_info(&pdev->dev, "MSI test succeeded.\n");
7751
7752	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7753
7754	free_irq(pdev->irq, ioa_cfg);
7755
7756	LEAVE;
7757
7758	return rc;
7759}
7760
7761/**
7762 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7763 * @pdev:		PCI device struct
7764 * @dev_id:		PCI device id struct
7765 *
7766 * Return value:
7767 * 	0 on success / non-zero on failure
7768 **/
7769static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7770				   const struct pci_device_id *dev_id)
7771{
7772	struct ipr_ioa_cfg *ioa_cfg;
7773	struct Scsi_Host *host;
7774	unsigned long ipr_regs_pci;
7775	void __iomem *ipr_regs;
7776	int rc = PCIBIOS_SUCCESSFUL;
7777	volatile u32 mask, uproc, interrupts;
7778
7779	ENTER;
7780
7781	if ((rc = pci_enable_device(pdev))) {
7782		dev_err(&pdev->dev, "Cannot enable adapter\n");
7783		goto out;
7784	}
7785
7786	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7787
7788	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7789
7790	if (!host) {
7791		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7792		rc = -ENOMEM;
7793		goto out_disable;
7794	}
7795
7796	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7797	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7798	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7799		      sata_port_info.flags, &ipr_sata_ops);
7800
7801	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
7802
7803	if (!ioa_cfg->ipr_chip) {
7804		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7805			dev_id->vendor, dev_id->device);
7806		goto out_scsi_host_put;
7807	}
7808
7809	/* set SIS 32 or SIS 64 */
7810	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
7811	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7812
7813	if (ipr_transop_timeout)
7814		ioa_cfg->transop_timeout = ipr_transop_timeout;
7815	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7816		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7817	else
7818		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7819
7820	ioa_cfg->revid = pdev->revision;
7821
7822	ipr_regs_pci = pci_resource_start(pdev, 0);
7823
7824	rc = pci_request_regions(pdev, IPR_NAME);
7825	if (rc < 0) {
7826		dev_err(&pdev->dev,
7827			"Couldn't register memory range of registers\n");
7828		goto out_scsi_host_put;
7829	}
7830
7831	ipr_regs = pci_ioremap_bar(pdev, 0);
7832
7833	if (!ipr_regs) {
7834		dev_err(&pdev->dev,
7835			"Couldn't map memory range of registers\n");
7836		rc = -ENOMEM;
7837		goto out_release_regions;
7838	}
7839
7840	ioa_cfg->hdw_dma_regs = ipr_regs;
7841	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7842	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7843
7844	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7845
7846	pci_set_master(pdev);
7847
7848	if (ioa_cfg->sis64) {
7849		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7850		if (rc < 0) {
7851			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
7852			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7853		}
7854
7855	} else
7856		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7857
7858	if (rc < 0) {
7859		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7860		goto cleanup_nomem;
7861	}
7862
7863	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7864				   ioa_cfg->chip_cfg->cache_line_size);
7865
7866	if (rc != PCIBIOS_SUCCESSFUL) {
7867		dev_err(&pdev->dev, "Write of cache line size failed\n");
7868		rc = -EIO;
7869		goto cleanup_nomem;
7870	}
7871
7872	/* Enable MSI style interrupts if they are supported. */
7873	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
7874		rc = ipr_test_msi(ioa_cfg, pdev);
7875		if (rc == -EOPNOTSUPP)
7876			pci_disable_msi(pdev);
7877		else if (rc)
7878			goto out_msi_disable;
7879		else
7880			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
7881	} else if (ipr_debug)
7882		dev_info(&pdev->dev, "Cannot enable MSI.\n");
7883
7884	/* Save away PCI config space for use following IOA reset */
7885	rc = pci_save_state(pdev);
7886
7887	if (rc != PCIBIOS_SUCCESSFUL) {
7888		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7889		rc = -EIO;
7890		goto cleanup_nomem;
7891	}
7892
7893	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7894		goto cleanup_nomem;
7895
7896	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7897		goto cleanup_nomem;
7898
7899	rc = ipr_alloc_mem(ioa_cfg);
7900	if (rc < 0) {
7901		dev_err(&pdev->dev,
7902			"Couldn't allocate enough memory for device driver!\n");
7903		goto cleanup_nomem;
7904	}
7905
7906	/*
7907	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7908	 * the card is in an unknown state and needs a hard reset
7909	 */
7910	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7911	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
7912	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7913	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7914		ioa_cfg->needs_hard_reset = 1;
7915	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
7916		ioa_cfg->needs_hard_reset = 1;
7917	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
7918		ioa_cfg->ioa_unit_checked = 1;
7919
7920	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7921	rc = request_irq(pdev->irq, ipr_isr,
7922			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
7923			 IPR_NAME, ioa_cfg);
7924
7925	if (rc) {
7926		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7927			pdev->irq, rc);
7928		goto cleanup_nolog;
7929	}
7930
7931	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
7932	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
7933		ioa_cfg->needs_warm_reset = 1;
7934		ioa_cfg->reset = ipr_reset_slot_reset;
7935	} else
7936		ioa_cfg->reset = ipr_reset_start_bist;
7937
7938	spin_lock(&ipr_driver_lock);
7939	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7940	spin_unlock(&ipr_driver_lock);
7941
7942	LEAVE;
7943out:
7944	return rc;
7945
7946cleanup_nolog:
7947	ipr_free_mem(ioa_cfg);
7948cleanup_nomem:
7949	iounmap(ipr_regs);
7950out_msi_disable:
7951	pci_disable_msi(pdev);
7952out_release_regions:
7953	pci_release_regions(pdev);
7954out_scsi_host_put:
7955	scsi_host_put(host);
7956out_disable:
7957	pci_disable_device(pdev);
7958	goto out;
7959}
7960
7961/**
7962 * ipr_scan_vsets - Scans for VSET devices
7963 * @ioa_cfg:	ioa config struct
7964 *
7965 * Description: Since the VSET resources do not follow SAM in that we can have
7966 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7967 *
7968 * Return value:
7969 * 	none
7970 **/
7971static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7972{
7973	int target, lun;
7974
7975	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7976		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
7977			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7978}
7979
7980/**
7981 * ipr_initiate_ioa_bringdown - Bring down an adapter
7982 * @ioa_cfg:		ioa config struct
7983 * @shutdown_type:	shutdown type
7984 *
7985 * Description: This function will initiate bringing down the adapter.
7986 * This consists of issuing an IOA shutdown to the adapter
7987 * to flush the cache, and running BIST.
7988 * If the caller needs to wait on the completion of the reset,
7989 * the caller must sleep on the reset_wait_q.
7990 *
7991 * Return value:
7992 * 	none
7993 **/
7994static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7995				       enum ipr_shutdown_type shutdown_type)
7996{
7997	ENTER;
7998	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7999		ioa_cfg->sdt_state = ABORT_DUMP;
8000	ioa_cfg->reset_retries = 0;
8001	ioa_cfg->in_ioa_bringdown = 1;
8002	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8003	LEAVE;
8004}
8005
8006/**
8007 * __ipr_remove - Remove a single adapter
8008 * @pdev:	pci device struct
8009 *
8010 * Adapter hot plug remove entry point.
8011 *
8012 * Return value:
8013 * 	none
8014 **/
8015static void __ipr_remove(struct pci_dev *pdev)
8016{
8017	unsigned long host_lock_flags = 0;
8018	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8019	ENTER;
8020
8021	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8022	while(ioa_cfg->in_reset_reload) {
8023		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8024		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8025		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8026	}
8027
8028	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8029
8030	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8031	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8032	flush_scheduled_work();
8033	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8034
8035	spin_lock(&ipr_driver_lock);
8036	list_del(&ioa_cfg->queue);
8037	spin_unlock(&ipr_driver_lock);
8038
8039	if (ioa_cfg->sdt_state == ABORT_DUMP)
8040		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8041	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8042
8043	ipr_free_all_resources(ioa_cfg);
8044
8045	LEAVE;
8046}
8047
8048/**
8049 * ipr_remove - IOA hot plug remove entry point
8050 * @pdev:	pci device struct
8051 *
8052 * Adapter hot plug remove entry point.
8053 *
8054 * Return value:
8055 * 	none
8056 **/
8057static void __devexit ipr_remove(struct pci_dev *pdev)
8058{
8059	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8060
8061	ENTER;
8062
8063	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8064			      &ipr_trace_attr);
8065	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8066			     &ipr_dump_attr);
8067	scsi_remove_host(ioa_cfg->host);
8068
8069	__ipr_remove(pdev);
8070
8071	LEAVE;
8072}
8073
8074/**
8075 * ipr_probe - Adapter hot plug add entry point
8076 *
8077 * Return value:
8078 * 	0 on success / non-zero on failure
8079 **/
8080static int __devinit ipr_probe(struct pci_dev *pdev,
8081			       const struct pci_device_id *dev_id)
8082{
8083	struct ipr_ioa_cfg *ioa_cfg;
8084	int rc;
8085
8086	rc = ipr_probe_ioa(pdev, dev_id);
8087
8088	if (rc)
8089		return rc;
8090
8091	ioa_cfg = pci_get_drvdata(pdev);
8092	rc = ipr_probe_ioa_part2(ioa_cfg);
8093
8094	if (rc) {
8095		__ipr_remove(pdev);
8096		return rc;
8097	}
8098
8099	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8100
8101	if (rc) {
8102		__ipr_remove(pdev);
8103		return rc;
8104	}
8105
8106	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8107				   &ipr_trace_attr);
8108
8109	if (rc) {
8110		scsi_remove_host(ioa_cfg->host);
8111		__ipr_remove(pdev);
8112		return rc;
8113	}
8114
8115	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8116				   &ipr_dump_attr);
8117
8118	if (rc) {
8119		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8120				      &ipr_trace_attr);
8121		scsi_remove_host(ioa_cfg->host);
8122		__ipr_remove(pdev);
8123		return rc;
8124	}
8125
8126	scsi_scan_host(ioa_cfg->host);
8127	ipr_scan_vsets(ioa_cfg);
8128	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8129	ioa_cfg->allow_ml_add_del = 1;
8130	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8131	schedule_work(&ioa_cfg->work_q);
8132	return 0;
8133}
8134
8135/**
8136 * ipr_shutdown - Shutdown handler.
8137 * @pdev:	pci device struct
8138 *
8139 * This function is invoked upon system shutdown/reboot. It will issue
8140 * an adapter shutdown to the adapter to flush the write cache.
8141 *
8142 * Return value:
8143 * 	none
8144 **/
8145static void ipr_shutdown(struct pci_dev *pdev)
8146{
8147	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8148	unsigned long lock_flags = 0;
8149
8150	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8151	while(ioa_cfg->in_reset_reload) {
8152		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8153		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8154		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8155	}
8156
8157	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8158	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8159	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8160}
8161
8162static struct pci_device_id ipr_pci_table[] __devinitdata = {
8163	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8164		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8165	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8166		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8167	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8168		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8169	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8170		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8171	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8172		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8173	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8174		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8175	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8176		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8177	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8178		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8179		IPR_USE_LONG_TRANSOP_TIMEOUT },
8180	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8181	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8182	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8183	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8184	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8185	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8186	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8187	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8188	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8189	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8190	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8191	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8192	      IPR_USE_LONG_TRANSOP_TIMEOUT},
8193	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8194	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8195	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8196	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8197	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8198	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8199	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8200	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
8201	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8202	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8203	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8204	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8205	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8206	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8207	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8208		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8209	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8210		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8211	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8212		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8213		IPR_USE_LONG_TRANSOP_TIMEOUT },
8214	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8215		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8216		IPR_USE_LONG_TRANSOP_TIMEOUT },
8217	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
8218		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
8219		IPR_USE_LONG_TRANSOP_TIMEOUT },
8220	{ }
8221};
8222MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8223
8224static struct pci_error_handlers ipr_err_handler = {
8225	.error_detected = ipr_pci_error_detected,
8226	.slot_reset = ipr_pci_slot_reset,
8227};
8228
8229static struct pci_driver ipr_driver = {
8230	.name = IPR_NAME,
8231	.id_table = ipr_pci_table,
8232	.probe = ipr_probe,
8233	.remove = __devexit_p(ipr_remove),
8234	.shutdown = ipr_shutdown,
8235	.err_handler = &ipr_err_handler,
8236};
8237
8238/**
8239 * ipr_init - Module entry point
8240 *
8241 * Return value:
8242 * 	0 on success / negative value on failure
8243 **/
8244static int __init ipr_init(void)
8245{
8246	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8247		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8248
8249	return pci_register_driver(&ipr_driver);
8250}
8251
8252/**
8253 * ipr_exit - Module unload
8254 *
8255 * Module unload entry point.
8256 *
8257 * Return value:
8258 * 	none
8259 **/
8260static void __exit ipr_exit(void)
8261{
8262	pci_unregister_driver(&ipr_driver);
8263}
8264
8265module_init(ipr_init);
8266module_exit(ipr_exit);
8267