ipr.c revision e94b1766097d53e6f3ccfb36c8baa562ffeda3fc
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <linux/libata.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
82#include "ipr.h"
83
84/*
85 *   Global Data
86 */
87static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0;
91static unsigned int ipr_fastfail = 0;
92static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
93static unsigned int ipr_enable_cache = 1;
94static unsigned int ipr_debug = 0;
95static int ipr_auto_create = 1;
96static DEFINE_SPINLOCK(ipr_driver_lock);
97
98/* This table describes the differences between DMA controller chips */
99static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
100	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
101		.mailbox = 0x0042C,
102		.cache_line_size = 0x20,
103		{
104			.set_interrupt_mask_reg = 0x0022C,
105			.clr_interrupt_mask_reg = 0x00230,
106			.sense_interrupt_mask_reg = 0x0022C,
107			.clr_interrupt_reg = 0x00228,
108			.sense_interrupt_reg = 0x00224,
109			.ioarrin_reg = 0x00404,
110			.sense_uproc_interrupt_reg = 0x00214,
111			.set_uproc_interrupt_reg = 0x00214,
112			.clr_uproc_interrupt_reg = 0x00218
113		}
114	},
115	{ /* Snipe and Scamp */
116		.mailbox = 0x0052C,
117		.cache_line_size = 0x20,
118		{
119			.set_interrupt_mask_reg = 0x00288,
120			.clr_interrupt_mask_reg = 0x0028C,
121			.sense_interrupt_mask_reg = 0x00288,
122			.clr_interrupt_reg = 0x00284,
123			.sense_interrupt_reg = 0x00280,
124			.ioarrin_reg = 0x00504,
125			.sense_uproc_interrupt_reg = 0x00290,
126			.set_uproc_interrupt_reg = 0x00290,
127			.clr_uproc_interrupt_reg = 0x00294
128		}
129	},
130};
131
132static const struct ipr_chip_t ipr_chip[] = {
133	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
134	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
135	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
136	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
137	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
138	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140};
141
142static int ipr_max_bus_speeds [] = {
143	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
144};
145
146MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
147MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
148module_param_named(max_speed, ipr_max_speed, uint, 0);
149MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
150module_param_named(log_level, ipr_log_level, uint, 0);
151MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
152module_param_named(testmode, ipr_testmode, int, 0);
153MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
154module_param_named(fastfail, ipr_fastfail, int, 0);
155MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
156module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
157MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
158module_param_named(enable_cache, ipr_enable_cache, int, 0);
159MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160module_param_named(debug, ipr_debug, int, 0);
161MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
162module_param_named(auto_create, ipr_auto_create, int, 0);
163MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164MODULE_LICENSE("GPL");
165MODULE_VERSION(IPR_DRIVER_VERSION);
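/*
 * Usage sketch for the module parameters above (an illustrative example, not
 * part of the driver itself; it assumes the driver is built as a module named
 * ipr.ko -- when built in, the same parameters can be passed on the kernel
 * command line as ipr.<parameter>=<value>):
 *
 *	modprobe ipr max_speed=2 log_level=4
 */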
166
167/*  A constant array of IOASCs/URCs/Error Messages */
168static const
169struct ipr_error_table_t ipr_error_table[] = {
170	{0x00000000, 1, 1,
171	"8155: An unknown error was received"},
172	{0x00330000, 0, 0,
173	"Soft underlength error"},
174	{0x005A0000, 0, 0,
175	"Command to be cancelled not found"},
176	{0x00808000, 0, 0,
177	"Qualified success"},
178	{0x01080000, 1, 1,
179	"FFFE: Soft device bus error recovered by the IOA"},
180	{0x01088100, 0, 1,
181	"4101: Soft device bus fabric error"},
182	{0x01170600, 0, 1,
183	"FFF9: Device sector reassign successful"},
184	{0x01170900, 0, 1,
185	"FFF7: Media error recovered by device rewrite procedures"},
186	{0x01180200, 0, 1,
187	"7001: IOA sector reassignment successful"},
188	{0x01180500, 0, 1,
189	"FFF9: Soft media error. Sector reassignment recommended"},
190	{0x01180600, 0, 1,
191	"FFF7: Media error recovered by IOA rewrite procedures"},
192	{0x01418000, 0, 1,
193	"FF3D: Soft PCI bus error recovered by the IOA"},
194	{0x01440000, 1, 1,
195	"FFF6: Device hardware error recovered by the IOA"},
196	{0x01448100, 0, 1,
197	"FFF6: Device hardware error recovered by the device"},
198	{0x01448200, 1, 1,
199	"FF3D: Soft IOA error recovered by the IOA"},
200	{0x01448300, 0, 1,
201	"FFFA: Undefined device response recovered by the IOA"},
202	{0x014A0000, 1, 1,
203	"FFF6: Device bus error, message or command phase"},
204	{0x014A8000, 0, 1,
205	"FFFE: Task Management Function failed"},
206	{0x015D0000, 0, 1,
207	"FFF6: Failure prediction threshold exceeded"},
208	{0x015D9200, 0, 1,
209	"8009: Impending cache battery pack failure"},
210	{0x02040400, 0, 0,
211	"34FF: Disk device format in progress"},
212	{0x023F0000, 0, 0,
213	"Synchronization required"},
214	{0x024E0000, 0, 0,
215	"Not ready, IOA shutdown"},
216	{0x025A0000, 0, 0,
217	"Not ready, IOA has been shutdown"},
218	{0x02670100, 0, 1,
219	"3020: Storage subsystem configuration error"},
220	{0x03110B00, 0, 0,
221	"FFF5: Medium error, data unreadable, recommend reassign"},
222	{0x03110C00, 0, 0,
223	"7000: Medium error, data unreadable, do not reassign"},
224	{0x03310000, 0, 1,
225	"FFF3: Disk media format bad"},
226	{0x04050000, 0, 1,
227	"3002: Addressed device failed to respond to selection"},
228	{0x04080000, 1, 1,
229	"3100: Device bus error"},
230	{0x04080100, 0, 1,
231	"3109: IOA timed out a device command"},
232	{0x04088000, 0, 0,
233	"3120: SCSI bus is not operational"},
234	{0x04088100, 0, 1,
235	"4100: Hard device bus fabric error"},
236	{0x04118000, 0, 1,
237	"9000: IOA reserved area data check"},
238	{0x04118100, 0, 1,
239	"9001: IOA reserved area invalid data pattern"},
240	{0x04118200, 0, 1,
241	"9002: IOA reserved area LRC error"},
242	{0x04320000, 0, 1,
243	"102E: Out of alternate sectors for disk storage"},
244	{0x04330000, 1, 1,
245	"FFF4: Data transfer underlength error"},
246	{0x04338000, 1, 1,
247	"FFF4: Data transfer overlength error"},
248	{0x043E0100, 0, 1,
249	"3400: Logical unit failure"},
250	{0x04408500, 0, 1,
251	"FFF4: Device microcode is corrupt"},
252	{0x04418000, 1, 1,
253	"8150: PCI bus error"},
254	{0x04430000, 1, 0,
255	"Unsupported device bus message received"},
256	{0x04440000, 1, 1,
257	"FFF4: Disk device problem"},
258	{0x04448200, 1, 1,
259	"8150: Permanent IOA failure"},
260	{0x04448300, 0, 1,
261	"3010: Disk device returned wrong response to IOA"},
262	{0x04448400, 0, 1,
263	"8151: IOA microcode error"},
264	{0x04448500, 0, 0,
265	"Device bus status error"},
266	{0x04448600, 0, 1,
267	"8157: IOA error requiring IOA reset to recover"},
268	{0x04448700, 0, 0,
269	"ATA device status error"},
270	{0x04490000, 0, 0,
271	"Message reject received from the device"},
272	{0x04449200, 0, 1,
273	"8008: A permanent cache battery pack failure occurred"},
274	{0x0444A000, 0, 1,
275	"9090: Disk unit has been modified after the last known status"},
276	{0x0444A200, 0, 1,
277	"9081: IOA detected device error"},
278	{0x0444A300, 0, 1,
279	"9082: IOA detected device error"},
280	{0x044A0000, 1, 1,
281	"3110: Device bus error, message or command phase"},
282	{0x044A8000, 1, 1,
283	"3110: SAS Command / Task Management Function failed"},
284	{0x04670400, 0, 1,
285	"9091: Incorrect hardware configuration change has been detected"},
286	{0x04678000, 0, 1,
287	"9073: Invalid multi-adapter configuration"},
288	{0x04678100, 0, 1,
289	"4010: Incorrect connection between cascaded expanders"},
290	{0x04678200, 0, 1,
291	"4020: Connections exceed IOA design limits"},
292	{0x04678300, 0, 1,
293	"4030: Incorrect multipath connection"},
294	{0x04679000, 0, 1,
295	"4110: Unsupported enclosure function"},
296	{0x046E0000, 0, 1,
297	"FFF4: Command to logical unit failed"},
298	{0x05240000, 1, 0,
299	"Illegal request, invalid request type or request packet"},
300	{0x05250000, 0, 0,
301	"Illegal request, invalid resource handle"},
302	{0x05258000, 0, 0,
303	"Illegal request, commands not allowed to this device"},
304	{0x05258100, 0, 0,
305	"Illegal request, command not allowed to a secondary adapter"},
306	{0x05260000, 0, 0,
307	"Illegal request, invalid field in parameter list"},
308	{0x05260100, 0, 0,
309	"Illegal request, parameter not supported"},
310	{0x05260200, 0, 0,
311	"Illegal request, parameter value invalid"},
312	{0x052C0000, 0, 0,
313	"Illegal request, command sequence error"},
314	{0x052C8000, 1, 0,
315	"Illegal request, dual adapter support not enabled"},
316	{0x06040500, 0, 1,
317	"9031: Array protection temporarily suspended, protection resuming"},
318	{0x06040600, 0, 1,
319	"9040: Array protection temporarily suspended, protection resuming"},
320	{0x06288000, 0, 1,
321	"3140: Device bus not ready to ready transition"},
322	{0x06290000, 0, 1,
323	"FFFB: SCSI bus was reset"},
324	{0x06290500, 0, 0,
325	"FFFE: SCSI bus transition to single ended"},
326	{0x06290600, 0, 0,
327	"FFFE: SCSI bus transition to LVD"},
328	{0x06298000, 0, 1,
329	"FFFB: SCSI bus was reset by another initiator"},
330	{0x063F0300, 0, 1,
331	"3029: A device replacement has occurred"},
332	{0x064C8000, 0, 1,
333	"9051: IOA cache data exists for a missing or failed device"},
334	{0x064C8100, 0, 1,
335	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
336	{0x06670100, 0, 1,
337	"9025: Disk unit is not supported at its physical location"},
338	{0x06670600, 0, 1,
339	"3020: IOA detected a SCSI bus configuration error"},
340	{0x06678000, 0, 1,
341	"3150: SCSI bus configuration error"},
342	{0x06678100, 0, 1,
343	"9074: Asymmetric advanced function disk configuration"},
344	{0x06678300, 0, 1,
345	"4040: Incomplete multipath connection between IOA and enclosure"},
346	{0x06678400, 0, 1,
347	"4041: Incomplete multipath connection between enclosure and device"},
348	{0x06678500, 0, 1,
349	"9075: Incomplete multipath connection between IOA and remote IOA"},
350	{0x06678600, 0, 1,
351	"9076: Configuration error, missing remote IOA"},
352	{0x06679100, 0, 1,
353	"4050: Enclosure does not support a required multipath function"},
354	{0x06690200, 0, 1,
355	"9041: Array protection temporarily suspended"},
356	{0x06698200, 0, 1,
357	"9042: Corrupt array parity detected on specified device"},
358	{0x066B0200, 0, 1,
359	"9030: Array no longer protected due to missing or failed disk unit"},
360	{0x066B8000, 0, 1,
361	"9071: Link operational transition"},
362	{0x066B8100, 0, 1,
363	"9072: Link not operational transition"},
364	{0x066B8200, 0, 1,
365	"9032: Array exposed but still protected"},
366	{0x066B9100, 0, 1,
367	"4061: Multipath redundancy level got better"},
368	{0x066B9200, 0, 1,
369	"4060: Multipath redundancy level got worse"},
370	{0x07270000, 0, 0,
371	"Failure due to other device"},
372	{0x07278000, 0, 1,
373	"9008: IOA does not support functions expected by devices"},
374	{0x07278100, 0, 1,
375	"9010: Cache data associated with attached devices cannot be found"},
376	{0x07278200, 0, 1,
377	"9011: Cache data belongs to devices other than those attached"},
378	{0x07278400, 0, 1,
379	"9020: Array missing 2 or more devices with only 1 device present"},
380	{0x07278500, 0, 1,
381	"9021: Array missing 2 or more devices with 2 or more devices present"},
382	{0x07278600, 0, 1,
383	"9022: Exposed array is missing a required device"},
384	{0x07278700, 0, 1,
385	"9023: Array member(s) not at required physical locations"},
386	{0x07278800, 0, 1,
387	"9024: Array not functional due to present hardware configuration"},
388	{0x07278900, 0, 1,
389	"9026: Array not functional due to present hardware configuration"},
390	{0x07278A00, 0, 1,
391	"9027: Array is missing a device and parity is out of sync"},
392	{0x07278B00, 0, 1,
393	"9028: Maximum number of arrays already exist"},
394	{0x07278C00, 0, 1,
395	"9050: Required cache data cannot be located for a disk unit"},
396	{0x07278D00, 0, 1,
397	"9052: Cache data exists for a device that has been modified"},
398	{0x07278F00, 0, 1,
399	"9054: IOA resources not available due to previous problems"},
400	{0x07279100, 0, 1,
401	"9092: Disk unit requires initialization before use"},
402	{0x07279200, 0, 1,
403	"9029: Incorrect hardware configuration change has been detected"},
404	{0x07279600, 0, 1,
405	"9060: One or more disk pairs are missing from an array"},
406	{0x07279700, 0, 1,
407	"9061: One or more disks are missing from an array"},
408	{0x07279800, 0, 1,
409	"9062: One or more disks are missing from an array"},
410	{0x07279900, 0, 1,
411	"9063: Maximum number of functional arrays has been exceeded"},
412	{0x0B260000, 0, 0,
413	"Aborted command, invalid descriptor"},
414	{0x0B5A0000, 0, 0,
415	"Command terminated by host"}
416};
417
418static const struct ipr_ses_table_entry ipr_ses_table[] = {
419	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
420	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
421	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
422	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
423	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
424	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
425	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
426	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
427	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
428	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
429	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
430	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
431	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
432};
433
434/*
435 *  Function Prototypes
436 */
437static int ipr_reset_alert(struct ipr_cmnd *);
438static void ipr_process_ccn(struct ipr_cmnd *);
439static void ipr_process_error(struct ipr_cmnd *);
440static void ipr_reset_ioa_job(struct ipr_cmnd *);
441static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
442				   enum ipr_shutdown_type);
443
444#ifdef CONFIG_SCSI_IPR_TRACE
445/**
446 * ipr_trc_hook - Add a trace entry to the driver trace
447 * @ipr_cmd:	ipr command struct
448 * @type:		trace type
449 * @add_data:	additional data
450 *
451 * Return value:
452 * 	none
453 **/
454static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
455			 u8 type, u32 add_data)
456{
457	struct ipr_trace_entry *trace_entry;
458	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
459
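	/* Record this op in the driver's trace buffer for later debug/dump analysis */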
460	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
461	trace_entry->time = jiffies;
462	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
463	trace_entry->type = type;
464	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
465	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
466	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
467	trace_entry->u.add_data = add_data;
468}
469#else
470#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
471#endif
472
473/**
474 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
475 * @ipr_cmd:	ipr command struct
476 *
477 * Return value:
478 * 	none
479 **/
480static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
481{
482	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
483	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
484
485	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
486	ioarcb->write_data_transfer_length = 0;
487	ioarcb->read_data_transfer_length = 0;
488	ioarcb->write_ioadl_len = 0;
489	ioarcb->read_ioadl_len = 0;
490	ioasa->ioasc = 0;
491	ioasa->residual_data_len = 0;
492	ioasa->u.gata.status = 0;
493
494	ipr_cmd->scsi_cmd = NULL;
495	ipr_cmd->qc = NULL;
496	ipr_cmd->sense_buffer[0] = 0;
497	ipr_cmd->dma_use_sg = 0;
498}
499
500/**
501 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
502 * @ipr_cmd:	ipr command struct
503 *
504 * Return value:
505 * 	none
506 **/
507static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
508{
509	ipr_reinit_ipr_cmnd(ipr_cmd);
510	ipr_cmd->u.scratch = 0;
511	ipr_cmd->sibling = NULL;
512	init_timer(&ipr_cmd->timer);
513}
514
515/**
516 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
517 * @ioa_cfg:	ioa config struct
518 *
519 * Return value:
520 * 	pointer to ipr command struct
521 **/
522static
523struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
524{
525	struct ipr_cmnd *ipr_cmd;
526
527	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
528	list_del(&ipr_cmd->queue);
529	ipr_init_ipr_cmnd(ipr_cmd);
530
531	return ipr_cmd;
532}
533
534/**
535 * ipr_unmap_sglist - Unmap scatterlist if mapped
536 * @ioa_cfg:	ioa config struct
537 * @ipr_cmd:	ipr command struct
538 *
539 * Return value:
540 * 	none
541 **/
542static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
543			     struct ipr_cmnd *ipr_cmd)
544{
545	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
546
547	if (ipr_cmd->dma_use_sg) {
548		if (scsi_cmd->use_sg > 0) {
549			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
550				     scsi_cmd->use_sg,
551				     scsi_cmd->sc_data_direction);
552		} else {
553			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
554					 scsi_cmd->request_bufflen,
555					 scsi_cmd->sc_data_direction);
556		}
557	}
558}
559
560/**
561 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
562 * @ioa_cfg:	ioa config struct
563 * @clr_ints:     interrupts to clear
564 *
565 * This function masks all interrupts on the adapter, then clears the
566 * interrupts specified in the mask
567 *
568 * Return value:
569 * 	none
570 **/
571static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
572					  u32 clr_ints)
573{
574	volatile u32 int_reg;
575
576	/* Stop new interrupts */
577	ioa_cfg->allow_interrupts = 0;
578
579	/* Set interrupt mask to stop all new interrupts */
580	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
581
582	/* Clear any pending interrupts */
583	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
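	/* Read back the sense register so the mask/clear MMIO writes are posted to the adapter before we return */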
584	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
585}
586
587/**
588 * ipr_save_pcix_cmd_reg - Save PCI-X command register
589 * @ioa_cfg:	ioa config struct
590 *
591 * Return value:
592 * 	0 on success / -EIO on failure
593 **/
594static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
595{
596	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
597
598	if (pcix_cmd_reg == 0) {
599		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
600		return -EIO;
601	}
602
603	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
604				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
605		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
606		return -EIO;
607	}
608
609	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
610	return 0;
611}
612
613/**
614 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
615 * @ioa_cfg:	ioa config struct
616 *
617 * Return value:
618 * 	0 on success / -EIO on failure
619 **/
620static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
621{
622	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
623
624	if (pcix_cmd_reg) {
625		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
626					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
627			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
628			return -EIO;
629		}
630	} else {
631		dev_err(&ioa_cfg->pdev->dev,
632			"Failed to setup PCI-X command register\n");
633		return -EIO;
634	}
635
636	return 0;
637}
638
639/**
640 * ipr_sata_eh_done - done function for aborted SATA commands
641 * @ipr_cmd:	ipr command struct
642 *
643 * This function is invoked for ops generated to SATA
644 * devices which are being aborted.
645 *
646 * Return value:
647 * 	none
648 **/
649static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
650{
651	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
652	struct ata_queued_cmd *qc = ipr_cmd->qc;
653	struct ipr_sata_port *sata_port = qc->ap->private_data;
654
655	qc->err_mask |= AC_ERR_OTHER;
656	sata_port->ioasa.status |= ATA_BUSY;
657	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
658	ata_qc_complete(qc);
659}
660
661/**
662 * ipr_scsi_eh_done - mid-layer done function for aborted ops
663 * @ipr_cmd:	ipr command struct
664 *
665 * This function is invoked by the interrupt handler for
666 * ops generated by the SCSI mid-layer which are being aborted.
667 *
668 * Return value:
669 * 	none
670 **/
671static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
672{
673	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
674	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
675
676	scsi_cmd->result |= (DID_ERROR << 16);
677
678	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
679	scsi_cmd->scsi_done(scsi_cmd);
680	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
681}
682
683/**
684 * ipr_fail_all_ops - Fails all outstanding ops.
685 * @ioa_cfg:	ioa config struct
686 *
687 * This function fails all outstanding ops.
688 *
689 * Return value:
690 * 	none
691 **/
692static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
693{
694	struct ipr_cmnd *ipr_cmd, *temp;
695
696	ENTER;
697	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
698		list_del(&ipr_cmd->queue);
699
700		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
701		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
702
703		if (ipr_cmd->scsi_cmd)
704			ipr_cmd->done = ipr_scsi_eh_done;
705		else if (ipr_cmd->qc)
706			ipr_cmd->done = ipr_sata_eh_done;
707
708		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
709		del_timer(&ipr_cmd->timer);
710		ipr_cmd->done(ipr_cmd);
711	}
712
713	LEAVE;
714}
715
716/**
717 * ipr_do_req -  Send driver initiated requests.
718 * @ipr_cmd:		ipr command struct
719 * @done:			done function
720 * @timeout_func:	timeout function
721 * @timeout:		timeout value
722 *
723 * This function sends the specified command to the adapter with the
724 * timeout given. The done function is invoked on command completion.
725 *
726 * Return value:
727 * 	none
728 **/
729static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
730		       void (*done) (struct ipr_cmnd *),
731		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
732{
733	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
734
735	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
736
737	ipr_cmd->done = done;
738
739	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
740	ipr_cmd->timer.expires = jiffies + timeout;
741	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
742
743	add_timer(&ipr_cmd->timer);
744
745	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
746
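	/* Make sure the IOARCB contents are visible in memory before ringing the adapter's doorbell register below */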
747	mb();
748	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
749	       ioa_cfg->regs.ioarrin_reg);
750}
751
752/**
753 * ipr_internal_cmd_done - Op done function for an internally generated op.
754 * @ipr_cmd:	ipr command struct
755 *
756 * This function is the op done function for an internally generated,
757 * blocking op. It simply wakes the sleeping thread.
758 *
759 * Return value:
760 * 	none
761 **/
762static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
763{
764	if (ipr_cmd->sibling)
765		ipr_cmd->sibling = NULL;
766	else
767		complete(&ipr_cmd->completion);
768}
769
770/**
771 * ipr_send_blocking_cmd - Send command and sleep on its completion.
772 * @ipr_cmd:	ipr command struct
773 * @timeout_func:	function to invoke if command times out
774 * @timeout:	timeout
775 *
776 * Return value:
777 * 	none
778 **/
779static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
780				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
781				  u32 timeout)
782{
783	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
784
785	init_completion(&ipr_cmd->completion);
786	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
787
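	/* Drop the host lock while we sleep; ipr_internal_cmd_done() runs from interrupt context and wakes us when the op completes */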
788	spin_unlock_irq(ioa_cfg->host->host_lock);
789	wait_for_completion(&ipr_cmd->completion);
790	spin_lock_irq(ioa_cfg->host->host_lock);
791}
792
793/**
794 * ipr_send_hcam - Send an HCAM to the adapter.
795 * @ioa_cfg:	ioa config struct
796 * @type:		HCAM type
797 * @hostrcb:	hostrcb struct
798 *
799 * This function will send a Host Controlled Async command to the adapter.
800 * If HCAMs are currently not allowed to be issued to the adapter, it will
801 * place the hostrcb on the free queue.
802 *
803 * Return value:
804 * 	none
805 **/
806static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
807			  struct ipr_hostrcb *hostrcb)
808{
809	struct ipr_cmnd *ipr_cmd;
810	struct ipr_ioarcb *ioarcb;
811
812	if (ioa_cfg->allow_cmds) {
813		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
814		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
815		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
816
817		ipr_cmd->u.hostrcb = hostrcb;
818		ioarcb = &ipr_cmd->ioarcb;
819
820		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
821		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
822		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
823		ioarcb->cmd_pkt.cdb[1] = type;
824		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
825		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
826
827		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
828		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
829		ipr_cmd->ioadl[0].flags_and_data_len =
830			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
831		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
832
833		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
834			ipr_cmd->done = ipr_process_ccn;
835		else
836			ipr_cmd->done = ipr_process_error;
837
838		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
839
840		mb();
841		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
842		       ioa_cfg->regs.ioarrin_reg);
843	} else {
844		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
845	}
846}
847
848/**
849 * ipr_init_res_entry - Initialize a resource entry struct.
850 * @res:	resource entry struct
851 *
852 * Return value:
853 * 	none
854 **/
855static void ipr_init_res_entry(struct ipr_resource_entry *res)
856{
857	res->needs_sync_complete = 0;
858	res->in_erp = 0;
859	res->add_to_ml = 0;
860	res->del_from_ml = 0;
861	res->resetting_device = 0;
862	res->sdev = NULL;
863	res->sata_port = NULL;
864}
865
866/**
867 * ipr_handle_config_change - Handle a config change from the adapter
868 * @ioa_cfg:	ioa config struct
869 * @hostrcb:	hostrcb
870 *
871 * Return value:
872 * 	none
873 **/
874static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
875			      struct ipr_hostrcb *hostrcb)
876{
877	struct ipr_resource_entry *res = NULL;
878	struct ipr_config_table_entry *cfgte;
879	u32 is_ndn = 1;
880
881	cfgte = &hostrcb->hcam.u.ccn.cfgte;
882
883	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
884		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
885			    sizeof(cfgte->res_addr))) {
886			is_ndn = 0;
887			break;
888		}
889	}
890
891	if (is_ndn) {
892		if (list_empty(&ioa_cfg->free_res_q)) {
893			ipr_send_hcam(ioa_cfg,
894				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
895				      hostrcb);
896			return;
897		}
898
899		res = list_entry(ioa_cfg->free_res_q.next,
900				 struct ipr_resource_entry, queue);
901
902		list_del(&res->queue);
903		ipr_init_res_entry(res);
904		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
905	}
906
907	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
908
909	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
910		if (res->sdev) {
911			res->del_from_ml = 1;
912			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
913			if (ioa_cfg->allow_ml_add_del)
914				schedule_work(&ioa_cfg->work_q);
915		} else
916			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
917	} else if (!res->sdev) {
918		res->add_to_ml = 1;
919		if (ioa_cfg->allow_ml_add_del)
920			schedule_work(&ioa_cfg->work_q);
921	}
922
923	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
924}
925
926/**
927 * ipr_process_ccn - Op done function for a CCN.
928 * @ipr_cmd:	ipr command struct
929 *
930 * This function is the op done function for a configuration
931 * change notification host controlled async from the adapter.
932 *
933 * Return value:
934 * 	none
935 **/
936static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
937{
938	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
939	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
940	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
941
942	list_del(&hostrcb->queue);
943	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
944
945	if (ioasc) {
946		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
947			dev_err(&ioa_cfg->pdev->dev,
948				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
949
950		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
951	} else {
952		ipr_handle_config_change(ioa_cfg, hostrcb);
953	}
954}
955
956/**
957 * ipr_log_vpd - Log the passed VPD to the error log.
958 * @vpd:		vendor/product id/sn struct
959 *
960 * Return value:
961 * 	none
962 **/
963static void ipr_log_vpd(struct ipr_vpd *vpd)
964{
965	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
966		    + IPR_SERIAL_NUM_LEN];
967
968	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
969	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
970	       IPR_PROD_ID_LEN);
971	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
972	ipr_err("Vendor/Product ID: %s\n", buffer);
973
974	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
975	buffer[IPR_SERIAL_NUM_LEN] = '\0';
976	ipr_err("    Serial Number: %s\n", buffer);
977}
978
979/**
980 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
981 * @vpd:		vendor/product id/sn/wwn struct
982 *
983 * Return value:
984 * 	none
985 **/
986static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
987{
988	ipr_log_vpd(&vpd->vpd);
989	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
990		be32_to_cpu(vpd->wwid[1]));
991}
992
993/**
994 * ipr_log_enhanced_cache_error - Log a cache error.
995 * @ioa_cfg:	ioa config struct
996 * @hostrcb:	hostrcb struct
997 *
998 * Return value:
999 * 	none
1000 **/
1001static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1002					 struct ipr_hostrcb *hostrcb)
1003{
1004	struct ipr_hostrcb_type_12_error *error =
1005		&hostrcb->hcam.u.error.u.type_12_error;
1006
1007	ipr_err("-----Current Configuration-----\n");
1008	ipr_err("Cache Directory Card Information:\n");
1009	ipr_log_ext_vpd(&error->ioa_vpd);
1010	ipr_err("Adapter Card Information:\n");
1011	ipr_log_ext_vpd(&error->cfc_vpd);
1012
1013	ipr_err("-----Expected Configuration-----\n");
1014	ipr_err("Cache Directory Card Information:\n");
1015	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1016	ipr_err("Adapter Card Information:\n");
1017	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1018
1019	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1020		     be32_to_cpu(error->ioa_data[0]),
1021		     be32_to_cpu(error->ioa_data[1]),
1022		     be32_to_cpu(error->ioa_data[2]));
1023}
1024
1025/**
1026 * ipr_log_cache_error - Log a cache error.
1027 * @ioa_cfg:	ioa config struct
1028 * @hostrcb:	hostrcb struct
1029 *
1030 * Return value:
1031 * 	none
1032 **/
1033static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1034				struct ipr_hostrcb *hostrcb)
1035{
1036	struct ipr_hostrcb_type_02_error *error =
1037		&hostrcb->hcam.u.error.u.type_02_error;
1038
1039	ipr_err("-----Current Configuration-----\n");
1040	ipr_err("Cache Directory Card Information:\n");
1041	ipr_log_vpd(&error->ioa_vpd);
1042	ipr_err("Adapter Card Information:\n");
1043	ipr_log_vpd(&error->cfc_vpd);
1044
1045	ipr_err("-----Expected Configuration-----\n");
1046	ipr_err("Cache Directory Card Information:\n");
1047	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1048	ipr_err("Adapter Card Information:\n");
1049	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1050
1051	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1052		     be32_to_cpu(error->ioa_data[0]),
1053		     be32_to_cpu(error->ioa_data[1]),
1054		     be32_to_cpu(error->ioa_data[2]));
1055}
1056
1057/**
1058 * ipr_log_enhanced_config_error - Log a configuration error.
1059 * @ioa_cfg:	ioa config struct
1060 * @hostrcb:	hostrcb struct
1061 *
1062 * Return value:
1063 * 	none
1064 **/
1065static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1066					  struct ipr_hostrcb *hostrcb)
1067{
1068	int errors_logged, i;
1069	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1070	struct ipr_hostrcb_type_13_error *error;
1071
1072	error = &hostrcb->hcam.u.error.u.type_13_error;
1073	errors_logged = be32_to_cpu(error->errors_logged);
1074
1075	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1076		be32_to_cpu(error->errors_detected), errors_logged);
1077
1078	dev_entry = error->dev;
1079
1080	for (i = 0; i < errors_logged; i++, dev_entry++) {
1081		ipr_err_separator;
1082
1083		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1084		ipr_log_ext_vpd(&dev_entry->vpd);
1085
1086		ipr_err("-----New Device Information-----\n");
1087		ipr_log_ext_vpd(&dev_entry->new_vpd);
1088
1089		ipr_err("Cache Directory Card Information:\n");
1090		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1091
1092		ipr_err("Adapter Card Information:\n");
1093		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1094	}
1095}
1096
1097/**
1098 * ipr_log_config_error - Log a configuration error.
1099 * @ioa_cfg:	ioa config struct
1100 * @hostrcb:	hostrcb struct
1101 *
1102 * Return value:
1103 * 	none
1104 **/
1105static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1106				 struct ipr_hostrcb *hostrcb)
1107{
1108	int errors_logged, i;
1109	struct ipr_hostrcb_device_data_entry *dev_entry;
1110	struct ipr_hostrcb_type_03_error *error;
1111
1112	error = &hostrcb->hcam.u.error.u.type_03_error;
1113	errors_logged = be32_to_cpu(error->errors_logged);
1114
1115	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1116		be32_to_cpu(error->errors_detected), errors_logged);
1117
1118	dev_entry = error->dev;
1119
1120	for (i = 0; i < errors_logged; i++, dev_entry++) {
1121		ipr_err_separator;
1122
1123		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1124		ipr_log_vpd(&dev_entry->vpd);
1125
1126		ipr_err("-----New Device Information-----\n");
1127		ipr_log_vpd(&dev_entry->new_vpd);
1128
1129		ipr_err("Cache Directory Card Information:\n");
1130		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1131
1132		ipr_err("Adapter Card Information:\n");
1133		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1134
1135		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1136			be32_to_cpu(dev_entry->ioa_data[0]),
1137			be32_to_cpu(dev_entry->ioa_data[1]),
1138			be32_to_cpu(dev_entry->ioa_data[2]),
1139			be32_to_cpu(dev_entry->ioa_data[3]),
1140			be32_to_cpu(dev_entry->ioa_data[4]));
1141	}
1142}
1143
1144/**
1145 * ipr_log_enhanced_array_error - Log an array configuration error.
1146 * @ioa_cfg:	ioa config struct
1147 * @hostrcb:	hostrcb struct
1148 *
1149 * Return value:
1150 * 	none
1151 **/
1152static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1153					 struct ipr_hostrcb *hostrcb)
1154{
1155	int i, num_entries;
1156	struct ipr_hostrcb_type_14_error *error;
1157	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1158	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1159
1160	error = &hostrcb->hcam.u.error.u.type_14_error;
1161
1162	ipr_err_separator;
1163
1164	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1165		error->protection_level,
1166		ioa_cfg->host->host_no,
1167		error->last_func_vset_res_addr.bus,
1168		error->last_func_vset_res_addr.target,
1169		error->last_func_vset_res_addr.lun);
1170
1171	ipr_err_separator;
1172
1173	array_entry = error->array_member;
1174	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1175			    ARRAY_SIZE(error->array_member));
1176
1177	for (i = 0; i < num_entries; i++, array_entry++) {
1178		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1179			continue;
1180
1181		if (be32_to_cpu(error->exposed_mode_adn) == i)
1182			ipr_err("Exposed Array Member %d:\n", i);
1183		else
1184			ipr_err("Array Member %d:\n", i);
1185
1186		ipr_log_ext_vpd(&array_entry->vpd);
1187		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1188		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1189				 "Expected Location");
1190
1191		ipr_err_separator;
1192	}
1193}
1194
1195/**
1196 * ipr_log_array_error - Log an array configuration error.
1197 * @ioa_cfg:	ioa config struct
1198 * @hostrcb:	hostrcb struct
1199 *
1200 * Return value:
1201 * 	none
1202 **/
1203static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1204				struct ipr_hostrcb *hostrcb)
1205{
1206	int i;
1207	struct ipr_hostrcb_type_04_error *error;
1208	struct ipr_hostrcb_array_data_entry *array_entry;
1209	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1210
1211	error = &hostrcb->hcam.u.error.u.type_04_error;
1212
1213	ipr_err_separator;
1214
1215	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1216		error->protection_level,
1217		ioa_cfg->host->host_no,
1218		error->last_func_vset_res_addr.bus,
1219		error->last_func_vset_res_addr.target,
1220		error->last_func_vset_res_addr.lun);
1221
1222	ipr_err_separator;
1223
1224	array_entry = error->array_member;
1225
1226	for (i = 0; i < 18; i++) {
1227		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1228			continue;
1229
1230		if (be32_to_cpu(error->exposed_mode_adn) == i)
1231			ipr_err("Exposed Array Member %d:\n", i);
1232		else
1233			ipr_err("Array Member %d:\n", i);
1234
1235		ipr_log_vpd(&array_entry->vpd);
1236
1237		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1238		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1239				 "Expected Location");
1240
1241		ipr_err_separator;
1242
1243		if (i == 9)
1244			array_entry = error->array_member2;
1245		else
1246			array_entry++;
1247	}
1248}
1249
1250/**
1251 * ipr_log_hex_data - Log additional hex IOA error data.
1252 * @ioa_cfg:	ioa config struct
1253 * @data:		IOA error data
1254 * @len:		data length
1255 *
1256 * Return value:
1257 * 	none
1258 **/
1259static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1260{
1261	int i;
1262
1263	if (len == 0)
1264		return;
1265
1266	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1267		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1268
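	/* Dump four 32-bit words per line, prefixed with the starting byte offset */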
1269	for (i = 0; i < len / 4; i += 4) {
1270		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1271			be32_to_cpu(data[i]),
1272			be32_to_cpu(data[i+1]),
1273			be32_to_cpu(data[i+2]),
1274			be32_to_cpu(data[i+3]));
1275	}
1276}
1277
1278/**
1279 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1280 * @ioa_cfg:	ioa config struct
1281 * @hostrcb:	hostrcb struct
1282 *
1283 * Return value:
1284 * 	none
1285 **/
1286static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1287					    struct ipr_hostrcb *hostrcb)
1288{
1289	struct ipr_hostrcb_type_17_error *error;
1290
1291	error = &hostrcb->hcam.u.error.u.type_17_error;
1292	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1293
1294	ipr_err("%s\n", error->failure_reason);
1295	ipr_err("Remote Adapter VPD:\n");
1296	ipr_log_ext_vpd(&error->vpd);
1297	ipr_log_hex_data(ioa_cfg, error->data,
1298			 be32_to_cpu(hostrcb->hcam.length) -
1299			 (offsetof(struct ipr_hostrcb_error, u) +
1300			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1301}
1302
1303/**
1304 * ipr_log_dual_ioa_error - Log a dual adapter error.
1305 * @ioa_cfg:	ioa config struct
1306 * @hostrcb:	hostrcb struct
1307 *
1308 * Return value:
1309 * 	none
1310 **/
1311static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1312				   struct ipr_hostrcb *hostrcb)
1313{
1314	struct ipr_hostrcb_type_07_error *error;
1315
1316	error = &hostrcb->hcam.u.error.u.type_07_error;
1317	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1318
1319	ipr_err("%s\n", error->failure_reason);
1320	ipr_err("Remote Adapter VPD:\n");
1321	ipr_log_vpd(&error->vpd);
1322	ipr_log_hex_data(ioa_cfg, error->data,
1323			 be32_to_cpu(hostrcb->hcam.length) -
1324			 (offsetof(struct ipr_hostrcb_error, u) +
1325			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1326}
1327
1328static const struct {
1329	u8 active;
1330	char *desc;
1331} path_active_desc[] = {
1332	{ IPR_PATH_NO_INFO, "Path" },
1333	{ IPR_PATH_ACTIVE, "Active path" },
1334	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1335};
1336
1337static const struct {
1338	u8 state;
1339	char *desc;
1340} path_state_desc[] = {
1341	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1342	{ IPR_PATH_HEALTHY, "is healthy" },
1343	{ IPR_PATH_DEGRADED, "is degraded" },
1344	{ IPR_PATH_FAILED, "is failed" }
1345};
1346
1347/**
1348 * ipr_log_fabric_path - Log a fabric path error
1349 * @hostrcb:	hostrcb struct
1350 * @fabric:		fabric descriptor
1351 *
1352 * Return value:
1353 * 	none
1354 **/
1355static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1356				struct ipr_hostrcb_fabric_desc *fabric)
1357{
1358	int i, j;
1359	u8 path_state = fabric->path_state;
1360	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1361	u8 state = path_state & IPR_PATH_STATE_MASK;
1362
1363	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1364		if (path_active_desc[i].active != active)
1365			continue;
1366
1367		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1368			if (path_state_desc[j].state != state)
1369				continue;
1370
1371			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1372				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1373					     path_active_desc[i].desc, path_state_desc[j].desc,
1374					     fabric->ioa_port);
1375			} else if (fabric->cascaded_expander == 0xff) {
1376				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1377					     path_active_desc[i].desc, path_state_desc[j].desc,
1378					     fabric->ioa_port, fabric->phy);
1379			} else if (fabric->phy == 0xff) {
1380				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1381					     path_active_desc[i].desc, path_state_desc[j].desc,
1382					     fabric->ioa_port, fabric->cascaded_expander);
1383			} else {
1384				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1385					     path_active_desc[i].desc, path_state_desc[j].desc,
1386					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1387			}
1388			return;
1389		}
1390	}
1391
1392	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1393		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1394}
1395
1396static const struct {
1397	u8 type;
1398	char *desc;
1399} path_type_desc[] = {
1400	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1401	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1402	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1403	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1404};
1405
1406static const struct {
1407	u8 status;
1408	char *desc;
1409} path_status_desc[] = {
1410	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1411	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1412	{ IPR_PATH_CFG_FAILED, "Failed" },
1413	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1414	{ IPR_PATH_NOT_DETECTED, "Missing" },
1415	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1416};
1417
1418static const char *link_rate[] = {
1419	"unknown",
1420	"disabled",
1421	"phy reset problem",
1422	"spinup hold",
1423	"port selector",
1424	"unknown",
1425	"unknown",
1426	"unknown",
1427	"1.5Gbps",
1428	"3.0Gbps",
1429	"unknown",
1430	"unknown",
1431	"unknown",
1432	"unknown",
1433	"unknown",
1434	"unknown"
1435};
1436
1437/**
1438 * ipr_log_path_elem - Log a fabric path element.
1439 * @hostrcb:	hostrcb struct
1440 * @cfg:		fabric path element struct
1441 *
1442 * Return value:
1443 * 	none
1444 **/
1445static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1446			      struct ipr_hostrcb_config_element *cfg)
1447{
1448	int i, j;
1449	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1450	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1451
1452	if (type == IPR_PATH_CFG_NOT_EXIST)
1453		return;
1454
1455	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1456		if (path_type_desc[i].type != type)
1457			continue;
1458
1459		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1460			if (path_status_desc[j].status != status)
1461				continue;
1462
1463			if (type == IPR_PATH_CFG_IOA_PORT) {
1464				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1465					     path_status_desc[j].desc, path_type_desc[i].desc,
1466					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1467					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1468			} else {
1469				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1470					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1471						     path_status_desc[j].desc, path_type_desc[i].desc,
1472						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1473						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1474				} else if (cfg->cascaded_expander == 0xff) {
1475					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1476						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1477						     path_type_desc[i].desc, cfg->phy,
1478						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1479						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1480				} else if (cfg->phy == 0xff) {
1481					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1482						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1483						     path_type_desc[i].desc, cfg->cascaded_expander,
1484						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1485						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1486				} else {
1487					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
1488						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1489						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1490						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1491						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1492				}
1493			}
1494			return;
1495		}
1496	}
1497
1498	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1499		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1500		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1501		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1502}
1503
1504/**
1505 * ipr_log_fabric_error - Log a fabric error.
1506 * @ioa_cfg:	ioa config struct
1507 * @hostrcb:	hostrcb struct
1508 *
1509 * Return value:
1510 * 	none
1511 **/
1512static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1513				 struct ipr_hostrcb *hostrcb)
1514{
1515	struct ipr_hostrcb_type_20_error *error;
1516	struct ipr_hostrcb_fabric_desc *fabric;
1517	struct ipr_hostrcb_config_element *cfg;
1518	int i, add_len;
1519
1520	error = &hostrcb->hcam.u.error.u.type_20_error;
1521	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1522	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1523
1524	add_len = be32_to_cpu(hostrcb->hcam.length) -
1525		(offsetof(struct ipr_hostrcb_error, u) +
1526		 offsetof(struct ipr_hostrcb_type_20_error, desc));
1527
1528	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1529		ipr_log_fabric_path(hostrcb, fabric);
1530		for_each_fabric_cfg(fabric, cfg)
1531			ipr_log_path_elem(hostrcb, cfg);
1532
1533		add_len -= be16_to_cpu(fabric->length);
1534		fabric = (struct ipr_hostrcb_fabric_desc *)
1535			((unsigned long)fabric + be16_to_cpu(fabric->length));
1536	}
1537
1538	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1539}
1540
1541/**
1542 * ipr_log_generic_error - Log an adapter error.
1543 * @ioa_cfg:	ioa config struct
1544 * @hostrcb:	hostrcb struct
1545 *
1546 * Return value:
1547 * 	none
1548 **/
1549static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1550				  struct ipr_hostrcb *hostrcb)
1551{
1552	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1553			 be32_to_cpu(hostrcb->hcam.length));
1554}
1555
1556/**
1557 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1558 * @ioasc:	IOASC
1559 *
1560 * This function will return the index into the ipr_error_table
1561 * for the specified IOASC. If the IOASC is not in the table,
1562 * 0 will be returned, which points to the entry used for unknown errors.
1563 *
1564 * Return value:
1565 * 	index into the ipr_error_table
1566 **/
1567static u32 ipr_get_error(u32 ioasc)
1568{
1569	int i;
1570
1571	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1572		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1573			return i;
1574
1575	return 0;
1576}
1577
1578/**
1579 * ipr_handle_log_data - Log an adapter error.
1580 * @ioa_cfg:	ioa config struct
1581 * @hostrcb:	hostrcb struct
1582 *
1583 * This function logs an adapter error to the system.
1584 *
1585 * Return value:
1586 * 	none
1587 **/
1588static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1589				struct ipr_hostrcb *hostrcb)
1590{
1591	u32 ioasc;
1592	int error_index;
1593
1594	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1595		return;
1596
1597	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1598		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1599
1600	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1601
1602	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1603	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1604		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1605		scsi_report_bus_reset(ioa_cfg->host,
1606				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1607	}
1608
1609	error_index = ipr_get_error(ioasc);
1610
1611	if (!ipr_error_table[error_index].log_hcam)
1612		return;
1613
1614	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1615
1616	/* Set indication we have logged an error */
1617	ioa_cfg->errors_logged++;
1618
1619	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1620		return;
1621	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1622		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1623
1624	switch (hostrcb->hcam.overlay_id) {
1625	case IPR_HOST_RCB_OVERLAY_ID_2:
1626		ipr_log_cache_error(ioa_cfg, hostrcb);
1627		break;
1628	case IPR_HOST_RCB_OVERLAY_ID_3:
1629		ipr_log_config_error(ioa_cfg, hostrcb);
1630		break;
1631	case IPR_HOST_RCB_OVERLAY_ID_4:
1632	case IPR_HOST_RCB_OVERLAY_ID_6:
1633		ipr_log_array_error(ioa_cfg, hostrcb);
1634		break;
1635	case IPR_HOST_RCB_OVERLAY_ID_7:
1636		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1637		break;
1638	case IPR_HOST_RCB_OVERLAY_ID_12:
1639		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1640		break;
1641	case IPR_HOST_RCB_OVERLAY_ID_13:
1642		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1643		break;
1644	case IPR_HOST_RCB_OVERLAY_ID_14:
1645	case IPR_HOST_RCB_OVERLAY_ID_16:
1646		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1647		break;
1648	case IPR_HOST_RCB_OVERLAY_ID_17:
1649		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1650		break;
1651	case IPR_HOST_RCB_OVERLAY_ID_20:
1652		ipr_log_fabric_error(ioa_cfg, hostrcb);
1653		break;
1654	case IPR_HOST_RCB_OVERLAY_ID_1:
1655	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1656	default:
1657		ipr_log_generic_error(ioa_cfg, hostrcb);
1658		break;
1659	}
1660}
1661
1662/**
1663 * ipr_process_error - Op done function for an adapter error log.
1664 * @ipr_cmd:	ipr command struct
1665 *
1666 * This function is the op done function for an error log host
1667 * controlled async from the adapter. It will log the error and
1668 * send the HCAM back to the adapter.
1669 *
1670 * Return value:
1671 * 	none
1672 **/
1673static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1674{
1675	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1676	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1677	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1678
1679	list_del(&hostrcb->queue);
1680	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1681
1682	if (!ioasc) {
1683		ipr_handle_log_data(ioa_cfg, hostrcb);
1684	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1685		dev_err(&ioa_cfg->pdev->dev,
1686			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1687	}
1688
1689	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1690}
1691
1692/**
1693 * ipr_timeout -  An internally generated op has timed out.
1694 * @ipr_cmd:	ipr command struct
1695 *
1696 * This function blocks host requests and initiates an
1697 * adapter reset.
1698 *
1699 * Return value:
1700 * 	none
1701 **/
1702static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1703{
1704	unsigned long lock_flags = 0;
1705	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1706
1707	ENTER;
1708	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1709
1710	ioa_cfg->errors_logged++;
1711	dev_err(&ioa_cfg->pdev->dev,
1712		"Adapter being reset due to command timeout.\n");
1713
1714	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1715		ioa_cfg->sdt_state = GET_DUMP;
1716
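	/* Only kick off a new adapter reset if one is not already in progress, or if the op that timed out is the reset job itself */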
1717	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1718		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1719
1720	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1721	LEAVE;
1722}
1723
1724/**
1725 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1726 * @ipr_cmd:	ipr command struct
1727 *
1728 * This function blocks host requests and initiates an
1729 * adapter reset.
1730 *
1731 * Return value:
1732 * 	none
1733 **/
1734static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1735{
1736	unsigned long lock_flags = 0;
1737	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1738
1739	ENTER;
1740	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1741
1742	ioa_cfg->errors_logged++;
1743	dev_err(&ioa_cfg->pdev->dev,
1744		"Adapter timed out transitioning to operational.\n");
1745
1746	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1747		ioa_cfg->sdt_state = GET_DUMP;
1748
1749	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1750		if (ipr_fastfail)
1751			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1752		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1753	}
1754
1755	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1756	LEAVE;
1757}
1758
1759/**
1760 * ipr_reset_reload - Reset/Reload the IOA
1761 * @ioa_cfg:		ioa config struct
1762 * @shutdown_type:	shutdown type
1763 *
1764 * This function resets the adapter and re-initializes it.
1765 * This function assumes that all new host commands have been stopped.
1766 * Return value:
1767 * 	SUCCESS / FAILED
1768 **/
1769static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1770			    enum ipr_shutdown_type shutdown_type)
1771{
1772	if (!ioa_cfg->in_reset_reload)
1773		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1774
1775	spin_unlock_irq(ioa_cfg->host->host_lock);
1776	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1777	spin_lock_irq(ioa_cfg->host->host_lock);
1778
1779	/* If we got hit with a host reset while we were already resetting
1780	 the adapter for some reason, and that reset failed, the adapter
	 is now dead; report the host reset as failed. */
1781	if (ioa_cfg->ioa_is_dead) {
1782		ipr_trace;
1783		return FAILED;
1784	}
1785
1786	return SUCCESS;
1787}
1788
1789/**
1790 * ipr_find_ses_entry - Find matching SES in SES table
1791 * @res:	resource entry struct of SES
1792 *
1793 * Return value:
1794 * 	pointer to SES table entry / NULL on failure
1795 **/
1796static const struct ipr_ses_table_entry *
1797ipr_find_ses_entry(struct ipr_resource_entry *res)
1798{
1799	int i, j, matches;
1800	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1801
1802	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1803		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1804			if (ste->compare_product_id_byte[j] == 'X') {
1805				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1806					matches++;
1807				else
1808					break;
1809			} else
1810				matches++;
1811		}
1812
1813		if (matches == IPR_PROD_ID_LEN)
1814			return ste;
1815	}
1816
1817	return NULL;
1818}
1819
1820/**
1821 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1822 * @ioa_cfg:	ioa config struct
1823 * @bus:		SCSI bus
1824 * @bus_width:	bus width
1825 *
1826 * Return value:
1827 *	SCSI bus speed in units of 100 kHz (e.g. 1600 = 160 MHz).
1828 *	For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec
1829 *	is twice the bus speed in MHz (e.g. for a wide-enabled bus,
1830 *	a max of 160 MHz gives a max of 320 MB/sec).
1831 **/
1832static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1833{
1834	struct ipr_resource_entry *res;
1835	const struct ipr_ses_table_entry *ste;
1836	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1837
1838	/* Loop through each config table entry in the config table buffer */
1839	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1840		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1841			continue;
1842
1843		if (bus != res->cfgte.res_addr.bus)
1844			continue;
1845
1846		if (!(ste = ipr_find_ses_entry(res)))
1847			continue;
1848
1849		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1850	}
1851
1852	return max_xfer_rate;
1853}
1854
1855/**
1856 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1857 * @ioa_cfg:		ioa config struct
1858 * @max_delay:		max delay in micro-seconds to wait
1859 *
1860 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1861 *
1862 * Return value:
1863 * 	0 on success / other on failure
1864 **/
1865static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1866{
1867	volatile u32 pcii_reg;
1868	int delay = 1;
1869
1870	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1871	while (delay < max_delay) {
1872		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1873
1874		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1875			return 0;
1876
1877		/* udelay cannot be used if delay is more than a few milliseconds */
1878		if ((delay / 1000) > MAX_UDELAY_MS)
1879			mdelay(delay / 1000);
1880		else
1881			udelay(delay);
1882
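		/* Exponential backoff: double the poll interval, up to max_delay */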
1883		delay += delay;
1884	}
1885	return -EIO;
1886}
1887
1888/**
1889 * ipr_get_ldump_data_section - Dump IOA memory
1890 * @ioa_cfg:			ioa config struct
1891 * @start_addr:			adapter address to dump
1892 * @dest:				destination kernel buffer
1893 * @length_in_words:	length to dump in 4 byte words
1894 *
1895 * Return value:
1896 * 	0 on success / -EIO on failure
1897 **/
1898static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1899				      u32 start_addr,
1900				      __be32 *dest, u32 length_in_words)
1901{
1902	volatile u32 temp_pcii_reg;
1903	int i, delay = 0;
1904
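	/*
	 * The dump handshake below works as follows: raise RESET_ALERT and
	 * IO_DEBUG_ALERT to put the IOA into LDUMP state, write the starting
	 * address to the mailbox, then read one 32-bit word from the mailbox
	 * per IO debug acknowledge until length_in_words words have been
	 * transferred, and finally signal end of block transfer.
	 */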
1905	/* Write IOA interrupt reg starting LDUMP state  */
1906	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1907	       ioa_cfg->regs.set_uproc_interrupt_reg);
1908
1909	/* Wait for IO debug acknowledge */
1910	if (ipr_wait_iodbg_ack(ioa_cfg,
1911			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1912		dev_err(&ioa_cfg->pdev->dev,
1913			"IOA dump long data transfer timeout\n");
1914		return -EIO;
1915	}
1916
1917	/* Signal LDUMP interlocked - clear IO debug ack */
1918	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1919	       ioa_cfg->regs.clr_interrupt_reg);
1920
1921	/* Write Mailbox with starting address */
1922	writel(start_addr, ioa_cfg->ioa_mailbox);
1923
1924	/* Signal address valid - clear IOA Reset alert */
1925	writel(IPR_UPROCI_RESET_ALERT,
1926	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1927
1928	for (i = 0; i < length_in_words; i++) {
1929		/* Wait for IO debug acknowledge */
1930		if (ipr_wait_iodbg_ack(ioa_cfg,
1931				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1932			dev_err(&ioa_cfg->pdev->dev,
1933				"IOA dump short data transfer timeout\n");
1934			return -EIO;
1935		}
1936
1937		/* Read data from mailbox and increment destination pointer */
1938		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1939		dest++;
1940
1941		/* For all but the last word of data, signal data received */
1942		if (i < (length_in_words - 1)) {
1943			/* Signal dump data received - Clear IO debug Ack */
1944			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1945			       ioa_cfg->regs.clr_interrupt_reg);
1946		}
1947	}
1948
1949	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1950	writel(IPR_UPROCI_RESET_ALERT,
1951	       ioa_cfg->regs.set_uproc_interrupt_reg);
1952
1953	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1954	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1955
1956	/* Signal dump data received - Clear IO debug Ack */
1957	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1958	       ioa_cfg->regs.clr_interrupt_reg);
1959
1960	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1961	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1962		temp_pcii_reg =
1963		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1964
1965		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1966			return 0;
1967
1968		udelay(10);
1969		delay += 10;
1970	}
1971
1972	return 0;
1973}
1974
1975#ifdef CONFIG_SCSI_IPR_DUMP
1976/**
1977 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1978 * @ioa_cfg:		ioa config struct
1979 * @pci_address:	adapter address
1980 * @length:			length of data to copy
1981 *
1982 * Copy data from PCI adapter to kernel buffer.
1983 * Note: length MUST be a 4 byte multiple
1984 * Return value:
1985 * 	0 on success / other on failure
1986 **/
1987static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1988			unsigned long pci_address, u32 length)
1989{
1990	int bytes_copied = 0;
1991	int cur_len, rc, rem_len, rem_page_len;
1992	__be32 *page;
1993	unsigned long lock_flags = 0;
1994	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1995
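	/*
	 * The dump is copied in page sized chunks: a new page is allocated
	 * whenever the current one fills up.  The host lock is only held
	 * around each mailbox transfer so that an ABORT_DUMP request can be
	 * honored between chunks, and schedule() gives up the CPU so a long
	 * dump does not monopolize the processor.
	 */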
1996	while (bytes_copied < length &&
1997	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1998		if (ioa_dump->page_offset >= PAGE_SIZE ||
1999		    ioa_dump->page_offset == 0) {
2000			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2001
2002			if (!page) {
2003				ipr_trace;
2004				return bytes_copied;
2005			}
2006
2007			ioa_dump->page_offset = 0;
2008			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2009			ioa_dump->next_page_index++;
2010		} else
2011			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2012
2013		rem_len = length - bytes_copied;
2014		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2015		cur_len = min(rem_len, rem_page_len);
2016
2017		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2018		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2019			rc = -EIO;
2020		} else {
2021			rc = ipr_get_ldump_data_section(ioa_cfg,
2022							pci_address + bytes_copied,
2023							&page[ioa_dump->page_offset / 4],
2024							(cur_len / sizeof(u32)));
2025		}
2026		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2027
2028		if (!rc) {
2029			ioa_dump->page_offset += cur_len;
2030			bytes_copied += cur_len;
2031		} else {
2032			ipr_trace;
2033			break;
2034		}
2035		schedule();
2036	}
2037
2038	return bytes_copied;
2039}
2040
2041/**
2042 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2043 * @hdr:	dump entry header struct
2044 *
2045 * Return value:
2046 * 	nothing
2047 **/
2048static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2049{
2050	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2051	hdr->num_elems = 1;
2052	hdr->offset = sizeof(*hdr);
2053	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2054}
2055
2056/**
2057 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2058 * @ioa_cfg:	ioa config struct
2059 * @driver_dump:	driver dump struct
2060 *
2061 * Return value:
2062 * 	nothing
2063 **/
2064static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2065				   struct ipr_driver_dump *driver_dump)
2066{
2067	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2068
2069	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2070	driver_dump->ioa_type_entry.hdr.len =
2071		sizeof(struct ipr_dump_ioa_type_entry) -
2072		sizeof(struct ipr_dump_entry_header);
2073	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2074	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2075	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2076	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2077		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2078		ucode_vpd->minor_release[1];
2079	driver_dump->hdr.num_entries++;
2080}
2081
2082/**
2083 * ipr_dump_version_data - Fill in the driver version in the dump.
2084 * @ioa_cfg:	ioa config struct
2085 * @driver_dump:	driver dump struct
2086 *
2087 * Return value:
2088 * 	nothing
2089 **/
2090static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2091				  struct ipr_driver_dump *driver_dump)
2092{
2093	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2094	driver_dump->version_entry.hdr.len =
2095		sizeof(struct ipr_dump_version_entry) -
2096		sizeof(struct ipr_dump_entry_header);
2097	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2098	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2099	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2100	driver_dump->hdr.num_entries++;
2101}
2102
2103/**
2104 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2105 * @ioa_cfg:	ioa config struct
2106 * @driver_dump:	driver dump struct
2107 *
2108 * Return value:
2109 * 	nothing
2110 **/
2111static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2112				   struct ipr_driver_dump *driver_dump)
2113{
2114	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2115	driver_dump->trace_entry.hdr.len =
2116		sizeof(struct ipr_dump_trace_entry) -
2117		sizeof(struct ipr_dump_entry_header);
2118	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2119	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2120	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2121	driver_dump->hdr.num_entries++;
2122}
2123
2124/**
2125 * ipr_dump_location_data - Fill in the IOA location in the dump.
2126 * @ioa_cfg:	ioa config struct
2127 * @driver_dump:	driver dump struct
2128 *
2129 * Return value:
2130 * 	nothing
2131 **/
2132static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2133				   struct ipr_driver_dump *driver_dump)
2134{
2135	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2136	driver_dump->location_entry.hdr.len =
2137		sizeof(struct ipr_dump_location_entry) -
2138		sizeof(struct ipr_dump_entry_header);
2139	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2140	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2141	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2142	driver_dump->hdr.num_entries++;
2143}
2144
2145/**
2146 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2147 * @ioa_cfg:	ioa config struct
2148 * @dump:		dump struct
2149 *
2150 * Return value:
2151 * 	nothing
2152 **/
2153static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2154{
2155	unsigned long start_addr, sdt_word;
2156	unsigned long lock_flags = 0;
2157	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2158	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2159	u32 num_entries, start_off, end_off;
2160	u32 bytes_to_copy, bytes_copied, rc;
2161	struct ipr_sdt *sdt;
2162	int i;
2163
2164	ENTER;
2165
2166	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2167
2168	if (ioa_cfg->sdt_state != GET_DUMP) {
2169		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2170		return;
2171	}
2172
2173	start_addr = readl(ioa_cfg->ioa_mailbox);
2174
2175	if (!ipr_sdt_is_fmt2(start_addr)) {
2176		dev_err(&ioa_cfg->pdev->dev,
2177			"Invalid dump table format: %lx\n", start_addr);
2178		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2179		return;
2180	}
2181
2182	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2183
2184	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2185
2186	/* Initialize the overall dump header */
2187	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2188	driver_dump->hdr.num_entries = 1;
2189	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2190	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2191	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2192	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2193
2194	ipr_dump_version_data(ioa_cfg, driver_dump);
2195	ipr_dump_location_data(ioa_cfg, driver_dump);
2196	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2197	ipr_dump_trace_data(ioa_cfg, driver_dump);
2198
2199	/* Update dump_header */
2200	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2201
2202	/* IOA Dump entry */
2203	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2204	ioa_dump->format = IPR_SDT_FMT2;
2205	ioa_dump->hdr.len = 0;
2206	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2207	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2208
2209	/* The first entries in the sdt are actually a list of dump addresses
2210	 and lengths used to gather the real dump data.  sdt points to the
2211	 IOA-generated dump table.  Dump data will be extracted based on the
2212	 entries in this table. */
2213	sdt = &ioa_dump->sdt;
2214
2215	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2216					sizeof(struct ipr_sdt) / sizeof(__be32));
2217
2218	/* Smart Dump table is ready to use and the first entry is valid */
2219	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2220		dev_err(&ioa_cfg->pdev->dev,
2221			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2222			rc, be32_to_cpu(sdt->hdr.state));
2223		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2224		ioa_cfg->sdt_state = DUMP_OBTAINED;
2225		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2226		return;
2227	}
2228
2229	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2230
2231	if (num_entries > IPR_NUM_SDT_ENTRIES)
2232		num_entries = IPR_NUM_SDT_ENTRIES;
2233
2234	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2235
2236	for (i = 0; i < num_entries; i++) {
2237		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2238			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2239			break;
2240		}
2241
2242		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2243			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2244			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2245			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2246
2247			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2248				bytes_to_copy = end_off - start_off;
2249				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2250					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2251					continue;
2252				}
2253
2254				/* Copy data from adapter to driver buffers */
2255				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2256							    bytes_to_copy);
2257
2258				ioa_dump->hdr.len += bytes_copied;
2259
2260				if (bytes_copied != bytes_to_copy) {
2261					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2262					break;
2263				}
2264			}
2265		}
2266	}
2267
2268	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2269
2270	/* Update dump_header */
2271	driver_dump->hdr.len += ioa_dump->hdr.len;
2272	wmb();
2273	ioa_cfg->sdt_state = DUMP_OBTAINED;
2274	LEAVE;
2275}
2276
2277#else
2278#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2279#endif
2280
2281/**
2282 * ipr_release_dump - Free adapter dump memory
2283 * @kref:	kref struct
2284 *
2285 * Return value:
2286 *	nothing
2287 **/
2288static void ipr_release_dump(struct kref *kref)
2289{
2290	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2291	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2292	unsigned long lock_flags = 0;
2293	int i;
2294
2295	ENTER;
2296	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2297	ioa_cfg->dump = NULL;
2298	ioa_cfg->sdt_state = INACTIVE;
2299	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2300
2301	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2302		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2303
2304	kfree(dump);
2305	LEAVE;
2306}
2307
2308/**
2309 * ipr_worker_thread - Worker thread
2310 * @work:		work struct (embedded in the ioa config struct)
2311 *
2312 * Called at task level from a work thread. This function takes care
2313 * of adding and removing devices from the mid-layer as configuration
2314 * changes are detected by the adapter.
2315 *
2316 * Return value:
2317 * 	nothing
2318 **/
2319static void ipr_worker_thread(struct work_struct *work)
2320{
2321	unsigned long lock_flags;
2322	struct ipr_resource_entry *res;
2323	struct scsi_device *sdev;
2324	struct ipr_dump *dump;
2325	struct ipr_ioa_cfg *ioa_cfg =
2326		container_of(work, struct ipr_ioa_cfg, work_q);
2327	u8 bus, target, lun;
2328	int did_work;
2329
2330	ENTER;
2331	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2332
2333	if (ioa_cfg->sdt_state == GET_DUMP) {
2334		dump = ioa_cfg->dump;
2335		if (!dump) {
2336			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2337			return;
2338		}
2339		kref_get(&dump->kref);
2340		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2341		ipr_get_ioa_dump(ioa_cfg, dump);
2342		kref_put(&dump->kref, ipr_release_dump);
2343
2344		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2345		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2346			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2347		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2348		return;
2349	}
2350
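	/*
	 * scsi_add_device()/scsi_remove_device() cannot be called with the
	 * host lock held, and dropping the lock means the resource list may
	 * change underneath us, so the scan below is restarted from the
	 * beginning after every device added or removed.
	 */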
2351restart:
2352	do {
2353		did_work = 0;
2354		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2355			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2356			return;
2357		}
2358
2359		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2360			if (res->del_from_ml && res->sdev) {
2361				did_work = 1;
2362				sdev = res->sdev;
2363				if (!scsi_device_get(sdev)) {
2364					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2365					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2366					scsi_remove_device(sdev);
2367					scsi_device_put(sdev);
2368					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2369				}
2370				break;
2371			}
2372		}
2373	} while(did_work);
2374
2375	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2376		if (res->add_to_ml) {
2377			bus = res->cfgte.res_addr.bus;
2378			target = res->cfgte.res_addr.target;
2379			lun = res->cfgte.res_addr.lun;
2380			res->add_to_ml = 0;
2381			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2382			scsi_add_device(ioa_cfg->host, bus, target, lun);
2383			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2384			goto restart;
2385		}
2386	}
2387
2388	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2389	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2390	LEAVE;
2391}
2392
2393#ifdef CONFIG_SCSI_IPR_TRACE
2394/**
2395 * ipr_read_trace - Dump the adapter trace
2396 * @kobj:		kobject struct
2397 * @buf:		buffer
2398 * @off:		offset
2399 * @count:		buffer size
2400 *
2401 * Return value:
2402 *	number of bytes printed to buffer
2403 **/
2404static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2405			      loff_t off, size_t count)
2406{
2407	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2408	struct Scsi_Host *shost = class_to_shost(cdev);
2409	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2410	unsigned long lock_flags = 0;
2411	int size = IPR_TRACE_SIZE;
2412	char *src = (char *)ioa_cfg->trace;
2413
2414	if (off > size)
2415		return 0;
2416	if (off + count > size) {
2417		size -= off;
2418		count = size;
2419	}
2420
2421	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2422	memcpy(buf, &src[off], count);
2423	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2424	return count;
2425}
2426
2427static struct bin_attribute ipr_trace_attr = {
2428	.attr =	{
2429		.name = "trace",
2430		.mode = S_IRUGO,
2431	},
2432	.size = 0,
2433	.read = ipr_read_trace,
2434};
2435#endif
2436
2437static const struct {
2438	enum ipr_cache_state state;
2439	char *name;
2440} cache_state [] = {
2441	{ CACHE_NONE, "none" },
2442	{ CACHE_DISABLED, "disabled" },
2443	{ CACHE_ENABLED, "enabled" }
2444};
2445
2446/**
2447 * ipr_show_write_caching - Show the write caching attribute
2448 * @class_dev:	class device struct
2449 * @buf:		buffer
2450 *
2451 * Return value:
2452 *	number of bytes printed to buffer
2453 **/
2454static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2455{
2456	struct Scsi_Host *shost = class_to_shost(class_dev);
2457	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2458	unsigned long lock_flags = 0;
2459	int i, len = 0;
2460
2461	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2462	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2463		if (cache_state[i].state == ioa_cfg->cache_state) {
2464			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2465			break;
2466		}
2467	}
2468	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2469	return len;
2470}
2471
2472
2473/**
2474 * ipr_store_write_caching - Enable/disable adapter write cache
2475 * @class_dev:	class_device struct
2476 * @buf:		buffer
2477 * @count:		buffer size
2478 *
2479 * This function will enable/disable adapter write cache.
2480 *
2481 * Return value:
2482 * 	count on success / other on failure
2483 **/
2484static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2485					const char *buf, size_t count)
2486{
2487	struct Scsi_Host *shost = class_to_shost(class_dev);
2488	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2489	unsigned long lock_flags = 0;
2490	enum ipr_cache_state new_state = CACHE_INVALID;
2491	int i;
2492
2493	if (!capable(CAP_SYS_ADMIN))
2494		return -EACCES;
2495	if (ioa_cfg->cache_state == CACHE_NONE)
2496		return -EINVAL;
2497
2498	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2499		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2500			new_state = cache_state[i].state;
2501			break;
2502		}
2503	}
2504
2505	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2506		return -EINVAL;
2507
2508	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2509	if (ioa_cfg->cache_state == new_state) {
2510		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2511		return count;
2512	}
2513
2514	ioa_cfg->cache_state = new_state;
2515	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2516		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2517	if (!ioa_cfg->in_reset_reload)
2518		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2519	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2520	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2521
2522	return count;
2523}
2524
2525static struct class_device_attribute ipr_ioa_cache_attr = {
2526	.attr = {
2527		.name =		"write_cache",
2528		.mode =		S_IRUGO | S_IWUSR,
2529	},
2530	.show = ipr_show_write_caching,
2531	.store = ipr_store_write_caching
2532};
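/*
 * Usage sketch (host number assumed): the "write_cache" attribute above is
 * exported alongside the other IOA attributes, so the cache state can
 * presumably be toggled from user space with something like
 *
 *	echo disabled > /sys/class/scsi_host/host0/write_cache
 */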
2533
2534/**
2535 * ipr_show_fw_version - Show the firmware version
2536 * @class_dev:	class device struct
2537 * @buf:		buffer
2538 *
2539 * Return value:
2540 *	number of bytes printed to buffer
2541 **/
2542static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2543{
2544	struct Scsi_Host *shost = class_to_shost(class_dev);
2545	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2546	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2547	unsigned long lock_flags = 0;
2548	int len;
2549
2550	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2551	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2552		       ucode_vpd->major_release, ucode_vpd->card_type,
2553		       ucode_vpd->minor_release[0],
2554		       ucode_vpd->minor_release[1]);
2555	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556	return len;
2557}
2558
2559static struct class_device_attribute ipr_fw_version_attr = {
2560	.attr = {
2561		.name =		"fw_version",
2562		.mode =		S_IRUGO,
2563	},
2564	.show = ipr_show_fw_version,
2565};
2566
2567/**
2568 * ipr_show_log_level - Show the adapter's error logging level
2569 * @class_dev:	class device struct
2570 * @buf:		buffer
2571 *
2572 * Return value:
2573 * 	number of bytes printed to buffer
2574 **/
2575static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2576{
2577	struct Scsi_Host *shost = class_to_shost(class_dev);
2578	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2579	unsigned long lock_flags = 0;
2580	int len;
2581
2582	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2583	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2584	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2585	return len;
2586}
2587
2588/**
2589 * ipr_store_log_level - Change the adapter's error logging level
2590 * @class_dev:	class device struct
2591 * @buf:		buffer
2592 *
2593 * Return value:
2594 * 	length of buffer consumed
2595 **/
2596static ssize_t ipr_store_log_level(struct class_device *class_dev,
2597				   const char *buf, size_t count)
2598{
2599	struct Scsi_Host *shost = class_to_shost(class_dev);
2600	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2601	unsigned long lock_flags = 0;
2602
2603	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2604	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2605	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2606	return strlen(buf);
2607}
2608
2609static struct class_device_attribute ipr_log_level_attr = {
2610	.attr = {
2611		.name =		"log_level",
2612		.mode =		S_IRUGO | S_IWUSR,
2613	},
2614	.show = ipr_show_log_level,
2615	.store = ipr_store_log_level
2616};
2617
2618/**
2619 * ipr_store_diagnostics - IOA Diagnostics interface
2620 * @class_dev:	class_device struct
2621 * @buf:		buffer
2622 * @count:		buffer size
2623 *
2624 * This function will reset the adapter and wait a reasonable
2625 * amount of time for any errors that the adapter might log.
2626 *
2627 * Return value:
2628 * 	count on success / other on failure
2629 **/
2630static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2631				     const char *buf, size_t count)
2632{
2633	struct Scsi_Host *shost = class_to_shost(class_dev);
2634	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2635	unsigned long lock_flags = 0;
2636	int rc = count;
2637
2638	if (!capable(CAP_SYS_ADMIN))
2639		return -EACCES;
2640
2641	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2642	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2643	ioa_cfg->errors_logged = 0;
2644	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2645
2646	if (ioa_cfg->in_reset_reload) {
2647		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2648		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2649
2650		/* Wait for a second for any errors to be logged */
2651		msleep(1000);
2652	} else {
2653		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2654		return -EIO;
2655	}
2656
2657	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2658	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2659		rc = -EIO;
2660	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2661
2662	return rc;
2663}
2664
2665static struct class_device_attribute ipr_diagnostics_attr = {
2666	.attr = {
2667		.name =		"run_diagnostics",
2668		.mode =		S_IWUSR,
2669	},
2670	.store = ipr_store_diagnostics
2671};
2672
2673/**
2674 * ipr_show_adapter_state - Show the adapter's state
2675 * @class_dev:	class device struct
2676 * @buf:		buffer
2677 *
2678 * Return value:
2679 * 	number of bytes printed to buffer
2680 **/
2681static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2682{
2683	struct Scsi_Host *shost = class_to_shost(class_dev);
2684	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2685	unsigned long lock_flags = 0;
2686	int len;
2687
2688	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2689	if (ioa_cfg->ioa_is_dead)
2690		len = snprintf(buf, PAGE_SIZE, "offline\n");
2691	else
2692		len = snprintf(buf, PAGE_SIZE, "online\n");
2693	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2694	return len;
2695}
2696
2697/**
2698 * ipr_store_adapter_state - Change adapter state
2699 * @class_dev:	class_device struct
2700 * @buf:		buffer
2701 * @count:		buffer size
2702 *
2703 * This function will change the adapter's state.
2704 *
2705 * Return value:
2706 * 	count on success / other on failure
2707 **/
2708static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2709				       const char *buf, size_t count)
2710{
2711	struct Scsi_Host *shost = class_to_shost(class_dev);
2712	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2713	unsigned long lock_flags;
2714	int result = count;
2715
2716	if (!capable(CAP_SYS_ADMIN))
2717		return -EACCES;
2718
2719	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2720	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2721		ioa_cfg->ioa_is_dead = 0;
2722		ioa_cfg->reset_retries = 0;
2723		ioa_cfg->in_ioa_bringdown = 0;
2724		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2725	}
2726	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2727	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2728
2729	return result;
2730}
2731
2732static struct class_device_attribute ipr_ioa_state_attr = {
2733	.attr = {
2734		.name =		"state",
2735		.mode =		S_IRUGO | S_IWUSR,
2736	},
2737	.show = ipr_show_adapter_state,
2738	.store = ipr_store_adapter_state
2739};
2740
2741/**
2742 * ipr_store_reset_adapter - Reset the adapter
2743 * @class_dev:	class_device struct
2744 * @buf:		buffer
2745 * @count:		buffer size
2746 *
2747 * This function will reset the adapter.
2748 *
2749 * Return value:
2750 * 	count on success / other on failure
2751 **/
2752static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2753				       const char *buf, size_t count)
2754{
2755	struct Scsi_Host *shost = class_to_shost(class_dev);
2756	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2757	unsigned long lock_flags;
2758	int result = count;
2759
2760	if (!capable(CAP_SYS_ADMIN))
2761		return -EACCES;
2762
2763	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2764	if (!ioa_cfg->in_reset_reload)
2765		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2766	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2767	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2768
2769	return result;
2770}
2771
2772static struct class_device_attribute ipr_ioa_reset_attr = {
2773	.attr = {
2774		.name =		"reset_host",
2775		.mode =		S_IWUSR,
2776	},
2777	.store = ipr_store_reset_adapter
2778};
2779
2780/**
2781 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2782 * @buf_len:		buffer length
2783 *
2784 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2785 * list to use for microcode download
2786 *
2787 * Return value:
2788 * 	pointer to sglist / NULL on failure
2789 **/
2790static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2791{
2792	int sg_size, order, bsize_elem, num_elem, i, j;
2793	struct ipr_sglist *sglist;
2794	struct scatterlist *scatterlist;
2795	struct page *page;
2796
2797	/* Get the minimum size per scatter/gather element */
2798	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2799
2800	/* Get the actual size per element */
2801	order = get_order(sg_size);
2802
2803	/* Determine the actual number of bytes per element */
2804	bsize_elem = PAGE_SIZE * (1 << order);
2805
2806	/* Determine the actual number of sg entries needed */
2807	if (buf_len % bsize_elem)
2808		num_elem = (buf_len / bsize_elem) + 1;
2809	else
2810		num_elem = buf_len / bsize_elem;
2811
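	/*
	 * sizeof(struct ipr_sglist) already accounts for one scatterlist
	 * entry, hence only num_elem - 1 additional entries are allocated.
	 */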
2812	/* Allocate a scatter/gather list for the DMA */
2813	sglist = kzalloc(sizeof(struct ipr_sglist) +
2814			 (sizeof(struct scatterlist) * (num_elem - 1)),
2815			 GFP_KERNEL);
2816
2817	if (sglist == NULL) {
2818		ipr_trace;
2819		return NULL;
2820	}
2821
2822	scatterlist = sglist->scatterlist;
2823
2824	sglist->order = order;
2825	sglist->num_sg = num_elem;
2826
2827	/* Allocate a bunch of sg elements */
2828	for (i = 0; i < num_elem; i++) {
2829		page = alloc_pages(GFP_KERNEL, order);
2830		if (!page) {
2831			ipr_trace;
2832
2833			/* Free up what we already allocated */
2834			for (j = i - 1; j >= 0; j--)
2835				__free_pages(scatterlist[j].page, order);
2836			kfree(sglist);
2837			return NULL;
2838		}
2839
2840		scatterlist[i].page = page;
2841	}
2842
2843	return sglist;
2844}
2845
2846/**
2847 * ipr_free_ucode_buffer - Frees a microcode download buffer
2848 * @sglist:		scatter/gather list pointer
2849 *
2850 * Free a DMA'able ucode download buffer previously allocated with
2851 * ipr_alloc_ucode_buffer
2852 *
2853 * Return value:
2854 * 	nothing
2855 **/
2856static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2857{
2858	int i;
2859
2860	for (i = 0; i < sglist->num_sg; i++)
2861		__free_pages(sglist->scatterlist[i].page, sglist->order);
2862
2863	kfree(sglist);
2864}
2865
2866/**
2867 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2868 * @sglist:		scatter/gather list pointer
2869 * @buffer:		buffer pointer
2870 * @len:		buffer length
2871 *
2872 * Copy a microcode image from a user buffer into a buffer allocated by
2873 * ipr_alloc_ucode_buffer
2874 *
2875 * Return value:
2876 * 	0 on success / other on failure
2877 **/
2878static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2879				 u8 *buffer, u32 len)
2880{
2881	int bsize_elem, i, result = 0;
2882	struct scatterlist *scatterlist;
2883	void *kaddr;
2884
2885	/* Determine the actual number of bytes per element */
2886	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2887
2888	scatterlist = sglist->scatterlist;
2889
2890	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2891		kaddr = kmap(scatterlist[i].page);
2892		memcpy(kaddr, buffer, bsize_elem);
2893		kunmap(scatterlist[i].page);
2894
2895		scatterlist[i].length = bsize_elem;
2896
2897		if (result != 0) {
2898			ipr_trace;
2899			return result;
2900		}
2901	}
2902
2903	if (len % bsize_elem) {
2904		kaddr = kmap(scatterlist[i].page);
2905		memcpy(kaddr, buffer, len % bsize_elem);
2906		kunmap(scatterlist[i].page);
2907
2908		scatterlist[i].length = len % bsize_elem;
2909	}
2910
2911	sglist->buffer_len = len;
2912	return result;
2913}
2914
2915/**
2916 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2917 * @ipr_cmd:	ipr command struct
2918 * @sglist:		scatter/gather list
2919 *
2920 * Builds a microcode download IOA data list (IOADL).
2921 *
2922 **/
2923static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2924				  struct ipr_sglist *sglist)
2925{
2926	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2927	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2928	struct scatterlist *scatterlist = sglist->scatterlist;
2929	int i;
2930
2931	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2932	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2933	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2934	ioarcb->write_ioadl_len =
2935		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2936
2937	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2938		ioadl[i].flags_and_data_len =
2939			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2940		ioadl[i].address =
2941			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2942	}
2943
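	/* Mark the final descriptor so the IOA knows where the list ends */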
2944	ioadl[i-1].flags_and_data_len |=
2945		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2946}
2947
2948/**
2949 * ipr_update_ioa_ucode - Update IOA's microcode
2950 * @ioa_cfg:	ioa config struct
2951 * @sglist:		scatter/gather list
2952 *
2953 * Initiate an adapter reset to update the IOA's microcode
2954 *
2955 * Return value:
2956 * 	0 on success / -EIO on failure
2957 **/
2958static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2959				struct ipr_sglist *sglist)
2960{
2961	unsigned long lock_flags;
2962
2963	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2964
2965	if (ioa_cfg->ucode_sglist) {
2966		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2967		dev_err(&ioa_cfg->pdev->dev,
2968			"Microcode download already in progress\n");
2969		return -EIO;
2970	}
2971
2972	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2973					sglist->num_sg, DMA_TO_DEVICE);
2974
2975	if (!sglist->num_dma_sg) {
2976		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2977		dev_err(&ioa_cfg->pdev->dev,
2978			"Failed to map microcode download buffer!\n");
2979		return -EIO;
2980	}
2981
2982	ioa_cfg->ucode_sglist = sglist;
2983	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2984	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2985	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2986
2987	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2988	ioa_cfg->ucode_sglist = NULL;
2989	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2990	return 0;
2991}
2992
2993/**
2994 * ipr_store_update_fw - Update the firmware on the adapter
2995 * @class_dev:	class_device struct
2996 * @buf:		buffer
2997 * @count:		buffer size
2998 *
2999 * This function will update the firmware on the adapter.
3000 *
3001 * Return value:
3002 * 	count on success / other on failure
3003 **/
3004static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3005				       const char *buf, size_t count)
3006{
3007	struct Scsi_Host *shost = class_to_shost(class_dev);
3008	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3009	struct ipr_ucode_image_header *image_hdr;
3010	const struct firmware *fw_entry;
3011	struct ipr_sglist *sglist;
3012	char fname[100];
3013	char *src;
3014	int len, result, dnld_size;
3015
3016	if (!capable(CAP_SYS_ADMIN))
3017		return -EACCES;
3018
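	/* Copy the firmware file name, dropping the trailing newline */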
3019	len = snprintf(fname, 99, "%s", buf);
3020	fname[len-1] = '\0';
3021
3022	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3023		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3024		return -EIO;
3025	}
3026
3027	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3028
3029	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3030	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3031	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3032		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3033		release_firmware(fw_entry);
3034		return -EINVAL;
3035	}
3036
3037	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3038	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3039	sglist = ipr_alloc_ucode_buffer(dnld_size);
3040
3041	if (!sglist) {
3042		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3043		release_firmware(fw_entry);
3044		return -ENOMEM;
3045	}
3046
3047	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3048
3049	if (result) {
3050		dev_err(&ioa_cfg->pdev->dev,
3051			"Microcode buffer copy to DMA buffer failed\n");
3052		goto out;
3053	}
3054
3055	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3056
3057	if (!result)
3058		result = count;
3059out:
3060	ipr_free_ucode_buffer(sglist);
3061	release_firmware(fw_entry);
3062	return result;
3063}
3064
3065static struct class_device_attribute ipr_update_fw_attr = {
3066	.attr = {
3067		.name =		"update_fw",
3068		.mode =		S_IWUSR,
3069	},
3070	.store = ipr_store_update_fw
3071};
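/*
 * Example (file name and host number assumed): the microcode image is
 * fetched with request_firmware(), so it must be visible to the firmware
 * loader (typically under /lib/firmware), and the download is presumably
 * kicked off with something like
 *
 *	echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */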
3072
3073static struct class_device_attribute *ipr_ioa_attrs[] = {
3074	&ipr_fw_version_attr,
3075	&ipr_log_level_attr,
3076	&ipr_diagnostics_attr,
3077	&ipr_ioa_state_attr,
3078	&ipr_ioa_reset_attr,
3079	&ipr_update_fw_attr,
3080	&ipr_ioa_cache_attr,
3081	NULL,
3082};
3083
3084#ifdef CONFIG_SCSI_IPR_DUMP
3085/**
3086 * ipr_read_dump - Dump the adapter
3087 * @kobj:		kobject struct
3088 * @buf:		buffer
3089 * @off:		offset
3090 * @count:		buffer size
3091 *
3092 * Return value:
3093 *	number of bytes printed to buffer
3094 **/
3095static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3096			      loff_t off, size_t count)
3097{
3098	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3099	struct Scsi_Host *shost = class_to_shost(cdev);
3100	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3101	struct ipr_dump *dump;
3102	unsigned long lock_flags = 0;
3103	char *src;
3104	int len;
3105	size_t rc = count;
3106
3107	if (!capable(CAP_SYS_ADMIN))
3108		return -EACCES;
3109
3110	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3111	dump = ioa_cfg->dump;
3112
3113	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3114		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3115		return 0;
3116	}
3117	kref_get(&dump->kref);
3118	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3119
3120	if (off > dump->driver_dump.hdr.len) {
3121		kref_put(&dump->kref, ipr_release_dump);
3122		return 0;
3123	}
3124
3125	if (off + count > dump->driver_dump.hdr.len) {
3126		count = dump->driver_dump.hdr.len - off;
3127		rc = count;
3128	}
3129
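	/*
	 * The dump image is presented as three concatenated regions: the
	 * driver dump header, the IOA dump header, and the per-page IOA
	 * data buffers.  Copy from whichever region the current offset
	 * falls in.
	 */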
3130	if (count && off < sizeof(dump->driver_dump)) {
3131		if (off + count > sizeof(dump->driver_dump))
3132			len = sizeof(dump->driver_dump) - off;
3133		else
3134			len = count;
3135		src = (u8 *)&dump->driver_dump + off;
3136		memcpy(buf, src, len);
3137		buf += len;
3138		off += len;
3139		count -= len;
3140	}
3141
3142	off -= sizeof(dump->driver_dump);
3143
3144	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3145		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3146			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3147		else
3148			len = count;
3149		src = (u8 *)&dump->ioa_dump + off;
3150		memcpy(buf, src, len);
3151		buf += len;
3152		off += len;
3153		count -= len;
3154	}
3155
3156	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3157
3158	while (count) {
3159		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3160			len = PAGE_ALIGN(off) - off;
3161		else
3162			len = count;
3163		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3164		src += off & ~PAGE_MASK;
3165		memcpy(buf, src, len);
3166		buf += len;
3167		off += len;
3168		count -= len;
3169	}
3170
3171	kref_put(&dump->kref, ipr_release_dump);
3172	return rc;
3173}
3174
3175/**
3176 * ipr_alloc_dump - Prepare for adapter dump
3177 * @ioa_cfg:	ioa config struct
3178 *
3179 * Return value:
3180 *	0 on success / other on failure
3181 **/
3182static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3183{
3184	struct ipr_dump *dump;
3185	unsigned long lock_flags = 0;
3186
3187	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3188
3189	if (!dump) {
3190		ipr_err("Dump memory allocation failed\n");
3191		return -ENOMEM;
3192	}
3193
3194	kref_init(&dump->kref);
3195	dump->ioa_cfg = ioa_cfg;
3196
3197	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3198
3199	if (INACTIVE != ioa_cfg->sdt_state) {
3200		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3201		kfree(dump);
3202		return 0;
3203	}
3204
3205	ioa_cfg->dump = dump;
3206	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3207	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3208		ioa_cfg->dump_taken = 1;
3209		schedule_work(&ioa_cfg->work_q);
3210	}
3211	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3212
3213	return 0;
3214}
3215
3216/**
3217 * ipr_free_dump - Free adapter dump memory
3218 * @ioa_cfg:	ioa config struct
3219 *
3220 * Return value:
3221 *	0 on success / other on failure
3222 **/
3223static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3224{
3225	struct ipr_dump *dump;
3226	unsigned long lock_flags = 0;
3227
3228	ENTER;
3229
3230	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3231	dump = ioa_cfg->dump;
3232	if (!dump) {
3233		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3234		return 0;
3235	}
3236
3237	ioa_cfg->dump = NULL;
3238	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3239
3240	kref_put(&dump->kref, ipr_release_dump);
3241
3242	LEAVE;
3243	return 0;
3244}
3245
3246/**
3247 * ipr_write_dump - Setup dump state of adapter
3248 * @kobj:		kobject struct
3249 * @buf:		buffer
3250 * @off:		offset
3251 * @count:		buffer size
3252 *
3253 * Return value:
3254 *	count on success / other on failure
3255 **/
3256static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3257			      loff_t off, size_t count)
3258{
3259	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3260	struct Scsi_Host *shost = class_to_shost(cdev);
3261	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3262	int rc;
3263
3264	if (!capable(CAP_SYS_ADMIN))
3265		return -EACCES;
3266
3267	if (buf[0] == '1')
3268		rc = ipr_alloc_dump(ioa_cfg);
3269	else if (buf[0] == '0')
3270		rc = ipr_free_dump(ioa_cfg);
3271	else
3272		return -EINVAL;
3273
3274	if (rc)
3275		return rc;
3276	else
3277		return count;
3278}
3279
3280static struct bin_attribute ipr_dump_attr = {
3281	.attr =	{
3282		.name = "dump",
3283		.mode = S_IRUSR | S_IWUSR,
3284	},
3285	.size = 0,
3286	.read = ipr_read_dump,
3287	.write = ipr_write_dump
3288};
3289#else
3290static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3291#endif
3292
3293/**
3294 * ipr_change_queue_depth - Change the device's queue depth
3295 * @sdev:	scsi device struct
3296 * @qdepth:	depth to set
3297 *
3298 * Return value:
3299 * 	actual depth set
3300 **/
3301static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3302{
3303	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3304	struct ipr_resource_entry *res;
3305	unsigned long lock_flags = 0;
3306
3307	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3308	res = (struct ipr_resource_entry *)sdev->hostdata;
3309
3310	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3311		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3312	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313
3314	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3315	return sdev->queue_depth;
3316}
3317
3318/**
3319 * ipr_change_queue_type - Change the device's queue type
3320 * @sdev:		scsi device struct
3321 * @tag_type:	type of tags to use
3322 *
3323 * Return value:
3324 * 	actual queue type set
3325 **/
3326static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3327{
3328	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3329	struct ipr_resource_entry *res;
3330	unsigned long lock_flags = 0;
3331
3332	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3333	res = (struct ipr_resource_entry *)sdev->hostdata;
3334
3335	if (res) {
3336		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3337			/*
3338			 * We don't bother quiescing the device here since the
3339			 * adapter firmware does it for us.
3340			 */
3341			scsi_set_tag_type(sdev, tag_type);
3342
3343			if (tag_type)
3344				scsi_activate_tcq(sdev, sdev->queue_depth);
3345			else
3346				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3347		} else
3348			tag_type = 0;
3349	} else
3350		tag_type = 0;
3351
3352	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353	return tag_type;
3354}
3355
3356/**
3357 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3358 * @dev:	device struct
3359 * @buf:	buffer
3360 *
3361 * Return value:
3362 * 	number of bytes printed to buffer
3363 **/
3364static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3365{
3366	struct scsi_device *sdev = to_scsi_device(dev);
3367	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3368	struct ipr_resource_entry *res;
3369	unsigned long lock_flags = 0;
3370	ssize_t len = -ENXIO;
3371
3372	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3373	res = (struct ipr_resource_entry *)sdev->hostdata;
3374	if (res)
3375		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3376	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377	return len;
3378}
3379
3380static struct device_attribute ipr_adapter_handle_attr = {
3381	.attr = {
3382		.name = 	"adapter_handle",
3383		.mode =		S_IRUSR,
3384	},
3385	.show = ipr_show_adapter_handle
3386};
3387
3388static struct device_attribute *ipr_dev_attrs[] = {
3389	&ipr_adapter_handle_attr,
3390	NULL,
3391};
3392
3393/**
3394 * ipr_biosparam - Return the HSC mapping
3395 * @sdev:			scsi device struct
3396 * @block_device:	block device pointer
3397 * @capacity:		capacity of the device
3398 * @parm:			Array containing returned HSC values.
3399 *
3400 * This function generates the HSC parms that fdisk uses.
3401 * We want to make sure we return something that places partitions
3402 * on 4k boundaries for best performance with the IOA.
3403 *
3404 * Return value:
3405 * 	0 on success
3406 **/
3407static int ipr_biosparam(struct scsi_device *sdev,
3408			 struct block_device *block_device,
3409			 sector_t capacity, int *parm)
3410{
3411	int heads, sectors;
3412	sector_t cylinders;
3413
3414	heads = 128;
3415	sectors = 32;
3416
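	/*
	 * 128 heads * 32 sectors/track = 4096 sectors per cylinder, so
	 * partitions aligned on cylinder boundaries are also aligned on
	 * 4k boundaries.
	 */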
3417	cylinders = capacity;
3418	sector_div(cylinders, (128 * 32));
3419
3420	/* return result */
3421	parm[0] = heads;
3422	parm[1] = sectors;
3423	parm[2] = cylinders;
3424
3425	return 0;
3426}
3427
3428/**
3429 * ipr_find_starget - Find target based on bus/target.
3430 * @starget:	scsi target struct
3431 *
3432 * Return value:
3433 * 	resource entry pointer if found / NULL if not found
3434 **/
3435static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3436{
3437	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3438	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3439	struct ipr_resource_entry *res;
3440
3441	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3442		if ((res->cfgte.res_addr.bus == starget->channel) &&
3443		    (res->cfgte.res_addr.target == starget->id) &&
3444		    (res->cfgte.res_addr.lun == 0)) {
3445			return res;
3446		}
3447	}
3448
3449	return NULL;
3450}
3451
3452static struct ata_port_info sata_port_info;
3453
3454/**
3455 * ipr_target_alloc - Prepare for commands to a SCSI target
3456 * @starget:	scsi target struct
3457 *
3458 * If the device is a SATA device, this function allocates an
3459 * ATA port with libata, else it does nothing.
3460 *
3461 * Return value:
3462 * 	0 on success / non-0 on failure
3463 **/
3464static int ipr_target_alloc(struct scsi_target *starget)
3465{
3466	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3467	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3468	struct ipr_sata_port *sata_port;
3469	struct ata_port *ap;
3470	struct ipr_resource_entry *res;
3471	unsigned long lock_flags;
3472
3473	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3474	res = ipr_find_starget(starget);
3475	starget->hostdata = NULL;
3476
3477	if (res && ipr_is_gata(res)) {
3478		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3479		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3480		if (!sata_port)
3481			return -ENOMEM;
3482
3483		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3484		if (ap) {
3485			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3486			sata_port->ioa_cfg = ioa_cfg;
3487			sata_port->ap = ap;
3488			sata_port->res = res;
3489
3490			res->sata_port = sata_port;
3491			ap->private_data = sata_port;
3492			starget->hostdata = sata_port;
3493		} else {
3494			kfree(sata_port);
3495			return -ENOMEM;
3496		}
3497	}
3498	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3499
3500	return 0;
3501}
3502
3503/**
3504 * ipr_target_destroy - Destroy a SCSI target
3505 * @starget:	scsi target struct
3506 *
3507 * If the device was a SATA device, this function frees the libata
3508 * ATA port, else it does nothing.
3509 *
3510 **/
3511static void ipr_target_destroy(struct scsi_target *starget)
3512{
3513	struct ipr_sata_port *sata_port = starget->hostdata;
3514
3515	if (sata_port) {
3516		starget->hostdata = NULL;
3517		ata_sas_port_destroy(sata_port->ap);
3518		kfree(sata_port);
3519	}
3520}
3521
3522/**
3523 * ipr_find_sdev - Find device based on bus/target/lun.
3524 * @sdev:	scsi device struct
3525 *
3526 * Return value:
3527 * 	resource entry pointer if found / NULL if not found
3528 **/
3529static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3530{
3531	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3532	struct ipr_resource_entry *res;
3533
3534	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3535		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3536		    (res->cfgte.res_addr.target == sdev->id) &&
3537		    (res->cfgte.res_addr.lun == sdev->lun))
3538			return res;
3539	}
3540
3541	return NULL;
3542}
3543
3544/**
3545 * ipr_slave_destroy - Unconfigure a SCSI device
3546 * @sdev:	scsi device struct
3547 *
3548 * Return value:
3549 * 	nothing
3550 **/
3551static void ipr_slave_destroy(struct scsi_device *sdev)
3552{
3553	struct ipr_resource_entry *res;
3554	struct ipr_ioa_cfg *ioa_cfg;
3555	unsigned long lock_flags = 0;
3556
3557	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3558
3559	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3560	res = (struct ipr_resource_entry *) sdev->hostdata;
3561	if (res) {
3562		if (res->sata_port)
3563			ata_port_disable(res->sata_port->ap);
3564		sdev->hostdata = NULL;
3565		res->sdev = NULL;
3566		res->sata_port = NULL;
3567	}
3568	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3569}
3570
3571/**
3572 * ipr_slave_configure - Configure a SCSI device
3573 * @sdev:	scsi device struct
3574 *
3575 * This function configures the specified scsi device.
3576 *
3577 * Return value:
3578 * 	0 on success
3579 **/
3580static int ipr_slave_configure(struct scsi_device *sdev)
3581{
3582	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3583	struct ipr_resource_entry *res;
3584	unsigned long lock_flags = 0;
3585
3586	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3587	res = sdev->hostdata;
3588	if (res) {
3589		if (ipr_is_af_dasd_device(res))
3590			sdev->type = TYPE_RAID;
3591		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3592			sdev->scsi_level = 4;
3593			sdev->no_uld_attach = 1;
3594		}
3595		if (ipr_is_vset_device(res)) {
3596			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3597			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3598		}
3599		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3600			sdev->allow_restart = 1;
3601		if (ipr_is_gata(res) && res->sata_port) {
3602			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3603			ata_sas_slave_configure(sdev, res->sata_port->ap);
3604		} else {
3605			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3606		}
3607	}
3608	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3609	return 0;
3610}
3611
3612/**
3613 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3614 * @sdev:	scsi device struct
3615 *
3616 * This function initializes an ATA port so that future commands
3617 * sent through queuecommand will work.
3618 *
3619 * Return value:
3620 * 	0 on success
3621 **/
3622static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3623{
3624	struct ipr_sata_port *sata_port = NULL;
3625	int rc = -ENXIO;
3626
3627	ENTER;
3628	if (sdev->sdev_target)
3629		sata_port = sdev->sdev_target->hostdata;
3630	if (sata_port)
3631		rc = ata_sas_port_init(sata_port->ap);
3632	if (rc)
3633		ipr_slave_destroy(sdev);
3634
3635	LEAVE;
3636	return rc;
3637}
3638
3639/**
3640 * ipr_slave_alloc - Prepare for commands to a device.
3641 * @sdev:	scsi device struct
3642 *
3643 * This function saves a pointer to the resource entry
3644 * in the scsi device struct if the device exists. We
3645 * can then use this pointer in ipr_queuecommand when
3646 * handling new commands.
3647 *
3648 * Return value:
3649 * 	0 on success / -ENXIO if device does not exist
3650 **/
3651static int ipr_slave_alloc(struct scsi_device *sdev)
3652{
3653	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3654	struct ipr_resource_entry *res;
3655	unsigned long lock_flags;
3656	int rc = -ENXIO;
3657
3658	sdev->hostdata = NULL;
3659
3660	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3661
3662	res = ipr_find_sdev(sdev);
3663	if (res) {
3664		res->sdev = sdev;
3665		res->add_to_ml = 0;
3666		res->in_erp = 0;
3667		sdev->hostdata = res;
3668		if (!ipr_is_naca_model(res))
3669			res->needs_sync_complete = 1;
3670		rc = 0;
3671		if (ipr_is_gata(res)) {
3672			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3673			return ipr_ata_slave_alloc(sdev);
3674		}
3675	}
3676
3677	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3678
3679	return rc;
3680}
3681
3682/**
3683 * __ipr_eh_host_reset - Reset the host adapter
3684 * @scsi_cmd:	scsi command struct
3685 *
3686 * Return value:
3687 * 	SUCCESS / FAILED
3688 **/
3689static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3690{
3691	struct ipr_ioa_cfg *ioa_cfg;
3692	int rc;
3693
3694	ENTER;
3695	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3696
3697	dev_err(&ioa_cfg->pdev->dev,
3698		"Adapter being reset as a result of error recovery.\n");
3699
3700	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3701		ioa_cfg->sdt_state = GET_DUMP;
3702
3703	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3704
3705	LEAVE;
3706	return rc;
3707}
3708
3709static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3710{
3711	int rc;
3712
3713	spin_lock_irq(cmd->device->host->host_lock);
3714	rc = __ipr_eh_host_reset(cmd);
3715	spin_unlock_irq(cmd->device->host->host_lock);
3716
3717	return rc;
3718}
3719
3720/**
3721 * ipr_device_reset - Reset the device
3722 * @ioa_cfg:	ioa config struct
3723 * @res:		resource entry struct
3724 *
3725 * This function issues a device reset to the affected device.
3726 * If the device is a SCSI device, a LUN reset will be sent
3727 * to the device first. If that does not work, a target reset
3728 * will be sent. If the device is a SATA device, a PHY reset will
3729 * be sent.
3730 *
3731 * Return value:
3732 *	0 on success / non-zero on failure
3733 **/
3734static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3735			    struct ipr_resource_entry *res)
3736{
3737	struct ipr_cmnd *ipr_cmd;
3738	struct ipr_ioarcb *ioarcb;
3739	struct ipr_cmd_pkt *cmd_pkt;
3740	struct ipr_ioarcb_ata_regs *regs;
3741	u32 ioasc;
3742
3743	ENTER;
3744	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3745	ioarcb = &ipr_cmd->ioarcb;
3746	cmd_pkt = &ioarcb->cmd_pkt;
3747	regs = &ioarcb->add_data.u.regs;
3748
3749	ioarcb->res_handle = res->cfgte.res_handle;
3750	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3751	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3752	if (ipr_is_gata(res)) {
3753		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3754		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3755		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3756	}
3757
3758	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3759	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3760	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3761	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3762		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3763		       sizeof(struct ipr_ioasa_gata));
3764
3765	LEAVE;
3766	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3767}
3768
3769/**
3770 * ipr_sata_reset - Reset the SATA port
3771 * @ap:		SATA port to reset
3772 * @classes:	class of the attached device
3773 *
3774 * This function issues a SATA phy reset to the affected ATA port.
3775 *
3776 * Return value:
3777 *	0 on success / non-zero on failure
3778 **/
3779static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
3780{
3781	struct ipr_sata_port *sata_port = ap->private_data;
3782	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3783	struct ipr_resource_entry *res;
3784	unsigned long lock_flags = 0;
3785	int rc = -ENXIO;
3786
3787	ENTER;
3788	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
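	/* If an adapter reset/reload is in progress, drop the lock and wait
	 * for it to finish before issuing the device reset. */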
3789	while (ioa_cfg->in_reset_reload) {
3790		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3791		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3792		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3793	}
3794
3795	res = sata_port->res;
3796	if (res) {
3797		rc = ipr_device_reset(ioa_cfg, res);
3798		switch (res->cfgte.proto) {
3799		case IPR_PROTO_SATA:
3800		case IPR_PROTO_SAS_STP:
3801			*classes = ATA_DEV_ATA;
3802			break;
3803		case IPR_PROTO_SATA_ATAPI:
3804		case IPR_PROTO_SAS_STP_ATAPI:
3805			*classes = ATA_DEV_ATAPI;
3806			break;
3807		default:
3808			*classes = ATA_DEV_UNKNOWN;
3809			break;
3810		}
3811	}
3812
3813	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3814	LEAVE;
3815	return rc;
3816}
3817
3818/**
3819 * __ipr_eh_dev_reset - Reset the device
3820 * @scsi_cmd:	scsi command struct
3821 *
3822 * This function issues a device reset to the affected device.
3823 * A LUN reset will be sent to the device first. If that does
3824 * not work, a target reset will be sent.
3825 *
3826 * Return value:
3827 *	SUCCESS / FAILED
3828 **/
3829static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3830{
3831	struct ipr_cmnd *ipr_cmd;
3832	struct ipr_ioa_cfg *ioa_cfg;
3833	struct ipr_resource_entry *res;
3834	struct ata_port *ap;
3835	int rc = 0;
3836
3837	ENTER;
3838	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3839	res = scsi_cmd->device->hostdata;
3840
3841	if (!res)
3842		return FAILED;
3843
3844	/*
3845	 * If we are currently going through reset/reload, return failed. This will force the
3846	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3847	 * reset to complete.
3848	 */
3849	if (ioa_cfg->in_reset_reload)
3850		return FAILED;
3851	if (ioa_cfg->ioa_is_dead)
3852		return FAILED;
3853
3854	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3855		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3856			if (ipr_cmd->scsi_cmd)
3857				ipr_cmd->done = ipr_scsi_eh_done;
3858			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3859				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3860				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3861			}
3862		}
3863	}
3864
3865	res->resetting_device = 1;
3866	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3867
3868	if (ipr_is_gata(res) && res->sata_port) {
3869		ap = res->sata_port->ap;
3870		spin_unlock_irq(scsi_cmd->device->host->host_lock);
3871		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3872		spin_lock_irq(scsi_cmd->device->host->host_lock);
3873	} else
3874		rc = ipr_device_reset(ioa_cfg, res);
3875	res->resetting_device = 0;
3876
3877	LEAVE;
3878	return (rc ? FAILED : SUCCESS);
3879}
3880
3881static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3882{
3883	int rc;
3884
3885	spin_lock_irq(cmd->device->host->host_lock);
3886	rc = __ipr_eh_dev_reset(cmd);
3887	spin_unlock_irq(cmd->device->host->host_lock);
3888
3889	return rc;
3890}
3891
3892/**
3893 * ipr_bus_reset_done - Op done function for bus reset.
3894 * @ipr_cmd:	ipr command struct
3895 *
3896 * This function is the op done function for a bus reset
3897 *
3898 * Return value:
3899 * 	none
3900 **/
3901static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3902{
3903	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3904	struct ipr_resource_entry *res;
3905
3906	ENTER;
3907	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3908		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3909			    sizeof(res->cfgte.res_handle))) {
3910			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3911			break;
3912		}
3913	}
3914
3915	/*
3916	 * If abort has not completed, indicate the reset has, else call the
3917	 * abort's done function to wake the sleeping eh thread
3918	 */
3919	if (ipr_cmd->sibling->sibling)
3920		ipr_cmd->sibling->sibling = NULL;
3921	else
3922		ipr_cmd->sibling->done(ipr_cmd->sibling);
3923
3924	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3925	LEAVE;
3926}
3927
3928/**
3929 * ipr_abort_timeout - An abort task has timed out
3930 * @ipr_cmd:	ipr command struct
3931 *
3932 * This function handles when an abort task times out. If this
3933 * happens we issue a bus reset since we have resources tied
3934 * up that must be freed before returning to the midlayer.
3935 *
3936 * Return value:
3937 *	none
3938 **/
3939static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3940{
3941	struct ipr_cmnd *reset_cmd;
3942	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3943	struct ipr_cmd_pkt *cmd_pkt;
3944	unsigned long lock_flags = 0;
3945
3946	ENTER;
3947	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3948	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3949		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950		return;
3951	}
3952
3953	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3954	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3955	ipr_cmd->sibling = reset_cmd;
3956	reset_cmd->sibling = ipr_cmd;
3957	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3958	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3959	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3960	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3961	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3962
3963	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3964	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965	LEAVE;
3966}
3967
3968/**
3969 * ipr_cancel_op - Cancel specified op
3970 * @scsi_cmd:	scsi command struct
3971 *
3972 * This function cancels specified op.
3973 *
3974 * Return value:
3975 *	SUCCESS / FAILED
3976 **/
3977static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3978{
3979	struct ipr_cmnd *ipr_cmd;
3980	struct ipr_ioa_cfg *ioa_cfg;
3981	struct ipr_resource_entry *res;
3982	struct ipr_cmd_pkt *cmd_pkt;
3983	u32 ioasc;
3984	int op_found = 0;
3985
3986	ENTER;
3987	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3988	res = scsi_cmd->device->hostdata;
3989
3990	/* If we are currently going through reset/reload, return failed.
3991	 * This will force the mid-layer to call ipr_eh_host_reset,
3992	 * which will then go to sleep and wait for the reset to complete
3993	 */
3994	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3995		return FAILED;
3996	if (!res || !ipr_is_gscsi(res))
3997		return FAILED;
3998
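	/*
	 * If the op is still outstanding on the adapter, redirect its
	 * completion to the error handling done routine before issuing the
	 * cancel. If it has already completed there is nothing to abort.
	 */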
3999	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4000		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4001			ipr_cmd->done = ipr_scsi_eh_done;
4002			op_found = 1;
4003			break;
4004		}
4005	}
4006
4007	if (!op_found)
4008		return SUCCESS;
4009
4010	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4011	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4012	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4013	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4014	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4015	ipr_cmd->u.sdev = scsi_cmd->device;
4016
4017	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4018		    scsi_cmd->cmnd[0]);
4019	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4020	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4021
4022	/*
4023	 * If the abort task timed out and we sent a bus reset, we will get
4024	 * one of the following responses to the abort
4025	 */
4026	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4027		ioasc = 0;
4028		ipr_trace;
4029	}
4030
4031	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4032	if (!ipr_is_naca_model(res))
4033		res->needs_sync_complete = 1;
4034
4035	LEAVE;
4036	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4037}
4038
4039/**
4040 * ipr_eh_abort - Abort a single op
4041 * @scsi_cmd:	scsi command struct
4042 *
4043 * Return value:
4044 * 	SUCCESS / FAILED
4045 **/
4046static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4047{
4048	unsigned long flags;
4049	int rc;
4050
4051	ENTER;
4052
4053	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4054	rc = ipr_cancel_op(scsi_cmd);
4055	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4056
4057	LEAVE;
4058	return rc;
4059}
4060
4061/**
4062 * ipr_handle_other_interrupt - Handle "other" interrupts
4063 * @ioa_cfg:	ioa config struct
4064 * @int_reg:	interrupt register
4065 *
4066 * Return value:
4067 * 	IRQ_NONE / IRQ_HANDLED
4068 **/
4069static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4070					      volatile u32 int_reg)
4071{
4072	irqreturn_t rc = IRQ_HANDLED;
4073
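	/*
	 * A transition-to-operational interrupt means the adapter has come
	 * back up: mask and clear it, then advance the in-progress adapter
	 * reset job. Anything else is treated as a unit check or a permanent
	 * IOA failure and triggers an adapter reset.
	 */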
4074	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4075		/* Mask the interrupt */
4076		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4077
4078		/* Clear the interrupt */
4079		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4080		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4081
4082		list_del(&ioa_cfg->reset_cmd->queue);
4083		del_timer(&ioa_cfg->reset_cmd->timer);
4084		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4085	} else {
4086		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4087			ioa_cfg->ioa_unit_checked = 1;
4088		else
4089			dev_err(&ioa_cfg->pdev->dev,
4090				"Permanent IOA failure. 0x%08X\n", int_reg);
4091
4092		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4093			ioa_cfg->sdt_state = GET_DUMP;
4094
4095		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4096		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4097	}
4098
4099	return rc;
4100}
4101
4102/**
4103 * ipr_isr - Interrupt service routine
4104 * @irq:	irq number
4105 * @devp:	pointer to ioa config struct
4106 *
4107 * Return value:
4108 * 	IRQ_NONE / IRQ_HANDLED
4109 **/
4110static irqreturn_t ipr_isr(int irq, void *devp)
4111{
4112	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4113	unsigned long lock_flags = 0;
4114	volatile u32 int_reg, int_mask_reg;
4115	u32 ioasc;
4116	u16 cmd_index;
4117	struct ipr_cmnd *ipr_cmd;
4118	irqreturn_t rc = IRQ_NONE;
4119
4120	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4121
4122	/* If interrupts are disabled, ignore the interrupt */
4123	if (!ioa_cfg->allow_interrupts) {
4124		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4125		return IRQ_NONE;
4126	}
4127
4128	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4129	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4130
4131	/* If an interrupt on the adapter did not occur, ignore it */
4132	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4133		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4134		return IRQ_NONE;
4135	}
4136
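	/*
	 * Process any responses the adapter has posted to the host
	 * request/response queue (HRRQ). Each 32-bit entry carries a toggle
	 * bit and a command block index; an entry is valid for this pass only
	 * while its toggle bit matches ioa_cfg->toggle_bit, which flips each
	 * time the queue wraps.
	 */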
4137	while (1) {
4138		ipr_cmd = NULL;
4139
4140		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4141		       ioa_cfg->toggle_bit) {
4142
4143			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4144				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4145
4146			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4147				ioa_cfg->errors_logged++;
4148				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4149
4150				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4151					ioa_cfg->sdt_state = GET_DUMP;
4152
4153				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4154				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4155				return IRQ_HANDLED;
4156			}
4157
4158			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4159
4160			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4161
4162			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4163
4164			list_del(&ipr_cmd->queue);
4165			del_timer(&ipr_cmd->timer);
4166			ipr_cmd->done(ipr_cmd);
4167
4168			rc = IRQ_HANDLED;
4169
4170			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4171				ioa_cfg->hrrq_curr++;
4172			} else {
4173				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4174				ioa_cfg->toggle_bit ^= 1u;
4175			}
4176		}
4177
4178		if (ipr_cmd != NULL) {
4179			/* Clear the PCI interrupt */
4180			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4181			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4182		} else
4183			break;
4184	}
4185
4186	if (unlikely(rc == IRQ_NONE))
4187		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4188
4189	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4190	return rc;
4191}
4192
4193/**
4194 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4195 * @ioa_cfg:	ioa config struct
4196 * @ipr_cmd:	ipr command struct
4197 *
4198 * Return value:
4199 * 	0 on success / -1 on failure
4200 **/
4201static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4202			   struct ipr_cmnd *ipr_cmd)
4203{
4204	int i;
4205	struct scatterlist *sglist;
4206	u32 length;
4207	u32 ioadl_flags = 0;
4208	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4209	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4210	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4211
4212	length = scsi_cmd->request_bufflen;
4213
4214	if (length == 0)
4215		return 0;
4216
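	/*
	 * Map the data buffer and describe it to the adapter as a list of
	 * IOADL descriptors (address/length pairs). Scatter/gather requests
	 * get one descriptor per mapped segment; single-buffer requests use a
	 * single descriptor. In both cases the final descriptor is flagged
	 * with IPR_IOADL_FLAGS_LAST.
	 */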
4217	if (scsi_cmd->use_sg) {
4218		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4219						 scsi_cmd->request_buffer,
4220						 scsi_cmd->use_sg,
4221						 scsi_cmd->sc_data_direction);
4222
4223		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4224			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4225			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4226			ioarcb->write_data_transfer_length = cpu_to_be32(length);
4227			ioarcb->write_ioadl_len =
4228				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4229		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4230			ioadl_flags = IPR_IOADL_FLAGS_READ;
4231			ioarcb->read_data_transfer_length = cpu_to_be32(length);
4232			ioarcb->read_ioadl_len =
4233				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4234		}
4235
4236		sglist = scsi_cmd->request_buffer;
4237
4238		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4239			ioadl[i].flags_and_data_len =
4240				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4241			ioadl[i].address =
4242				cpu_to_be32(sg_dma_address(&sglist[i]));
4243		}
4244
4245		if (likely(ipr_cmd->dma_use_sg)) {
4246			ioadl[i-1].flags_and_data_len |=
4247				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4248			return 0;
4249		} else
4250			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4251	} else {
4252		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4253			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4254			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4255			ioarcb->write_data_transfer_length = cpu_to_be32(length);
4256			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4257		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4258			ioadl_flags = IPR_IOADL_FLAGS_READ;
4259			ioarcb->read_data_transfer_length = cpu_to_be32(length);
4260			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4261		}
4262
4263		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4264						     scsi_cmd->request_buffer, length,
4265						     scsi_cmd->sc_data_direction);
4266
4267		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4268			ipr_cmd->dma_use_sg = 1;
4269			ioadl[0].flags_and_data_len =
4270				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4271			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4272			return 0;
4273		} else
4274			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4275	}
4276
4277	return -1;
4278}
4279
4280/**
4281 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4282 * @scsi_cmd:	scsi command struct
4283 *
4284 * Return value:
4285 * 	task attributes
4286 **/
4287static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4288{
4289	u8 tag[2];
4290	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4291
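	/* Map the SCSI queue tag message, if any, to the IOA task attribute
	 * flags used in the command packet. Untagged is the default. */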
4292	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4293		switch (tag[0]) {
4294		case MSG_SIMPLE_TAG:
4295			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4296			break;
4297		case MSG_HEAD_TAG:
4298			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4299			break;
4300		case MSG_ORDERED_TAG:
4301			rc = IPR_FLAGS_LO_ORDERED_TASK;
4302			break;
4303		}
4304	}
4305
4306	return rc;
4307}
4308
4309/**
4310 * ipr_erp_done - Process completion of ERP for a device
4311 * @ipr_cmd:		ipr command struct
4312 *
4313 * This function copies the sense buffer into the scsi_cmd
4314 * struct and pushes the scsi_done function.
4315 *
4316 * Return value:
4317 * 	nothing
4318 **/
4319static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4320{
4321	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4322	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4323	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4324	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4325
4326	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4327		scsi_cmd->result |= (DID_ERROR << 16);
4328		scmd_printk(KERN_ERR, scsi_cmd,
4329			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4330	} else {
4331		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4332		       SCSI_SENSE_BUFFERSIZE);
4333	}
4334
4335	if (res) {
4336		if (!ipr_is_naca_model(res))
4337			res->needs_sync_complete = 1;
4338		res->in_erp = 0;
4339	}
4340	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4341	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4342	scsi_cmd->scsi_done(scsi_cmd);
4343}
4344
4345/**
4346 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4347 * @ipr_cmd:	ipr command struct
4348 *
4349 * Return value:
4350 * 	none
4351 **/
4352static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4353{
4354	struct ipr_ioarcb *ioarcb;
4355	struct ipr_ioasa *ioasa;
4356
4357	ioarcb = &ipr_cmd->ioarcb;
4358	ioasa = &ipr_cmd->ioasa;
4359
4360	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4361	ioarcb->write_data_transfer_length = 0;
4362	ioarcb->read_data_transfer_length = 0;
4363	ioarcb->write_ioadl_len = 0;
4364	ioarcb->read_ioadl_len = 0;
4365	ioasa->ioasc = 0;
4366	ioasa->residual_data_len = 0;
4367}
4368
4369/**
4370 * ipr_erp_request_sense - Send request sense to a device
4371 * @ipr_cmd:	ipr command struct
4372 *
4373 * This function sends a request sense to a device as a result
4374 * of a check condition.
4375 *
4376 * Return value:
4377 * 	nothing
4378 **/
4379static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4380{
4381	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4382	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4383
4384	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4385		ipr_erp_done(ipr_cmd);
4386		return;
4387	}
4388
4389	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4390
4391	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4392	cmd_pkt->cdb[0] = REQUEST_SENSE;
4393	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4394	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4395	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4396	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4397
4398	ipr_cmd->ioadl[0].flags_and_data_len =
4399		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4400	ipr_cmd->ioadl[0].address =
4401		cpu_to_be32(ipr_cmd->sense_buffer_dma);
4402
4403	ipr_cmd->ioarcb.read_ioadl_len =
4404		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4405	ipr_cmd->ioarcb.read_data_transfer_length =
4406		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4407
4408	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4409		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4410}
4411
4412/**
4413 * ipr_erp_cancel_all - Send cancel all to a device
4414 * @ipr_cmd:	ipr command struct
4415 *
4416 * This function sends a cancel all to a device to clear the
4417 * queue. If we are running TCQ on the device, QERR is set to 1,
4418 * which means all outstanding ops have been dropped on the floor.
4419 * Cancel all will return them to us.
4420 *
4421 * Return value:
4422 * 	nothing
4423 **/
4424static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4425{
4426	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4427	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4428	struct ipr_cmd_pkt *cmd_pkt;
4429
4430	res->in_erp = 1;
4431
4432	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4433
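	/* If the device is not using tagged queueing there are no queued ops
	 * to cancel, so go straight to request sense. */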
4434	if (!scsi_get_tag_type(scsi_cmd->device)) {
4435		ipr_erp_request_sense(ipr_cmd);
4436		return;
4437	}
4438
4439	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4440	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4441	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4442
4443	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4444		   IPR_CANCEL_ALL_TIMEOUT);
4445}
4446
4447/**
4448 * ipr_dump_ioasa - Dump contents of IOASA
4449 * @ioa_cfg:	ioa config struct
4450 * @ipr_cmd:	ipr command struct
4451 * @res:		resource entry struct
4452 *
4453 * This function is invoked by the interrupt handler when ops
4454 * fail. It will log the IOASA if appropriate. Only called
4455 * for GPDD ops.
4456 *
4457 * Return value:
4458 * 	none
4459 **/
4460static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4461			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4462{
4463	int i;
4464	u16 data_len;
4465	u32 ioasc;
4466	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4467	__be32 *ioasa_data = (__be32 *)ioasa;
4468	int error_index;
4469
4470	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4471
4472	if (0 == ioasc)
4473		return;
4474
4475	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4476		return;
4477
4478	error_index = ipr_get_error(ioasc);
4479
4480	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4481		/* Don't log an error if the IOA already logged one */
4482		if (ioasa->ilid != 0)
4483			return;
4484
4485		if (ipr_error_table[error_index].log_ioasa == 0)
4486			return;
4487	}
4488
4489	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4490
4491	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4492		data_len = sizeof(struct ipr_ioasa);
4493	else
4494		data_len = be16_to_cpu(ioasa->ret_stat_len);
4495
4496	ipr_err("IOASA Dump:\n");
4497
4498	for (i = 0; i < data_len / 4; i += 4) {
4499		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4500			be32_to_cpu(ioasa_data[i]),
4501			be32_to_cpu(ioasa_data[i+1]),
4502			be32_to_cpu(ioasa_data[i+2]),
4503			be32_to_cpu(ioasa_data[i+3]));
4504	}
4505}
4506
4507/**
4508 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4509 * @ipr_cmd:	ipr command struct
4511 *
4512 * Return value:
4513 * 	none
4514 **/
4515static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4516{
4517	u32 failing_lba;
4518	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4519	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4520	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4521	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4522
4523	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4524
4525	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4526		return;
4527
4528	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4529
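	/*
	 * A volume set can report a failing LBA with nonzero high-order
	 * 32 bits, which does not fit in the 4-byte information field of
	 * fixed format sense data, so build descriptor format (0x72) sense
	 * data in that case. Everything else gets fixed format (0x70).
	 */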
4530	if (ipr_is_vset_device(res) &&
4531	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4532	    ioasa->u.vset.failing_lba_hi != 0) {
4533		sense_buf[0] = 0x72;
4534		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4535		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4536		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4537
4538		sense_buf[7] = 12;
4539		sense_buf[8] = 0;
4540		sense_buf[9] = 0x0A;
4541		sense_buf[10] = 0x80;
4542
4543		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4544
4545		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4546		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4547		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4548		sense_buf[15] = failing_lba & 0x000000ff;
4549
4550		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4551
4552		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4553		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4554		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4555		sense_buf[19] = failing_lba & 0x000000ff;
4556	} else {
4557		sense_buf[0] = 0x70;
4558		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4559		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4560		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4561
4562		/* Illegal request */
4563		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4564		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4565			sense_buf[7] = 10;	/* additional length */
4566
4567			/* IOARCB was in error */
4568			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4569				sense_buf[15] = 0xC0;
4570			else	/* Parameter data was invalid */
4571				sense_buf[15] = 0x80;
4572
4573			sense_buf[16] =
4574			    ((IPR_FIELD_POINTER_MASK &
4575			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4576			sense_buf[17] =
4577			    (IPR_FIELD_POINTER_MASK &
4578			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4579		} else {
4580			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4581				if (ipr_is_vset_device(res))
4582					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4583				else
4584					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4585
4586				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4587				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4588				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4589				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4590				sense_buf[6] = failing_lba & 0x000000ff;
4591			}
4592
4593			sense_buf[7] = 6;	/* additional length */
4594		}
4595	}
4596}
4597
4598/**
4599 * ipr_get_autosense - Copy autosense data to sense buffer
4600 * @ipr_cmd:	ipr command struct
4601 *
4602 * This function copies the autosense buffer to the buffer
4603 * in the scsi_cmd, if there is autosense available.
4604 *
4605 * Return value:
4606 *	1 if autosense was available / 0 if not
4607 **/
4608static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4609{
4610	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4611
4612	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4613		return 0;
4614
4615	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4616	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4617		   SCSI_SENSE_BUFFERSIZE));
4618	return 1;
4619}
4620
4621/**
4622 * ipr_erp_start - Process an error response for a SCSI op
4623 * @ioa_cfg:	ioa config struct
4624 * @ipr_cmd:	ipr command struct
4625 *
4626 * This function determines whether or not to initiate ERP
4627 * on the affected device.
4628 *
4629 * Return value:
4630 * 	nothing
4631 **/
4632static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4633			      struct ipr_cmnd *ipr_cmd)
4634{
4635	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4636	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4637	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4638
4639	if (!res) {
4640		ipr_scsi_eh_done(ipr_cmd);
4641		return;
4642	}
4643
4644	if (ipr_is_gscsi(res))
4645		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4646	else
4647		ipr_gen_sense(ipr_cmd);
4648
4649	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4650	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4651		if (ipr_is_naca_model(res))
4652			scsi_cmd->result |= (DID_ABORT << 16);
4653		else
4654			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4655		break;
4656	case IPR_IOASC_IR_RESOURCE_HANDLE:
4657	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4658		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4659		break;
4660	case IPR_IOASC_HW_SEL_TIMEOUT:
4661		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4662		if (!ipr_is_naca_model(res))
4663			res->needs_sync_complete = 1;
4664		break;
4665	case IPR_IOASC_SYNC_REQUIRED:
4666		if (!res->in_erp)
4667			res->needs_sync_complete = 1;
4668		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4669		break;
4670	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4671	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4672		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4673		break;
4674	case IPR_IOASC_BUS_WAS_RESET:
4675	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4676		/*
4677		 * Report the bus reset and ask for a retry. The device
4678		 * will give CC/UA the next command.
4679		 */
4680		if (!res->resetting_device)
4681			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4682		scsi_cmd->result |= (DID_ERROR << 16);
4683		if (!ipr_is_naca_model(res))
4684			res->needs_sync_complete = 1;
4685		break;
4686	case IPR_IOASC_HW_DEV_BUS_STATUS:
4687		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4688		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4689			if (!ipr_get_autosense(ipr_cmd)) {
4690				if (!ipr_is_naca_model(res)) {
4691					ipr_erp_cancel_all(ipr_cmd);
4692					return;
4693				}
4694			}
4695		}
4696		if (!ipr_is_naca_model(res))
4697			res->needs_sync_complete = 1;
4698		break;
4699	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4700		break;
4701	default:
4702		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4703			scsi_cmd->result |= (DID_ERROR << 16);
4704		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4705			res->needs_sync_complete = 1;
4706		break;
4707	}
4708
4709	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4710	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4711	scsi_cmd->scsi_done(scsi_cmd);
4712}
4713
4714/**
4715 * ipr_scsi_done - mid-layer done function
4716 * @ipr_cmd:	ipr command struct
4717 *
4718 * This function is invoked by the interrupt handler for
4719 * ops generated by the SCSI mid-layer
4720 *
4721 * Return value:
4722 * 	none
4723 **/
4724static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4725{
4726	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4727	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4728	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4729
4730	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4731
4732	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4733		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4734		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4735		scsi_cmd->scsi_done(scsi_cmd);
4736	} else
4737		ipr_erp_start(ioa_cfg, ipr_cmd);
4738}
4739
4740/**
4741 * ipr_queuecommand - Queue a mid-layer request
4742 * @scsi_cmd:	scsi command struct
4743 * @done:		done function
4744 *
4745 * This function queues a request generated by the mid-layer.
4746 *
4747 * Return value:
4748 *	0 on success
4749 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4750 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4751 **/
4752static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4753			    void (*done) (struct scsi_cmnd *))
4754{
4755	struct ipr_ioa_cfg *ioa_cfg;
4756	struct ipr_resource_entry *res;
4757	struct ipr_ioarcb *ioarcb;
4758	struct ipr_cmnd *ipr_cmd;
4759	int rc = 0;
4760
4761	scsi_cmd->scsi_done = done;
4762	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4763	res = scsi_cmd->device->hostdata;
4764	scsi_cmd->result = (DID_OK << 16);
4765
4766	/*
4767	 * We are currently blocking all devices due to a host reset.
4768	 * We have told the host to stop giving us new requests, but
4769	 * ERP ops don't count. FIXME
4770	 */
4771	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4772		return SCSI_MLQUEUE_HOST_BUSY;
4773
4774	/*
4775	 * FIXME - Create scsi_set_host_offline interface
4776	 *  and the ioa_is_dead check can be removed
4777	 */
4778	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4779		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4780		scsi_cmd->result = (DID_NO_CONNECT << 16);
4781		scsi_cmd->scsi_done(scsi_cmd);
4782		return 0;
4783	}
4784
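	/* SATA devices are owned by libata; hand the command off to it. */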
4785	if (ipr_is_gata(res) && res->sata_port)
4786		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4787
4788	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4789	ioarcb = &ipr_cmd->ioarcb;
4790	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4791
4792	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4793	ipr_cmd->scsi_cmd = scsi_cmd;
4794	ioarcb->res_handle = res->cfgte.res_handle;
4795	ipr_cmd->done = ipr_scsi_done;
4796	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4797
4798	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4799		if (scsi_cmd->underflow == 0)
4800			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4801
4802		if (res->needs_sync_complete) {
4803			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4804			res->needs_sync_complete = 0;
4805		}
4806
4807		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4808		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4809		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4810		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4811	}
4812
4813	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4814	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4815		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4816
4817	if (likely(rc == 0))
4818		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4819
4820	if (likely(rc == 0)) {
4821		mb();
4822		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4823		       ioa_cfg->regs.ioarrin_reg);
4824	} else {
4825		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4826		 return SCSI_MLQUEUE_HOST_BUSY;
4827	}
4828
4829	return 0;
4830}
4831
4832/**
4833 * ipr_ioctl - IOCTL handler
4834 * @sdev:	scsi device struct
4835 * @cmd:	IOCTL cmd
4836 * @arg:	IOCTL arg
4837 *
4838 * Return value:
4839 * 	0 on success / other on failure
4840 **/
4841static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4842{
4843	struct ipr_resource_entry *res;
4844
4845	res = (struct ipr_resource_entry *)sdev->hostdata;
4846	if (res && ipr_is_gata(res))
4847		return ata_scsi_ioctl(sdev, cmd, arg);
4848
4849	return -EINVAL;
4850}
4851
4852/**
4853 * ipr_ioa_info - Get information about the card/driver
4854 * @host:	scsi host struct
4855 *
4856 * Return value:
4857 * 	pointer to buffer with description string
4858 **/
4859static const char * ipr_ioa_info(struct Scsi_Host *host)
4860{
4861	static char buffer[512];
4862	struct ipr_ioa_cfg *ioa_cfg;
4863	unsigned long lock_flags = 0;
4864
4865	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4866
4867	spin_lock_irqsave(host->host_lock, lock_flags);
4868	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4869	spin_unlock_irqrestore(host->host_lock, lock_flags);
4870
4871	return buffer;
4872}
4873
4874static struct scsi_host_template driver_template = {
4875	.module = THIS_MODULE,
4876	.name = "IPR",
4877	.info = ipr_ioa_info,
4878	.ioctl = ipr_ioctl,
4879	.queuecommand = ipr_queuecommand,
4880	.eh_abort_handler = ipr_eh_abort,
4881	.eh_device_reset_handler = ipr_eh_dev_reset,
4882	.eh_host_reset_handler = ipr_eh_host_reset,
4883	.slave_alloc = ipr_slave_alloc,
4884	.slave_configure = ipr_slave_configure,
4885	.slave_destroy = ipr_slave_destroy,
4886	.target_alloc = ipr_target_alloc,
4887	.target_destroy = ipr_target_destroy,
4888	.change_queue_depth = ipr_change_queue_depth,
4889	.change_queue_type = ipr_change_queue_type,
4890	.bios_param = ipr_biosparam,
4891	.can_queue = IPR_MAX_COMMANDS,
4892	.this_id = -1,
4893	.sg_tablesize = IPR_MAX_SGLIST,
4894	.max_sectors = IPR_IOA_MAX_SECTORS,
4895	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4896	.use_clustering = ENABLE_CLUSTERING,
4897	.shost_attrs = ipr_ioa_attrs,
4898	.sdev_attrs = ipr_dev_attrs,
4899	.proc_name = IPR_NAME
4900};
4901
4902/**
4903 * ipr_ata_phy_reset - libata phy_reset handler
4904 * @ap:		ata port to reset
4905 *
4906 **/
4907static void ipr_ata_phy_reset(struct ata_port *ap)
4908{
4909	unsigned long flags;
4910	struct ipr_sata_port *sata_port = ap->private_data;
4911	struct ipr_resource_entry *res = sata_port->res;
4912	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4913	int rc;
4914
4915	ENTER;
4916	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4917	while (ioa_cfg->in_reset_reload) {
4918		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4919		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4920		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4921	}
4922
4923	if (!ioa_cfg->allow_cmds)
4924		goto out_unlock;
4925
4926	rc = ipr_device_reset(ioa_cfg, res);
4927
4928	if (rc) {
4929		ap->ops->port_disable(ap);
4930		goto out_unlock;
4931	}
4932
4933	switch (res->cfgte.proto) {
4934	case IPR_PROTO_SATA:
4935	case IPR_PROTO_SAS_STP:
4936		ap->device[0].class = ATA_DEV_ATA;
4937		break;
4938	case IPR_PROTO_SATA_ATAPI:
4939	case IPR_PROTO_SAS_STP_ATAPI:
4940		ap->device[0].class = ATA_DEV_ATAPI;
4941		break;
4942	default:
4943		ap->device[0].class = ATA_DEV_UNKNOWN;
4944		ap->ops->port_disable(ap);
4945		break;
4946	}
4947
4948out_unlock:
4949	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4950	LEAVE;
4951}
4952
4953/**
4954 * ipr_ata_post_internal - Cleanup after an internal command
4955 * @qc:	ATA queued command
4956 *
4957 * Return value:
4958 * 	none
4959 **/
4960static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4961{
4962	struct ipr_sata_port *sata_port = qc->ap->private_data;
4963	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4964	struct ipr_cmnd *ipr_cmd;
4965	unsigned long flags;
4966
4967	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4968	while (ioa_cfg->in_reset_reload) {
4969		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4970		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4971		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4972	}
4973
4974	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4975		if (ipr_cmd->qc == qc) {
4976			ipr_device_reset(ioa_cfg, sata_port->res);
4977			break;
4978		}
4979	}
4980	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4981}
4982
4983/**
4984 * ipr_tf_read - Read the current ATA taskfile for the ATA port
4985 * @ap:	ATA port
4986 * @tf:	destination ATA taskfile
4987 *
4988 * Return value:
4989 * 	none
4990 **/
4991static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
4992{
4993	struct ipr_sata_port *sata_port = ap->private_data;
4994	struct ipr_ioasa_gata *g = &sata_port->ioasa;
4995
4996	tf->feature = g->error;
4997	tf->nsect = g->nsect;
4998	tf->lbal = g->lbal;
4999	tf->lbam = g->lbam;
5000	tf->lbah = g->lbah;
5001	tf->device = g->device;
5002	tf->command = g->status;
5003	tf->hob_nsect = g->hob_nsect;
5004	tf->hob_lbal = g->hob_lbal;
5005	tf->hob_lbam = g->hob_lbam;
5006	tf->hob_lbah = g->hob_lbah;
5007	tf->ctl = g->alt_status;
5008}
5009
5010/**
5011 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5012 * @regs:	destination
5013 * @tf:	source ATA taskfile
5014 *
5015 * Return value:
5016 * 	none
5017 **/
5018static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5019			     struct ata_taskfile *tf)
5020{
5021	regs->feature = tf->feature;
5022	regs->nsect = tf->nsect;
5023	regs->lbal = tf->lbal;
5024	regs->lbam = tf->lbam;
5025	regs->lbah = tf->lbah;
5026	regs->device = tf->device;
5027	regs->command = tf->command;
5028	regs->hob_feature = tf->hob_feature;
5029	regs->hob_nsect = tf->hob_nsect;
5030	regs->hob_lbal = tf->hob_lbal;
5031	regs->hob_lbam = tf->hob_lbam;
5032	regs->hob_lbah = tf->hob_lbah;
5033	regs->ctl = tf->ctl;
5034}
5035
5036/**
5037 * ipr_sata_done - done function for SATA commands
5038 * @ipr_cmd:	ipr command struct
5039 *
5040 * This function is invoked by the interrupt handler for
5041 * ops generated by the SCSI mid-layer to SATA devices
5042 *
5043 * Return value:
5044 * 	none
5045 **/
5046static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5047{
5048	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5049	struct ata_queued_cmd *qc = ipr_cmd->qc;
5050	struct ipr_sata_port *sata_port = qc->ap->private_data;
5051	struct ipr_resource_entry *res = sata_port->res;
5052	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5053
5054	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5055	       sizeof(struct ipr_ioasa_gata));
5056	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5057
5058	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5059		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5060					 res->cfgte.res_addr.target);
5061
5062	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5063		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5064	else
5065		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5066	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5067	ata_qc_complete(qc);
5068}
5069
5070/**
5071 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5072 * @ipr_cmd:	ipr command struct
5073 * @qc:		ATA queued command
5074 *
5075 **/
5076static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5077				struct ata_queued_cmd *qc)
5078{
5079	u32 ioadl_flags = 0;
5080	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5081	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5082	int len = qc->nbytes + qc->pad_len;
5083	struct scatterlist *sg;
5084
5085	if (len == 0)
5086		return;
5087
5088	if (qc->dma_dir == DMA_TO_DEVICE) {
5089		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5090		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5091		ioarcb->write_data_transfer_length = cpu_to_be32(len);
5092		ioarcb->write_ioadl_len =
5093			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5094	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5095		ioadl_flags = IPR_IOADL_FLAGS_READ;
5096		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5097		ioarcb->read_ioadl_len =
5098			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5099	}
5100
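	/* Build one IOADL descriptor per scatter/gather element and flag the
	 * final element as the last descriptor in the list. */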
5101	ata_for_each_sg(sg, qc) {
5102		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5103		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5104		if (ata_sg_is_last(sg, qc))
5105			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5106		else
5107			ioadl++;
5108	}
5109}
5110
5111/**
5112 * ipr_qc_issue - Issue a SATA qc to a device
5113 * @qc:	queued command
5114 *
5115 * Return value:
5116 * 	0 if success
5117 **/
5118static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5119{
5120	struct ata_port *ap = qc->ap;
5121	struct ipr_sata_port *sata_port = ap->private_data;
5122	struct ipr_resource_entry *res = sata_port->res;
5123	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5124	struct ipr_cmnd *ipr_cmd;
5125	struct ipr_ioarcb *ioarcb;
5126	struct ipr_ioarcb_ata_regs *regs;
5127
5128	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5129		return -EIO;
5130
5131	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5132	ioarcb = &ipr_cmd->ioarcb;
5133	regs = &ioarcb->add_data.u.regs;
5134
5135	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5136	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5137
5138	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5139	ipr_cmd->qc = qc;
5140	ipr_cmd->done = ipr_sata_done;
5141	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5142	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5143	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5144	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5145	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5146
5147	ipr_build_ata_ioadl(ipr_cmd, qc);
5148	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5149	ipr_copy_sata_tf(regs, &qc->tf);
5150	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5151	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5152
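	/*
	 * Tell the adapter how to move the data for this taskfile: DMA
	 * protocols set the DMA transfer type flag, ATAPI protocols set the
	 * packet command flag.
	 */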
5153	switch (qc->tf.protocol) {
5154	case ATA_PROT_NODATA:
5155	case ATA_PROT_PIO:
5156		break;
5157
5158	case ATA_PROT_DMA:
5159		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5160		break;
5161
5162	case ATA_PROT_ATAPI:
5163	case ATA_PROT_ATAPI_NODATA:
5164		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5165		break;
5166
5167	case ATA_PROT_ATAPI_DMA:
5168		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5169		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5170		break;
5171
5172	default:
5173		WARN_ON(1);
5174		return -1;
5175	}
5176
5177	mb();
5178	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5179	       ioa_cfg->regs.ioarrin_reg);
5180	return 0;
5181}
5182
5183/**
5184 * ipr_ata_check_status - Return last ATA status
5185 * @ap:	ATA port
5186 *
5187 * Return value:
5188 * 	ATA status
5189 **/
5190static u8 ipr_ata_check_status(struct ata_port *ap)
5191{
5192	struct ipr_sata_port *sata_port = ap->private_data;
5193	return sata_port->ioasa.status;
5194}
5195
5196/**
5197 * ipr_ata_check_altstatus - Return last ATA altstatus
5198 * @ap:	ATA port
5199 *
5200 * Return value:
5201 * 	Alt ATA status
5202 **/
5203static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5204{
5205	struct ipr_sata_port *sata_port = ap->private_data;
5206	return sata_port->ioasa.alt_status;
5207}
5208
5209static struct ata_port_operations ipr_sata_ops = {
5210	.port_disable = ata_port_disable,
5211	.check_status = ipr_ata_check_status,
5212	.check_altstatus = ipr_ata_check_altstatus,
5213	.dev_select = ata_noop_dev_select,
5214	.phy_reset = ipr_ata_phy_reset,
5215	.post_internal_cmd = ipr_ata_post_internal,
5216	.tf_read = ipr_tf_read,
5217	.qc_prep = ata_noop_qc_prep,
5218	.qc_issue = ipr_qc_issue,
5219	.port_start = ata_sas_port_start,
5220	.port_stop = ata_sas_port_stop
5221};
5222
5223static struct ata_port_info sata_port_info = {
5224	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5225	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5226	.pio_mask	= 0x10, /* pio4 */
5227	.mwdma_mask = 0x07,
5228	.udma_mask	= 0x7f, /* udma0-6 */
5229	.port_ops	= &ipr_sata_ops
5230};
5231
5232#ifdef CONFIG_PPC_PSERIES
5233static const u16 ipr_blocked_processors[] = {
5234	PV_NORTHSTAR,
5235	PV_PULSAR,
5236	PV_POWER4,
5237	PV_ICESTAR,
5238	PV_SSTAR,
5239	PV_POWER4p,
5240	PV_630,
5241	PV_630p
5242};
5243
5244/**
5245 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5246 * @ioa_cfg:	ioa cfg struct
5247 *
5248 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5249 * certain pSeries hardware. This function determines if the given
5250 * adapter is in one of these configurations or not.
5251 *
5252 * Return value:
5253 * 	1 if adapter is not supported / 0 if adapter is supported
5254 **/
5255static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5256{
5257	u8 rev_id;
5258	int i;
5259
5260	if (ioa_cfg->type == 0x5702) {
5261		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5262					 &rev_id) == PCIBIOS_SUCCESSFUL) {
5263			if (rev_id < 4) {
5264				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5265					if (__is_processor(ipr_blocked_processors[i]))
5266						return 1;
5267				}
5268			}
5269		}
5270	}
5271	return 0;
5272}
5273#else
5274#define ipr_invalid_adapter(ioa_cfg) 0
5275#endif
5276
5277/**
5278 * ipr_ioa_bringdown_done - IOA bring down completion.
5279 * @ipr_cmd:	ipr command struct
5280 *
5281 * This function processes the completion of an adapter bring down.
5282 * It wakes any reset sleepers.
5283 *
5284 * Return value:
5285 * 	IPR_RC_JOB_RETURN
5286 **/
5287static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5288{
5289	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5290
5291	ENTER;
5292	ioa_cfg->in_reset_reload = 0;
5293	ioa_cfg->reset_retries = 0;
5294	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5295	wake_up_all(&ioa_cfg->reset_wait_q);
5296
5297	spin_unlock_irq(ioa_cfg->host->host_lock);
5298	scsi_unblock_requests(ioa_cfg->host);
5299	spin_lock_irq(ioa_cfg->host->host_lock);
5300	LEAVE;
5301
5302	return IPR_RC_JOB_RETURN;
5303}
5304
5305/**
5306 * ipr_ioa_reset_done - IOA reset completion.
5307 * @ipr_cmd:	ipr command struct
5308 *
5309 * This function processes the completion of an adapter reset.
5310 * It schedules any necessary mid-layer add/removes and
5311 * wakes any reset sleepers.
5312 *
5313 * Return value:
5314 * 	IPR_RC_JOB_RETURN
5315 **/
5316static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5317{
5318	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5319	struct ipr_resource_entry *res;
5320	struct ipr_hostrcb *hostrcb, *temp;
5321	int i = 0;
5322
5323	ENTER;
5324	ioa_cfg->in_reset_reload = 0;
5325	ioa_cfg->allow_cmds = 1;
5326	ioa_cfg->reset_cmd = NULL;
5327	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5328
5329	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5330		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5331			ipr_trace;
5332			break;
5333		}
5334	}
5335	schedule_work(&ioa_cfg->work_q);
5336
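	/*
	 * Re-post the host controlled async message (HCAM) buffers so the
	 * adapter can report error log data and configuration changes again
	 * after the reset: the first IPR_NUM_LOG_HCAMS buffers go back as
	 * error log buffers, the remainder as configuration change buffers.
	 */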
5337	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5338		list_del(&hostrcb->queue);
5339		if (i++ < IPR_NUM_LOG_HCAMS)
5340			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5341		else
5342			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5343	}
5344
5345	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5346
5347	ioa_cfg->reset_retries = 0;
5348	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5349	wake_up_all(&ioa_cfg->reset_wait_q);
5350
5351	spin_unlock_irq(ioa_cfg->host->host_lock);
5352	scsi_unblock_requests(ioa_cfg->host);
5353	spin_lock_irq(ioa_cfg->host->host_lock);
5354
5355	if (!ioa_cfg->allow_cmds)
5356		scsi_block_requests(ioa_cfg->host);
5357
5358	LEAVE;
5359	return IPR_RC_JOB_RETURN;
5360}
5361
5362/**
5363 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5364 * @supported_dev:	supported device struct
5365 * @vpids:			vendor product id struct
5366 *
5367 * Return value:
5368 * 	none
5369 **/
5370static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5371				 struct ipr_std_inq_vpids *vpids)
5372{
5373	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5374	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5375	supported_dev->num_records = 1;
5376	supported_dev->data_length =
5377		cpu_to_be16(sizeof(struct ipr_supported_device));
5378	supported_dev->reserved = 0;
5379}
5380
5381/**
5382 * ipr_set_supported_devs - Send Set Supported Devices for a device
5383 * @ipr_cmd:	ipr command struct
5384 *
5385 * This function sends a Set Supported Devices command to the adapter.
5386 *
5387 * Return value:
5388 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5389 **/
5390static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5391{
5392	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5393	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5394	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5395	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5396	struct ipr_resource_entry *res = ipr_cmd->u.res;
5397
5398	ipr_cmd->job_step = ipr_ioa_reset_done;
5399
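	/*
	 * Issue one Set Supported Devices per SCSI disk resource. Each pass
	 * sends the command for a single device and re-arms this function as
	 * the next job step; once the resource list is exhausted, the reset
	 * job continues on to ipr_ioa_reset_done.
	 */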
5400	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5401		if (!ipr_is_scsi_disk(res))
5402			continue;
5403
5404		ipr_cmd->u.res = res;
5405		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5406
5407		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5408		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5409		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5410
5411		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5412		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5413		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5414
5415		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5416							sizeof(struct ipr_supported_device));
5417		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5418					     offsetof(struct ipr_misc_cbs, supp_dev));
5419		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5420		ioarcb->write_data_transfer_length =
5421			cpu_to_be32(sizeof(struct ipr_supported_device));
5422
5423		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5424			   IPR_SET_SUP_DEVICE_TIMEOUT);
5425
5426		ipr_cmd->job_step = ipr_set_supported_devs;
5427		return IPR_RC_JOB_RETURN;
5428	}
5429
5430	return IPR_RC_JOB_CONTINUE;
5431}
5432
5433/**
5434 * ipr_setup_write_cache - Disable write cache if needed
5435 * @ipr_cmd:	ipr command struct
5436 *
5437 * This function sets up the adapter's write cache to the desired setting
5438 *
5439 * Return value:
5440 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5441 **/
5442static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5443{
5444	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5445
5446	ipr_cmd->job_step = ipr_set_supported_devs;
5447	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5448				    struct ipr_resource_entry, queue);
5449
5450	if (ioa_cfg->cache_state != CACHE_DISABLED)
5451		return IPR_RC_JOB_CONTINUE;
5452
5453	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5454	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5455	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5456	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5457
5458	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5459
5460	return IPR_RC_JOB_RETURN;
5461}
5462
5463/**
5464 * ipr_get_mode_page - Locate specified mode page
5465 * @mode_pages:	mode page buffer
5466 * @page_code:	page code to find
5467 * @len:		minimum required length for mode page
5468 *
5469 * Return value:
5470 * 	pointer to mode page / NULL on failure
5471 **/
5472static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5473			       u32 page_code, u32 len)
5474{
5475	struct ipr_mode_page_hdr *mode_hdr;
5476	u32 page_length;
5477	u32 length;
5478
5479	if (!mode_pages || (mode_pages->hdr.length == 0))
5480		return NULL;
5481
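	/*
	 * hdr.length excludes the length byte itself, so adding 1 gives the
	 * full mode parameter list size; subtracting the 4 byte parameter
	 * header and the block descriptors leaves just the mode page data.
	 */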
5482	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5483	mode_hdr = (struct ipr_mode_page_hdr *)
5484		(mode_pages->data + mode_pages->hdr.block_desc_len);
5485
5486	while (length) {
5487		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5488			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5489				return mode_hdr;
5490			break;
5491		} else {
5492			page_length = (sizeof(struct ipr_mode_page_hdr) +
5493				       mode_hdr->page_length);
5494			length -= page_length;
5495			mode_hdr = (struct ipr_mode_page_hdr *)
5496				((unsigned long)mode_hdr + page_length);
5497		}
5498	}
5499	return NULL;
5500}
5501
5502/**
5503 * ipr_check_term_power - Check for term power errors
5504 * @ioa_cfg:	ioa config struct
5505 * @mode_pages:	IOAFP mode pages buffer
5506 *
5507 * Check the IOAFP's mode page 28 for term power errors
5508 *
5509 * Return value:
5510 * 	nothing
5511 **/
5512static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5513				 struct ipr_mode_pages *mode_pages)
5514{
5515	int i;
5516	int entry_length;
5517	struct ipr_dev_bus_entry *bus;
5518	struct ipr_mode_page28 *mode_page;
5519
5520	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5521				      sizeof(struct ipr_mode_page28));
5522
5523	entry_length = mode_page->entry_length;
5524
5525	bus = mode_page->bus;
5526
5527	for (i = 0; i < mode_page->num_entries; i++) {
5528		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5529			dev_err(&ioa_cfg->pdev->dev,
5530				"Term power is absent on scsi bus %d\n",
5531				bus->res_addr.bus);
5532		}
5533
5534		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5535	}
5536}
5537
5538/**
5539 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5540 * @ioa_cfg:	ioa config struct
5541 *
5542 * Looks through the config table for SES devices. If an SES device
5543 * appears in the SES table with a maximum SCSI bus speed, the
5544 * bus speed is limited accordingly.
5545 *
5546 * Return value:
5547 * 	none
5548 **/
5549static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5550{
5551	u32 max_xfer_rate;
5552	int i;
5553
5554	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5555		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5556						       ioa_cfg->bus_attr[i].bus_width);
5557
5558		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5559			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5560	}
5561}
5562
5563/**
5564 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5565 * @ioa_cfg:	ioa config struct
5566 * @mode_pages:	mode page 28 buffer
5567 *
5568 * Updates mode page 28 based on driver configuration
5569 *
5570 * Return value:
5571 * 	none
5572 **/
5573static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5574					  	struct ipr_mode_pages *mode_pages)
5575{
5576	int i, entry_length;
5577	struct ipr_dev_bus_entry *bus;
5578	struct ipr_bus_attributes *bus_attr;
5579	struct ipr_mode_page28 *mode_page;
5580
5581	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5582				      sizeof(struct ipr_mode_page28));
5583
5584	entry_length = mode_page->entry_length;
5585
5586	/* Loop for each device bus entry */
5587	for (i = 0, bus = mode_page->bus;
5588	     i < mode_page->num_entries;
5589	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5590		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5591			dev_err(&ioa_cfg->pdev->dev,
5592				"Invalid resource address reported: 0x%08X\n",
5593				IPR_GET_PHYS_LOC(bus->res_addr));
5594			continue;
5595		}
5596
5597		bus_attr = &ioa_cfg->bus_attr[i];
5598		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5599		bus->bus_width = bus_attr->bus_width;
5600		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5601		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5602		if (bus_attr->qas_enabled)
5603			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5604		else
5605			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5606	}
5607}
5608
5609/**
5610 * ipr_build_mode_select - Build a mode select command
5611 * @ipr_cmd:	ipr command struct
5612 * @res_handle:	resource handle to send command to
5613 * @parm:		Byte 1 of Mode Select command
5614 * @dma_addr:	DMA buffer address
5615 * @xfer_len:	data transfer length
5616 *
5617 * Return value:
5618 * 	none
5619 **/
5620static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5621				  __be32 res_handle, u8 parm, u32 dma_addr,
5622				  u8 xfer_len)
5623{
5624	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5625	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5626
5627	ioarcb->res_handle = res_handle;
5628	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5629	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5630	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5631	ioarcb->cmd_pkt.cdb[1] = parm;
5632	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5633
5634	ioadl->flags_and_data_len =
5635		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5636	ioadl->address = cpu_to_be32(dma_addr);
5637	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5638	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5639}
5640
5641/**
5642 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5643 * @ipr_cmd:	ipr command struct
5644 *
5645 * This function sets up the SCSI bus attributes and sends
5646 * a Mode Select for Page 28 to activate them.
5647 *
5648 * Return value:
5649 * 	IPR_RC_JOB_RETURN
5650 **/
5651static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5652{
5653	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5654	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5655	int length;
5656
5657	ENTER;
5658	ipr_scsi_bus_speed_limit(ioa_cfg);
5659	ipr_check_term_power(ioa_cfg, mode_pages);
5660	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
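	/*
	 * Save the full parameter list length for the transfer, then zero
	 * the mode data length field, which is reserved for Mode Select.
	 */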
5661	length = mode_pages->hdr.length + 1;
5662	mode_pages->hdr.length = 0;
5663
5664	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5665			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5666			      length);
5667
5668	ipr_cmd->job_step = ipr_setup_write_cache;
5669	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5670
5671	LEAVE;
5672	return IPR_RC_JOB_RETURN;
5673}
5674
5675/**
5676 * ipr_build_mode_sense - Builds a mode sense command
5677 * @ipr_cmd:	ipr command struct
5678 * @res_handle:	resource handle to send command to
5679 * @parm:		Byte 2 of mode sense command
5680 * @dma_addr:	DMA address of mode sense buffer
5681 * @xfer_len:	Size of DMA buffer
5682 *
5683 * Return value:
5684 * 	none
5685 **/
5686static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5687				 __be32 res_handle,
5688				 u8 parm, u32 dma_addr, u8 xfer_len)
5689{
5690	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5691	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5692
5693	ioarcb->res_handle = res_handle;
5694	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5695	ioarcb->cmd_pkt.cdb[2] = parm;
5696	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5697	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5698
5699	ioadl->flags_and_data_len =
5700		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5701	ioadl->address = cpu_to_be32(dma_addr);
5702	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5703	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5704}
5705
5706/**
5707 * ipr_reset_cmd_failed - Handle failure of IOA reset command
5708 * @ipr_cmd:	ipr command struct
5709 *
5710 * This function handles the failure of an IOA bringup command.
5711 *
5712 * Return value:
5713 * 	IPR_RC_JOB_RETURN
5714 **/
5715static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5716{
5717	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5718	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5719
5720	dev_err(&ioa_cfg->pdev->dev,
5721		"0x%02X failed with IOASC: 0x%08X\n",
5722		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5723
5724	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5725	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5726	return IPR_RC_JOB_RETURN;
5727}
5728
5729/**
5730 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5731 * @ipr_cmd:	ipr command struct
5732 *
5733 * This function handles the failure of a Mode Sense to the IOAFP.
5734 * Some adapters do not handle all mode pages.
5735 *
5736 * Return value:
5737 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5738 **/
5739static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5740{
5741	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5742
5743	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5744		ipr_cmd->job_step = ipr_setup_write_cache;
5745		return IPR_RC_JOB_CONTINUE;
5746	}
5747
5748	return ipr_reset_cmd_failed(ipr_cmd);
5749}
5750
5751/**
5752 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5753 * @ipr_cmd:	ipr command struct
5754 *
5755 * This function sends a Page 28 mode sense to the IOA to
5756 * retrieve SCSI bus attributes.
5757 *
5758 * Return value:
5759 * 	IPR_RC_JOB_RETURN
5760 **/
5761static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5762{
5763	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5764
5765	ENTER;
5766	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5767			     0x28, ioa_cfg->vpd_cbs_dma +
5768			     offsetof(struct ipr_misc_cbs, mode_pages),
5769			     sizeof(struct ipr_mode_pages));
5770
5771	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5772	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5773
5774	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5775
5776	LEAVE;
5777	return IPR_RC_JOB_RETURN;
5778}
5779
5780/**
5781 * ipr_init_res_table - Initialize the resource table
5782 * @ipr_cmd:	ipr command struct
5783 *
5784 * This function looks through the existing resource table, comparing
5785 * it with the config table. This function will take care of old/new
5786 * devices and schedule adding/removing them from the mid-layer
5787 * as appropriate.
5788 *
5789 * Return value:
5790 * 	IPR_RC_JOB_CONTINUE
5791 **/
5792static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5793{
5794	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5795	struct ipr_resource_entry *res, *temp;
5796	struct ipr_config_table_entry *cfgte;
5797	int found, i;
5798	LIST_HEAD(old_res);
5799
5800	ENTER;
5801	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5802		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5803
5804	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5805		list_move_tail(&res->queue, &old_res);
5806
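	/*
	 * Walk the new config table: entries whose resource address matches
	 * an existing entry are moved back to the used queue; anything new
	 * gets a free resource entry and is flagged for addition to the
	 * mid-layer.
	 */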
5807	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5808		cfgte = &ioa_cfg->cfg_table->dev[i];
5809		found = 0;
5810
5811		list_for_each_entry_safe(res, temp, &old_res, queue) {
5812			if (!memcmp(&res->cfgte.res_addr,
5813				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5814				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5815				found = 1;
5816				break;
5817			}
5818		}
5819
5820		if (!found) {
5821			if (list_empty(&ioa_cfg->free_res_q)) {
5822				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5823				break;
5824			}
5825
5826			found = 1;
5827			res = list_entry(ioa_cfg->free_res_q.next,
5828					 struct ipr_resource_entry, queue);
5829			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5830			ipr_init_res_entry(res);
5831			res->add_to_ml = 1;
5832		}
5833
5834		if (found)
5835			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5836	}
5837
5838	list_for_each_entry_safe(res, temp, &old_res, queue) {
5839		if (res->sdev) {
5840			res->del_from_ml = 1;
5841			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5842			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5843		} else {
5844			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5845		}
5846	}
5847
5848	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5849
5850	LEAVE;
5851	return IPR_RC_JOB_CONTINUE;
5852}
5853
5854/**
5855 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5856 * @ipr_cmd:	ipr command struct
5857 *
5858 * This function sends a Query IOA Configuration command
5859 * to the adapter to retrieve the IOA configuration table.
5860 *
5861 * Return value:
5862 * 	IPR_RC_JOB_RETURN
5863 **/
5864static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5865{
5866	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5867	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5868	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5869	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5870
5871	ENTER;
5872	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5873		 ucode_vpd->major_release, ucode_vpd->card_type,
5874		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5875	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5876	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5877
5878	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5879	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5880	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5881
5882	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5883	ioarcb->read_data_transfer_length =
5884		cpu_to_be32(sizeof(struct ipr_config_table));
5885
5886	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5887	ioadl->flags_and_data_len =
5888		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5889
5890	ipr_cmd->job_step = ipr_init_res_table;
5891
5892	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5893
5894	LEAVE;
5895	return IPR_RC_JOB_RETURN;
5896}
5897
5898/**
5899 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5900 * @ipr_cmd:	ipr command struct
 * @flags:	CDB byte 1 (e.g. the EVPD flag)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry data buffer
 * @xfer_len:	size of the inquiry data buffer
5901 *
5902 * This utility function sends an inquiry to the adapter.
5903 *
5904 * Return value:
5905 * 	none
5906 **/
5907static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5908			      u32 dma_addr, u8 xfer_len)
5909{
5910	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5911	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5912
5913	ENTER;
5914	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5915	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5916
5917	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5918	ioarcb->cmd_pkt.cdb[1] = flags;
5919	ioarcb->cmd_pkt.cdb[2] = page;
5920	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5921
5922	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5923	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5924
5925	ioadl->address = cpu_to_be32(dma_addr);
5926	ioadl->flags_and_data_len =
5927		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5928
5929	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5930	LEAVE;
5931}
5932
5933/**
5934 * ipr_inquiry_page_supported - Is the given inquiry page supported
5935 * @page0:		inquiry page 0 buffer
5936 * @page:		page code.
5937 *
5938 * This function determines if the specified inquiry page is supported.
5939 *
5940 * Return value:
5941 *	1 if page is supported / 0 if not
5942 **/
5943static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5944{
5945	int i;
5946
5947	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5948		if (page0->page[i] == page)
5949			return 1;
5950
5951	return 0;
5952}
5953
5954/**
5955 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5956 * @ipr_cmd:	ipr command struct
5957 *
5958 * This function sends a Page 3 inquiry to the adapter
5959 * to retrieve software VPD information.
5960 *
5961 * Return value:
5962 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5963 **/
5964static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5965{
5966	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5967	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5968
5969	ENTER;
5970
5971	if (!ipr_inquiry_page_supported(page0, 1))
5972		ioa_cfg->cache_state = CACHE_NONE;
5973
5974	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5975
5976	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5977			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5978			  sizeof(struct ipr_inquiry_page3));
5979
5980	LEAVE;
5981	return IPR_RC_JOB_RETURN;
5982}
5983
5984/**
5985 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5986 * @ipr_cmd:	ipr command struct
5987 *
5988 * This function sends a Page 0 inquiry to the adapter
5989 * to retrieve supported inquiry pages.
5990 *
5991 * Return value:
5992 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5993 **/
5994static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5995{
5996	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5997	char type[5];
5998
5999	ENTER;
6000
6001	/* Grab the type out of the VPD and store it away */
6002	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6003	type[4] = '\0';
6004	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6005
6006	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6007
6008	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6009			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6010			  sizeof(struct ipr_inquiry_page0));
6011
6012	LEAVE;
6013	return IPR_RC_JOB_RETURN;
6014}
6015
6016/**
6017 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6018 * @ipr_cmd:	ipr command struct
6019 *
6020 * This function sends a standard inquiry to the adapter.
6021 *
6022 * Return value:
6023 * 	IPR_RC_JOB_RETURN
6024 **/
6025static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6026{
6027	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6028
6029	ENTER;
6030	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6031
6032	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6033			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6034			  sizeof(struct ipr_ioa_vpd));
6035
6036	LEAVE;
6037	return IPR_RC_JOB_RETURN;
6038}
6039
6040/**
6041 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
6042 * @ipr_cmd:	ipr command struct
6043 *
6044 * This function sends an Identify Host Request Response Queue
6045 * command to establish the HRRQ with the adapter.
6046 *
6047 * Return value:
6048 * 	IPR_RC_JOB_RETURN
6049 **/
6050static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6051{
6052	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6053	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6054
6055	ENTER;
6056	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6057
6058	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6059	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6060
6061	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
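	/*
	 * cdb[2-5]: host RRQ DMA address, MSB first
	 * cdb[7-8]: host RRQ size in bytes, MSB first
	 */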
6062	ioarcb->cmd_pkt.cdb[2] =
6063		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6064	ioarcb->cmd_pkt.cdb[3] =
6065		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6066	ioarcb->cmd_pkt.cdb[4] =
6067		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6068	ioarcb->cmd_pkt.cdb[5] =
6069		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6070	ioarcb->cmd_pkt.cdb[7] =
6071		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6072	ioarcb->cmd_pkt.cdb[8] =
6073		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6074
6075	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6076
6077	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6078
6079	LEAVE;
6080	return IPR_RC_JOB_RETURN;
6081}
6082
6083/**
6084 * ipr_reset_timer_done - Adapter reset timer function
6085 * @ipr_cmd:	ipr command struct
6086 *
6087 * Description: This function is used in adapter reset processing
6088 * for timing events. If the reset_cmd pointer in the IOA
6089 * config struct is not this adapter's we are doing nested
6090 * resets and fail_all_ops will take care of freeing the
6091 * command block.
6092 *
6093 * Return value:
6094 * 	none
6095 **/
6096static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6097{
6098	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6099	unsigned long lock_flags = 0;
6100
6101	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6102
6103	if (ioa_cfg->reset_cmd == ipr_cmd) {
6104		list_del(&ipr_cmd->queue);
6105		ipr_cmd->done(ipr_cmd);
6106	}
6107
6108	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6109}
6110
6111/**
6112 * ipr_reset_start_timer - Start a timer for adapter reset job
6113 * @ipr_cmd:	ipr command struct
6114 * @timeout:	timeout value
6115 *
6116 * Description: This function is used in adapter reset processing
6117 * for timing events. If the reset_cmd pointer in the IOA
6118 * config struct is not this adapter's we are doing nested
6119 * resets and fail_all_ops will take care of freeing the
6120 * command block.
6121 *
6122 * Return value:
6123 * 	none
6124 **/
6125static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6126				  unsigned long timeout)
6127{
6128	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6129	ipr_cmd->done = ipr_reset_ioa_job;
6130
6131	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6132	ipr_cmd->timer.expires = jiffies + timeout;
6133	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6134	add_timer(&ipr_cmd->timer);
6135}
6136
6137/**
6138 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6139 * @ioa_cfg:	ioa cfg struct
6140 *
6141 * Return value:
6142 * 	nothing
6143 **/
6144static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6145{
6146	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6147
6148	/* Initialize Host RRQ pointers */
6149	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6150	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6151	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
6152	ioa_cfg->toggle_bit = 1;
6153
6154	/* Zero out config table */
6155	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6156}
6157
6158/**
6159 * ipr_reset_enable_ioa - Enable the IOA following a reset.
6160 * @ipr_cmd:	ipr command struct
6161 *
6162 * This function reinitializes some control blocks and
6163 * enables destructive diagnostics on the adapter.
6164 *
6165 * Return value:
6166 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6167 **/
6168static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6169{
6170	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6171	volatile u32 int_reg;
6172
6173	ENTER;
6174	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
6175	ipr_init_ioa_mem(ioa_cfg);
6176
6177	ioa_cfg->allow_interrupts = 1;
6178	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6179
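	/*
	 * If the IOA has already transitioned to operational, just unmask
	 * the error and HRRQ interrupts and continue the job immediately.
	 */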
6180	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6181		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6182		       ioa_cfg->regs.clr_interrupt_mask_reg);
6183		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6184		return IPR_RC_JOB_CONTINUE;
6185	}
6186
6187	/* Enable destructive diagnostics on IOA */
6188	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6189
6190	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6191	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6192
6193	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6194
6195	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6196	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
6197	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6198	ipr_cmd->done = ipr_reset_ioa_job;
6199	add_timer(&ipr_cmd->timer);
6200	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6201
6202	LEAVE;
6203	return IPR_RC_JOB_RETURN;
6204}
6205
6206/**
6207 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6208 * @ipr_cmd:	ipr command struct
6209 *
6210 * This function is invoked when an adapter dump has run out
6211 * of processing time.
6212 *
6213 * Return value:
6214 * 	IPR_RC_JOB_CONTINUE
6215 **/
6216static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6217{
6218	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6219
6220	if (ioa_cfg->sdt_state == GET_DUMP)
6221		ioa_cfg->sdt_state = ABORT_DUMP;
6222
6223	ipr_cmd->job_step = ipr_reset_alert;
6224
6225	return IPR_RC_JOB_CONTINUE;
6226}
6227
6228/**
6229 * ipr_unit_check_no_data - Log a unit check/no data error log
6230 * @ioa_cfg:		ioa config struct
6231 *
6232 * Logs an error indicating the adapter unit checked, but for some
6233 * reason, we were unable to fetch the unit check buffer.
6234 *
6235 * Return value:
6236 * 	nothing
6237 **/
6238static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6239{
6240	ioa_cfg->errors_logged++;
6241	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6242}
6243
6244/**
6245 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6246 * @ioa_cfg:		ioa config struct
6247 *
6248 * Fetches the unit check buffer from the adapter by clocking the data
6249 * through the mailbox register.
6250 *
6251 * Return value:
6252 * 	nothing
6253 **/
6254static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6255{
6256	unsigned long mailbox;
6257	struct ipr_hostrcb *hostrcb;
6258	struct ipr_uc_sdt sdt;
6259	int rc, length;
6260
6261	mailbox = readl(ioa_cfg->ioa_mailbox);
6262
6263	if (!ipr_sdt_is_fmt2(mailbox)) {
6264		ipr_unit_check_no_data(ioa_cfg);
6265		return;
6266	}
6267
6268	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6269	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6270					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6271
6272	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6273	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6274		ipr_unit_check_no_data(ioa_cfg);
6275		return;
6276	}
6277
6278	/* Find length of the first sdt entry (UC buffer) */
6279	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6280		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6281
6282	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6283			     struct ipr_hostrcb, queue);
6284	list_del(&hostrcb->queue);
6285	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6286
6287	rc = ipr_get_ldump_data_section(ioa_cfg,
6288					be32_to_cpu(sdt.entry[0].bar_str_offset),
6289					(__be32 *)&hostrcb->hcam,
6290					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6291
6292	if (!rc)
6293		ipr_handle_log_data(ioa_cfg, hostrcb);
6294	else
6295		ipr_unit_check_no_data(ioa_cfg);
6296
6297	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6298}
6299
6300/**
6301 * ipr_reset_restore_cfg_space - Restore PCI config space.
6302 * @ipr_cmd:	ipr command struct
6303 *
6304 * Description: This function restores the saved PCI config space of
6305 * the adapter, fails all outstanding ops back to the callers, and
6306 * fetches the dump/unit check if applicable to this reset.
6307 *
6308 * Return value:
6309 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6310 **/
6311static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6312{
6313	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6314	int rc;
6315
6316	ENTER;
6317	pci_unblock_user_cfg_access(ioa_cfg->pdev);
6318	rc = pci_restore_state(ioa_cfg->pdev);
6319
6320	if (rc != PCIBIOS_SUCCESSFUL) {
6321		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6322		return IPR_RC_JOB_CONTINUE;
6323	}
6324
6325	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6326		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6327		return IPR_RC_JOB_CONTINUE;
6328	}
6329
6330	ipr_fail_all_ops(ioa_cfg);
6331
6332	if (ioa_cfg->ioa_unit_checked) {
6333		ioa_cfg->ioa_unit_checked = 0;
6334		ipr_get_unit_check_buffer(ioa_cfg);
6335		ipr_cmd->job_step = ipr_reset_alert;
6336		ipr_reset_start_timer(ipr_cmd, 0);
6337		return IPR_RC_JOB_RETURN;
6338	}
6339
6340	if (ioa_cfg->in_ioa_bringdown) {
6341		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6342	} else {
6343		ipr_cmd->job_step = ipr_reset_enable_ioa;
6344
6345		if (GET_DUMP == ioa_cfg->sdt_state) {
6346			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6347			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6348			schedule_work(&ioa_cfg->work_q);
6349			return IPR_RC_JOB_RETURN;
6350		}
6351	}
6352
6353	LEAVE;
6354	return IPR_RC_JOB_CONTINUE;
6355}
6356
6357/**
6358 * ipr_reset_start_bist - Run BIST on the adapter.
6359 * @ipr_cmd:	ipr command struct
6360 *
6361 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6362 *
6363 * Return value:
6364 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6365 **/
6366static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6367{
6368	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6369	int rc;
6370
6371	ENTER;
6372	pci_block_user_cfg_access(ioa_cfg->pdev);
6373	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6374
6375	if (rc != PCIBIOS_SUCCESSFUL) {
6376		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6377		rc = IPR_RC_JOB_CONTINUE;
6378	} else {
6379		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6380		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6381		rc = IPR_RC_JOB_RETURN;
6382	}
6383
6384	LEAVE;
6385	return rc;
6386}
6387
6388/**
6389 * ipr_reset_allowed - Query whether or not IOA can be reset
6390 * @ioa_cfg:	ioa config struct
6391 *
6392 * Return value:
6393 * 	0 if reset not allowed / non-zero if reset is allowed
6394 **/
6395static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6396{
6397	volatile u32 temp_reg;
6398
6399	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6400	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6401}
6402
6403/**
6404 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6405 * @ipr_cmd:	ipr command struct
6406 *
6407 * Description: This function waits for adapter permission to run BIST,
6408 * then runs BIST. If the adapter does not give permission after a
6409 * reasonable time, we will reset the adapter anyway. The impact of
6410 * resetting the adapter without warning the adapter is the risk of
6411 * losing the persistent error log on the adapter. If the adapter is
6412 * reset while it is writing to the flash on the adapter, the flash
6413 * segment will have bad ECC and be zeroed.
6414 *
6415 * Return value:
6416 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6417 **/
6418static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6419{
6420	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6421	int rc = IPR_RC_JOB_RETURN;
6422
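	/*
	 * Poll every IPR_CHECK_FOR_RESET_TIMEOUT until the IOA drops its
	 * critical operation indication or the time budget expires, then
	 * start BIST regardless.
	 */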
6423	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6424		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6425		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6426	} else {
6427		ipr_cmd->job_step = ipr_reset_start_bist;
6428		rc = IPR_RC_JOB_CONTINUE;
6429	}
6430
6431	return rc;
6432}
6433
6434/**
6435 * ipr_reset_alert - Alert the adapter of a pending reset
6436 * @ipr_cmd:	ipr command struct
6437 *
6438 * Description: This function alerts the adapter that it will be reset.
6439 * If memory space is not currently enabled, proceed directly
6440 * to running BIST on the adapter. The timer must always be started
6441 * so we guarantee we do not run BIST from ipr_isr.
6442 *
6443 * Return value:
6444 * 	IPR_RC_JOB_RETURN
6445 **/
6446static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6447{
6448	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6449	u16 cmd_reg;
6450	int rc;
6451
6452	ENTER;
6453	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6454
6455	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6456		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6457		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6458		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6459	} else {
6460		ipr_cmd->job_step = ipr_reset_start_bist;
6461	}
6462
6463	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6464	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6465
6466	LEAVE;
6467	return IPR_RC_JOB_RETURN;
6468}
6469
6470/**
6471 * ipr_reset_ucode_download_done - Microcode download completion
6472 * @ipr_cmd:	ipr command struct
6473 *
6474 * Description: This function unmaps the microcode download buffer.
6475 *
6476 * Return value:
6477 * 	IPR_RC_JOB_CONTINUE
6478 **/
6479static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6480{
6481	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6482	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6483
6484	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6485		     sglist->num_sg, DMA_TO_DEVICE);
6486
6487	ipr_cmd->job_step = ipr_reset_alert;
6488	return IPR_RC_JOB_CONTINUE;
6489}
6490
6491/**
6492 * ipr_reset_ucode_download - Download microcode to the adapter
6493 * @ipr_cmd:	ipr command struct
6494 *
6495 * Description: This function checks to see if there is microcode
6496 * to download to the adapter. If there is, a download is performed.
6497 *
6498 * Return value:
6499 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6500 **/
6501static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6502{
6503	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6504	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6505
6506	ENTER;
6507	ipr_cmd->job_step = ipr_reset_alert;
6508
6509	if (!sglist)
6510		return IPR_RC_JOB_CONTINUE;
6511
6512	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6513	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6514	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6515	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
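	/* cdb[6-8]: 24-bit microcode image length, MSB first */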
6516	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6517	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6518	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6519
6520	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6521	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6522
6523	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6524		   IPR_WRITE_BUFFER_TIMEOUT);
6525
6526	LEAVE;
6527	return IPR_RC_JOB_RETURN;
6528}
6529
6530/**
6531 * ipr_reset_shutdown_ioa - Shutdown the adapter
6532 * @ipr_cmd:	ipr command struct
6533 *
6534 * Description: This function issues an adapter shutdown of the
6535 * specified type to the specified adapter as part of the
6536 * adapter reset job.
6537 *
6538 * Return value:
6539 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6540 **/
6541static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6542{
6543	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6544	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6545	unsigned long timeout;
6546	int rc = IPR_RC_JOB_CONTINUE;
6547
6548	ENTER;
6549	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6550		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6551		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6552		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6553		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6554
6555		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6556			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6557		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6558			timeout = IPR_INTERNAL_TIMEOUT;
6559		else
6560			timeout = IPR_SHUTDOWN_TIMEOUT;
6561
6562		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6563
6564		rc = IPR_RC_JOB_RETURN;
6565		ipr_cmd->job_step = ipr_reset_ucode_download;
6566	} else
6567		ipr_cmd->job_step = ipr_reset_alert;
6568
6569	LEAVE;
6570	return rc;
6571}
6572
6573/**
6574 * ipr_reset_ioa_job - Adapter reset job
6575 * @ipr_cmd:	ipr command struct
6576 *
6577 * Description: This function is the job router for the adapter reset job.
6578 *
6579 * Return value:
6580 * 	none
6581 **/
6582static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6583{
6584	u32 rc, ioasc;
6585	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6586
6587	do {
6588		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6589
6590		if (ioa_cfg->reset_cmd != ipr_cmd) {
6591			/*
6592			 * We are doing nested adapter resets and this is
6593			 * not the current reset job.
6594			 */
6595			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6596			return;
6597		}
6598
6599		if (IPR_IOASC_SENSE_KEY(ioasc)) {
6600			rc = ipr_cmd->job_step_failed(ipr_cmd);
6601			if (rc == IPR_RC_JOB_RETURN)
6602				return;
6603		}
6604
6605		ipr_reinit_ipr_cmnd(ipr_cmd);
6606		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6607		rc = ipr_cmd->job_step(ipr_cmd);
6608	} while(rc == IPR_RC_JOB_CONTINUE);
6609}
6610
6611/**
6612 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6613 * @ioa_cfg:		ioa config struct
6614 * @job_step:		first job step of reset job
6615 * @shutdown_type:	shutdown type
6616 *
6617 * Description: This function will initiate the reset of the given adapter
6618 * starting at the selected job step.
6619 * If the caller needs to wait on the completion of the reset,
6620 * the caller must sleep on the reset_wait_q.
6621 *
6622 * Return value:
6623 * 	none
6624 **/
6625static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6626				    int (*job_step) (struct ipr_cmnd *),
6627				    enum ipr_shutdown_type shutdown_type)
6628{
6629	struct ipr_cmnd *ipr_cmd;
6630
6631	ioa_cfg->in_reset_reload = 1;
6632	ioa_cfg->allow_cmds = 0;
6633	scsi_block_requests(ioa_cfg->host);
6634
6635	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6636	ioa_cfg->reset_cmd = ipr_cmd;
6637	ipr_cmd->job_step = job_step;
6638	ipr_cmd->u.shutdown_type = shutdown_type;
6639
6640	ipr_reset_ioa_job(ipr_cmd);
6641}
6642
6643/**
6644 * ipr_initiate_ioa_reset - Initiate an adapter reset
6645 * @ioa_cfg:		ioa config struct
6646 * @shutdown_type:	shutdown type
6647 *
6648 * Description: This function will initiate the reset of the given adapter.
6649 * If the caller needs to wait on the completion of the reset,
6650 * the caller must sleep on the reset_wait_q.
6651 *
6652 * Return value:
6653 * 	none
6654 **/
6655static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6656				   enum ipr_shutdown_type shutdown_type)
6657{
6658	if (ioa_cfg->ioa_is_dead)
6659		return;
6660
6661	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6662		ioa_cfg->sdt_state = ABORT_DUMP;
6663
6664	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6665		dev_err(&ioa_cfg->pdev->dev,
6666			"IOA taken offline - error recovery failed\n");
6667
6668		ioa_cfg->reset_retries = 0;
6669		ioa_cfg->ioa_is_dead = 1;
6670
6671		if (ioa_cfg->in_ioa_bringdown) {
6672			ioa_cfg->reset_cmd = NULL;
6673			ioa_cfg->in_reset_reload = 0;
6674			ipr_fail_all_ops(ioa_cfg);
6675			wake_up_all(&ioa_cfg->reset_wait_q);
6676
6677			spin_unlock_irq(ioa_cfg->host->host_lock);
6678			scsi_unblock_requests(ioa_cfg->host);
6679			spin_lock_irq(ioa_cfg->host->host_lock);
6680			return;
6681		} else {
6682			ioa_cfg->in_ioa_bringdown = 1;
6683			shutdown_type = IPR_SHUTDOWN_NONE;
6684		}
6685	}
6686
6687	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6688				shutdown_type);
6689}
6690
6691/**
6692 * ipr_reset_freeze - Hold off all I/O activity
6693 * @ipr_cmd:	ipr command struct
6694 *
6695 * Description: If the PCI slot is frozen, hold off all I/O
6696 * activity; then, as soon as the slot is available again,
6697 * initiate an adapter reset.
6698 */
6699static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6700{
6701	/* Disallow new interrupts, avoid loop */
6702	ipr_cmd->ioa_cfg->allow_interrupts = 0;
6703	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6704	ipr_cmd->done = ipr_reset_ioa_job;
6705	return IPR_RC_JOB_RETURN;
6706}
6707
6708/**
6709 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6710 * @pdev:	PCI device struct
6711 *
6712 * Description: This routine is called to tell us that the PCI bus
6713 * is down. Can't do anything here, except put the device driver
6714 * into a holding pattern, waiting for the PCI bus to come back.
6715 */
6716static void ipr_pci_frozen(struct pci_dev *pdev)
6717{
6718	unsigned long flags = 0;
6719	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6720
6721	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6722	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6723	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6724}
6725
6726/**
6727 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6728 * @pdev:	PCI device struct
6729 *
6730 * Description: This routine is called by the pci error recovery
6731 * code after the PCI slot has been reset, just before we
6732 * should resume normal operations.
6733 */
6734static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6735{
6736	unsigned long flags = 0;
6737	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6738
6739	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6740	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6741	                                 IPR_SHUTDOWN_NONE);
6742	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6743	return PCI_ERS_RESULT_RECOVERED;
6744}
6745
6746/**
6747 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6748 * @pdev:	PCI device struct
6749 *
6750 * Description: This routine is called when the PCI bus has
6751 * permanently failed.
6752 */
6753static void ipr_pci_perm_failure(struct pci_dev *pdev)
6754{
6755	unsigned long flags = 0;
6756	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6757
6758	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6759	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6760		ioa_cfg->sdt_state = ABORT_DUMP;
6761	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6762	ioa_cfg->in_ioa_bringdown = 1;
6763	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6764	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6765}
6766
6767/**
6768 * ipr_pci_error_detected - Called when a PCI error is detected.
6769 * @pdev:	PCI device struct
6770 * @state:	PCI channel state
6771 *
6772 * Description: Called when a PCI error is detected.
6773 *
6774 * Return value:
6775 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6776 */
6777static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6778					       pci_channel_state_t state)
6779{
6780	switch (state) {
6781	case pci_channel_io_frozen:
6782		ipr_pci_frozen(pdev);
6783		return PCI_ERS_RESULT_NEED_RESET;
6784	case pci_channel_io_perm_failure:
6785		ipr_pci_perm_failure(pdev);
6786		return PCI_ERS_RESULT_DISCONNECT;
6788	default:
6789		break;
6790	}
6791	return PCI_ERS_RESULT_NEED_RESET;
6792}
6793
6794/**
6795 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6796 * @ioa_cfg:	ioa cfg struct
6797 *
6798 * Description: This is the second phase of adapter initialization.
6799 * This function takes care of initializing the adapter to the point
6800 * where it can accept new commands.
6801 *
6802 * Return value:
6803 * 	0 on success / -EIO on failure
6804 **/
6805static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6806{
6807	int rc = 0;
6808	unsigned long host_lock_flags = 0;
6809
6810	ENTER;
6811	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6812	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6813	if (ioa_cfg->needs_hard_reset) {
6814		ioa_cfg->needs_hard_reset = 0;
6815		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6816	} else
6817		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
6818					IPR_SHUTDOWN_NONE);
6819
6820	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6821	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6822	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6823
6824	if (ioa_cfg->ioa_is_dead) {
6825		rc = -EIO;
6826	} else if (ipr_invalid_adapter(ioa_cfg)) {
6827		if (!ipr_testmode)
6828			rc = -EIO;
6829
6830		dev_err(&ioa_cfg->pdev->dev,
6831			"Adapter not supported in this hardware configuration.\n");
6832	}
6833
6834	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6835
6836	LEAVE;
6837	return rc;
6838}
6839
6840/**
6841 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6842 * @ioa_cfg:	ioa config struct
6843 *
6844 * Return value:
6845 * 	none
6846 **/
6847static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6848{
6849	int i;
6850
6851	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6852		if (ioa_cfg->ipr_cmnd_list[i])
6853			pci_pool_free(ioa_cfg->ipr_cmd_pool,
6854				      ioa_cfg->ipr_cmnd_list[i],
6855				      ioa_cfg->ipr_cmnd_list_dma[i]);
6856
6857		ioa_cfg->ipr_cmnd_list[i] = NULL;
6858	}
6859
6860	if (ioa_cfg->ipr_cmd_pool)
6861		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6862
6863	ioa_cfg->ipr_cmd_pool = NULL;
6864}
6865
6866/**
6867 * ipr_free_mem - Frees memory allocated for an adapter
6868 * @ioa_cfg:	ioa cfg struct
6869 *
6870 * Return value:
6871 * 	nothing
6872 **/
6873static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6874{
6875	int i;
6876
6877	kfree(ioa_cfg->res_entries);
6878	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6879			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6880	ipr_free_cmd_blks(ioa_cfg);
6881	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6882			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6883	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6884			    ioa_cfg->cfg_table,
6885			    ioa_cfg->cfg_table_dma);
6886
6887	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6888		pci_free_consistent(ioa_cfg->pdev,
6889				    sizeof(struct ipr_hostrcb),
6890				    ioa_cfg->hostrcb[i],
6891				    ioa_cfg->hostrcb_dma[i]);
6892	}
6893
6894	ipr_free_dump(ioa_cfg);
6895	kfree(ioa_cfg->trace);
6896}
6897
6898/**
6899 * ipr_free_all_resources - Free all allocated resources for an adapter.
6900 * @ioa_cfg:	ioa config struct
6901 *
6902 * This function frees all allocated resources for the
6903 * specified adapter.
6904 *
6905 * Return value:
6906 * 	none
6907 **/
6908static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6909{
6910	struct pci_dev *pdev = ioa_cfg->pdev;
6911
6912	ENTER;
6913	free_irq(pdev->irq, ioa_cfg);
6914	iounmap(ioa_cfg->hdw_dma_regs);
6915	pci_release_regions(pdev);
6916	ipr_free_mem(ioa_cfg);
6917	scsi_host_put(ioa_cfg->host);
6918	pci_disable_device(pdev);
6919	LEAVE;
6920}
6921
6922/**
6923 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6924 * @ioa_cfg:	ioa config struct
6925 *
6926 * Return value:
6927 * 	0 on success / -ENOMEM on allocation failure
6928 **/
6929static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6930{
6931	struct ipr_cmnd *ipr_cmd;
6932	struct ipr_ioarcb *ioarcb;
6933	dma_addr_t dma_addr;
6934	int i;
6935
6936	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6937						 sizeof(struct ipr_cmnd), 8, 0);
6938
6939	if (!ioa_cfg->ipr_cmd_pool)
6940		return -ENOMEM;
6941
6942	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6943		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
6944
6945		if (!ipr_cmd) {
6946			ipr_free_cmd_blks(ioa_cfg);
6947			return -ENOMEM;
6948		}
6949
6950		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6951		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6952		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6953
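		/*
		 * Pre-initialize the IOARCB with the DMA addresses of the
		 * IOADL, IOASA, and sense buffer that live within this same
		 * command block allocation.
		 */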
6954		ioarcb = &ipr_cmd->ioarcb;
6955		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6956		ioarcb->host_response_handle = cpu_to_be32(i << 2);
6957		ioarcb->write_ioadl_addr =
6958			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6959		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6960		ioarcb->ioasa_host_pci_addr =
6961			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6962		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6963		ipr_cmd->cmd_index = i;
6964		ipr_cmd->ioa_cfg = ioa_cfg;
6965		ipr_cmd->sense_buffer_dma = dma_addr +
6966			offsetof(struct ipr_cmnd, sense_buffer);
6967
6968		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6969	}
6970
6971	return 0;
6972}
6973
6974/**
6975 * ipr_alloc_mem - Allocate memory for an adapter
6976 * @ioa_cfg:	ioa config struct
6977 *
6978 * Return value:
6979 * 	0 on success / non-zero for error
6980 **/
6981static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6982{
6983	struct pci_dev *pdev = ioa_cfg->pdev;
6984	int i, rc = -ENOMEM;
6985
6986	ENTER;
6987	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6988				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6989
6990	if (!ioa_cfg->res_entries)
6991		goto out;
6992
6993	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6994		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6995
6996	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6997						sizeof(struct ipr_misc_cbs),
6998						&ioa_cfg->vpd_cbs_dma);
6999
7000	if (!ioa_cfg->vpd_cbs)
7001		goto out_free_res_entries;
7002
7003	if (ipr_alloc_cmd_blks(ioa_cfg))
7004		goto out_free_vpd_cbs;
7005
7006	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7007						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7008						 &ioa_cfg->host_rrq_dma);
7009
7010	if (!ioa_cfg->host_rrq)
7011		goto out_ipr_free_cmd_blocks;
7012
7013	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7014						  sizeof(struct ipr_config_table),
7015						  &ioa_cfg->cfg_table_dma);
7016
7017	if (!ioa_cfg->cfg_table)
7018		goto out_free_host_rrq;
7019
7020	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7021		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7022							   sizeof(struct ipr_hostrcb),
7023							   &ioa_cfg->hostrcb_dma[i]);
7024
7025		if (!ioa_cfg->hostrcb[i])
7026			goto out_free_hostrcb_dma;
7027
7028		ioa_cfg->hostrcb[i]->hostrcb_dma =
7029			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7030		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7031		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7032	}
7033
7034	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7035				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7036
7037	if (!ioa_cfg->trace)
7038		goto out_free_hostrcb_dma;
7039
7040	rc = 0;
7041out:
7042	LEAVE;
7043	return rc;
7044
7045out_free_hostrcb_dma:
7046	while (i-- > 0) {
7047		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7048				    ioa_cfg->hostrcb[i],
7049				    ioa_cfg->hostrcb_dma[i]);
7050	}
7051	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7052			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7053out_free_host_rrq:
7054	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7055			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7056out_ipr_free_cmd_blocks:
7057	ipr_free_cmd_blks(ioa_cfg);
7058out_free_vpd_cbs:
7059	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7060			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7061out_free_res_entries:
7062	kfree(ioa_cfg->res_entries);
7063	goto out;
7064}
7065
7066/**
7067 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7068 * @ioa_cfg:	ioa config struct
7069 *
7070 * Return value:
7071 * 	none
7072 **/
7073static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7074{
7075	int i;
7076
7077	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7078		ioa_cfg->bus_attr[i].bus = i;
7079		ioa_cfg->bus_attr[i].qas_enabled = 0;
7080		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7081		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7082			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7083		else
7084			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7085	}
7086}
7087
7088/**
7089 * ipr_init_ioa_cfg - Initialize IOA config struct
7090 * @ioa_cfg:	ioa config struct
7091 * @host:		scsi host struct
7092 * @pdev:		PCI dev struct
7093 *
7094 * Return value:
7095 * 	none
7096 **/
7097static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7098				       struct Scsi_Host *host, struct pci_dev *pdev)
7099{
7100	const struct ipr_interrupt_offsets *p;
7101	struct ipr_interrupts *t;
7102	void __iomem *base;
7103
7104	ioa_cfg->host = host;
7105	ioa_cfg->pdev = pdev;
7106	ioa_cfg->log_level = ipr_log_level;
7107	ioa_cfg->doorbell = IPR_DOORBELL;
7108	if (!ipr_auto_create)
7109		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7110	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7111	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7112	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
7113	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
7114	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
7115	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
7116	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
7117	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7118
7119	INIT_LIST_HEAD(&ioa_cfg->free_q);
7120	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7121	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7122	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7123	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7124	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7125	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7126	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7127	ioa_cfg->sdt_state = INACTIVE;
7128	if (ipr_enable_cache)
7129		ioa_cfg->cache_state = CACHE_ENABLED;
7130	else
7131		ioa_cfg->cache_state = CACHE_DISABLED;
7132
7133	ipr_initialize_bus_attr(ioa_cfg);
7134
7135	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7136	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7137	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7138	host->unique_id = host->host_no;
7139	host->max_cmd_len = IPR_MAX_CDB_LEN;
7140	pci_set_drvdata(pdev, ioa_cfg);
7141
7142	p = &ioa_cfg->chip_cfg->regs;
7143	t = &ioa_cfg->regs;
7144	base = ioa_cfg->hdw_dma_regs;
7145
7146	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7147	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7148	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7149	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7150	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7151	t->ioarrin_reg = base + p->ioarrin_reg;
7152	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7153	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7154	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7155}
7156
7157/**
7158 * ipr_get_chip_cfg - Find adapter chip configuration
7159 * @dev_id:		PCI device id struct
7160 *
7161 * Return value:
7162 * 	ptr to chip config on success / NULL on failure
7163 **/
7164static const struct ipr_chip_cfg_t * __devinit
7165ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7166{
7167	int i;
7168
7169	if (dev_id->driver_data)
7170		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
7171
7172	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7173		if (ipr_chip[i].vendor == dev_id->vendor &&
7174		    ipr_chip[i].device == dev_id->device)
7175			return ipr_chip[i].cfg;
7176	return NULL;
7177}
7178
7179/**
7180 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7181 * @pdev:		PCI device struct
7182 * @dev_id:		PCI device id struct
7183 *
7184 * Return value:
7185 * 	0 on success / non-zero on failure
7186 **/
7187static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7188				   const struct pci_device_id *dev_id)
7189{
7190	struct ipr_ioa_cfg *ioa_cfg;
7191	struct Scsi_Host *host;
7192	unsigned long ipr_regs_pci;
7193	void __iomem *ipr_regs;
7194	int rc = PCIBIOS_SUCCESSFUL;
7195	volatile u32 mask, uproc;
7196
7197	ENTER;
7198
7199	if ((rc = pci_enable_device(pdev))) {
7200		dev_err(&pdev->dev, "Cannot enable adapter\n");
7201		goto out;
7202	}
7203
7204	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7205
7206	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7207
7208	if (!host) {
7209		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7210		rc = -ENOMEM;
7211		goto out_disable;
7212	}
7213
7214	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7215	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7216	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7217		      sata_port_info.flags, &ipr_sata_ops);
7218
7219	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7220
7221	if (!ioa_cfg->chip_cfg) {
7222		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7223			dev_id->vendor, dev_id->device);
		rc = -ENODEV;	/* don't return success for an unsupported chipset */
7224		goto out_scsi_host_put;
7225	}
7226
7227	ipr_regs_pci = pci_resource_start(pdev, 0);
7228
7229	rc = pci_request_regions(pdev, IPR_NAME);
7230	if (rc < 0) {
7231		dev_err(&pdev->dev,
7232			"Couldn't register memory range of registers\n");
7233		goto out_scsi_host_put;
7234	}
7235
7236	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7237
7238	if (!ipr_regs) {
7239		dev_err(&pdev->dev,
7240			"Couldn't map memory range of registers\n");
7241		rc = -ENOMEM;
7242		goto out_release_regions;
7243	}
7244
7245	ioa_cfg->hdw_dma_regs = ipr_regs;
7246	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7247	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7248
7249	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7250
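	/* Enable bus mastering, then program the 32-bit DMA mask and the
	 * chip's preferred PCI cache line size before any DMA allocations. */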
7251	pci_set_master(pdev);
7252
7253	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7254	if (rc < 0) {
7255		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7256		goto cleanup_nomem;
7257	}
7258
7259	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7260				   ioa_cfg->chip_cfg->cache_line_size);
7261
7262	if (rc != PCIBIOS_SUCCESSFUL) {
7263		dev_err(&pdev->dev, "Write of cache line size failed\n");
7264		rc = -EIO;
7265		goto cleanup_nomem;
7266	}
7267
7268	/* Save away PCI config space for use following IOA reset */
7269	rc = pci_save_state(pdev);
7270
7271	if (rc != PCIBIOS_SUCCESSFUL) {
7272		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7273		rc = -EIO;
7274		goto cleanup_nomem;
7275	}
7276
7277	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7278		goto cleanup_nomem;
7279
7280	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7281		goto cleanup_nomem;
7282
7283	rc = ipr_alloc_mem(ioa_cfg);
7284	if (rc < 0) {
7285		dev_err(&pdev->dev,
7286			"Couldn't allocate enough memory for device driver!\n");
7287		goto cleanup_nomem;
7288	}
7289
7290	/*
7291	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7292	 * the card is in an unknown state and needs a hard reset
7293	 */
7294	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7295	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7296	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7297		ioa_cfg->needs_hard_reset = 1;
7298
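	/* Mask adapter interrupts and clear anything pending other than the
	 * transition-to-operational indication, then register the shared
	 * IRQ handler. */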
7299	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7300	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7301
7302	if (rc) {
7303		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7304			pdev->irq, rc);
7305		goto cleanup_nolog;
7306	}
7307
7308	spin_lock(&ipr_driver_lock);
7309	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7310	spin_unlock(&ipr_driver_lock);
7311
7312	LEAVE;
7313out:
7314	return rc;
7315
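/* Error unwind: each label below undoes the setup performed since the
 * previous label, so failures jump to the deepest label already reached. */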
7316cleanup_nolog:
7317	ipr_free_mem(ioa_cfg);
7318cleanup_nomem:
7319	iounmap(ipr_regs);
7320out_release_regions:
7321	pci_release_regions(pdev);
7322out_scsi_host_put:
7323	scsi_host_put(host);
7324out_disable:
7325	pci_disable_device(pdev);
7326	goto out;
7327}
7328
7329/**
7330 * ipr_scan_vsets - Scans for VSET devices
7331 * @ioa_cfg:	ioa config struct
7332 *
7333 * Description: VSET resources do not follow SAM: LUNs may be sparse with no
7334 * LUN 0, so the midlayer scan misses them and we must scan for them ourselves.
7335 *
7336 * Return value:
7337 * 	none
7338 **/
7339static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7340{
7341	int target, lun;
7342
7343	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7344		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7345			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7346}
7347
7348/**
7349 * ipr_initiate_ioa_bringdown - Bring down an adapter
7350 * @ioa_cfg:		ioa config struct
7351 * @shutdown_type:	shutdown type
7352 *
7353 * Description: This function will initiate bringing down the adapter.
7354 * This consists of issuing an IOA shutdown to the adapter
7355 * to flush the cache, and running BIST.
7356 * If the caller needs to wait on the completion of the reset,
7357 * the caller must sleep on the reset_wait_q.
7358 *
7359 * Return value:
7360 * 	none
7361 **/
7362static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7363				       enum ipr_shutdown_type shutdown_type)
7364{
7365	ENTER;
7366	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7367		ioa_cfg->sdt_state = ABORT_DUMP;
7368	ioa_cfg->reset_retries = 0;
7369	ioa_cfg->in_ioa_bringdown = 1;
7370	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7371	LEAVE;
7372}
7373
7374/**
7375 * __ipr_remove - Remove a single adapter
7376 * @pdev:	pci device struct
7377 *
7378 * Adapter hot plug remove entry point.
7379 *
7380 * Return value:
7381 * 	none
7382 **/
7383static void __ipr_remove(struct pci_dev *pdev)
7384{
7385	unsigned long host_lock_flags = 0;
7386	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7387	ENTER;
7388
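	/* The bringdown is started under the host lock, but the lock must be
	 * dropped before sleeping on reset_wait_q for the reset to complete. */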
7389	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7390	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7391
7392	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7393	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7394	flush_scheduled_work();
7395	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7396
7397	spin_lock(&ipr_driver_lock);
7398	list_del(&ioa_cfg->queue);
7399	spin_unlock(&ipr_driver_lock);
7400
7401	if (ioa_cfg->sdt_state == ABORT_DUMP)
7402		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7403	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7404
7405	ipr_free_all_resources(ioa_cfg);
7406
7407	LEAVE;
7408}
7409
7410/**
7411 * ipr_remove - IOA hot plug remove entry point
7412 * @pdev:	pci device struct
7413 *
7414 * Adapter hot plug remove entry point.
7415 *
7416 * Return value:
7417 * 	none
7418 **/
7419static void ipr_remove(struct pci_dev *pdev)
7420{
7421	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7422
7423	ENTER;
7424
7425	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7426			      &ipr_trace_attr);
7427	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7428			     &ipr_dump_attr);
7429	scsi_remove_host(ioa_cfg->host);
7430
7431	__ipr_remove(pdev);
7432
7433	LEAVE;
7434}
7435
7436/**
7437 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
7438 *
7439 * Return value:
7440 * 	0 on success / non-zero on failure
7441 **/
7442static int __devinit ipr_probe(struct pci_dev *pdev,
7443			       const struct pci_device_id *dev_id)
7444{
7445	struct ipr_ioa_cfg *ioa_cfg;
7446	int rc;
7447
7448	rc = ipr_probe_ioa(pdev, dev_id);
7449
7450	if (rc)
7451		return rc;
7452
7453	ioa_cfg = pci_get_drvdata(pdev);
7454	rc = ipr_probe_ioa_part2(ioa_cfg);
7455
7456	if (rc) {
7457		__ipr_remove(pdev);
7458		return rc;
7459	}
7460
7461	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7462
7463	if (rc) {
7464		__ipr_remove(pdev);
7465		return rc;
7466	}
7467
7468	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7469				   &ipr_trace_attr);
7470
7471	if (rc) {
7472		scsi_remove_host(ioa_cfg->host);
7473		__ipr_remove(pdev);
7474		return rc;
7475	}
7476
7477	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7478				   &ipr_dump_attr);
7479
7480	if (rc) {
7481		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7482				      &ipr_trace_attr);
7483		scsi_remove_host(ioa_cfg->host);
7484		__ipr_remove(pdev);
7485		return rc;
7486	}
7487
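	/* After the normal midlayer scan, explicitly add the sparse VSET LUNs
	 * (see ipr_scan_vsets()) and the adapter's own resource. */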
7488	scsi_scan_host(ioa_cfg->host);
7489	ipr_scan_vsets(ioa_cfg);
7490	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7491	ioa_cfg->allow_ml_add_del = 1;
7492	ioa_cfg->host->max_channel = IPR_VSET_BUS;
7493	schedule_work(&ioa_cfg->work_q);
7494	return 0;
7495}
7496
7497/**
7498 * ipr_shutdown - Shutdown handler.
7499 * @pdev:	pci device struct
7500 *
7501 * This function is invoked upon system shutdown/reboot. It will issue
7502 * an adapter shutdown to the adapter to flush the write cache.
7503 *
7504 * Return value:
7505 * 	none
7506 **/
7507static void ipr_shutdown(struct pci_dev *pdev)
7508{
7509	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7510	unsigned long lock_flags = 0;
7511
7512	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7513	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7514	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7515	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7516}
7517
7518static struct pci_device_id ipr_pci_table[] __devinitdata = {
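	/* Gemstone, Citrine and Obsidian family boards use ipr_chip_cfg[0] */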
7519	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7520		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
7521		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7522	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7523		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
7524	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7525	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7526		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
7527	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7528	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7529		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
7530	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7531	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7532		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
7533	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7534	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7535		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
7536	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7537	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7538		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
7539	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7540	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7541		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
7542		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7543	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7544	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7545	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7546	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7547	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7548	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7549	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7550	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7551	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7552	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7553	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7554	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7555	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7556	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7557	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7558	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7559	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7560	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7561	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7562	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
7563	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7564	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7565	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
7566	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
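	/* Snipe and Scamp family boards use ipr_chip_cfg[1] */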
7567	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7568		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
7569		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7570	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7571		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
7572		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7573	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7574		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
7575		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7576	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7577		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
7578		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7579	{ }
7580};
7581MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7582
7583static struct pci_error_handlers ipr_err_handler = {
7584	.error_detected = ipr_pci_error_detected,
7585	.slot_reset = ipr_pci_slot_reset,
7586};
7587
7588static struct pci_driver ipr_driver = {
7589	.name = IPR_NAME,
7590	.id_table = ipr_pci_table,
7591	.probe = ipr_probe,
7592	.remove = ipr_remove,
7593	.shutdown = ipr_shutdown,
7594	.err_handler = &ipr_err_handler,
7595};
7596
7597/**
7598 * ipr_init - Module entry point
7599 *
7600 * Return value:
7601 * 	0 on success / negative value on failure
7602 **/
7603static int __init ipr_init(void)
7604{
7605	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7606		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7607
7608	return pci_register_driver(&ipr_driver);
7609}
7610
7611/**
7612 * ipr_exit - Module unload
7613 *
7614 * Module unload entry point.
7615 *
7616 * Return value:
7617 * 	none
7618 **/
7619static void __exit ipr_exit(void)
7620{
7621	pci_unregister_driver(&ipr_driver);
7622}
7623
7624module_init(ipr_init);
7625module_exit(ipr_exit);
7626