ipr.c revision fb3ed3cb4b8ba84e5b0899ef752495f213973843
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/config.h>
58#include <linux/fs.h>
59#include <linux/init.h>
60#include <linux/types.h>
61#include <linux/errno.h>
62#include <linux/kernel.h>
63#include <linux/ioport.h>
64#include <linux/delay.h>
65#include <linux/pci.h>
66#include <linux/wait.h>
67#include <linux/spinlock.h>
68#include <linux/sched.h>
69#include <linux/interrupt.h>
70#include <linux/blkdev.h>
71#include <linux/firmware.h>
72#include <linux/module.h>
73#include <linux/moduleparam.h>
74#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
82#include <scsi/scsi_request.h>
83#include "ipr.h"
84
85/*
86 *   Global Data
87 */
88static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
89static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
94static unsigned int ipr_enable_cache = 1;
95static unsigned int ipr_debug = 0;
96static int ipr_auto_create = 1;
97static DEFINE_SPINLOCK(ipr_driver_lock);
98
99/* This table describes the differences between DMA controller chips */
100static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101	{ /* Gemstone, Citrine, and Obsidian */
102		.mailbox = 0x0042C,
103		.cache_line_size = 0x20,
104		{
105			.set_interrupt_mask_reg = 0x0022C,
106			.clr_interrupt_mask_reg = 0x00230,
107			.sense_interrupt_mask_reg = 0x0022C,
108			.clr_interrupt_reg = 0x00228,
109			.sense_interrupt_reg = 0x00224,
110			.ioarrin_reg = 0x00404,
111			.sense_uproc_interrupt_reg = 0x00214,
112			.set_uproc_interrupt_reg = 0x00214,
113			.clr_uproc_interrupt_reg = 0x00218
114		}
115	},
116	{ /* Snipe and Scamp */
117		.mailbox = 0x0052C,
118		.cache_line_size = 0x20,
119		{
120			.set_interrupt_mask_reg = 0x00288,
121			.clr_interrupt_mask_reg = 0x0028C,
122			.sense_interrupt_mask_reg = 0x00288,
123			.clr_interrupt_reg = 0x00284,
124			.sense_interrupt_reg = 0x00280,
125			.ioarrin_reg = 0x00504,
126			.sense_uproc_interrupt_reg = 0x00290,
127			.set_uproc_interrupt_reg = 0x00290,
128			.clr_uproc_interrupt_reg = 0x00294
129		}
130	},
131};
132
133static const struct ipr_chip_t ipr_chip[] = {
134	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
135	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
136	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
137	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
138	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140};
141
142static int ipr_max_bus_speeds [] = {
143	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
144};
145
146MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
147MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
148module_param_named(max_speed, ipr_max_speed, uint, 0);
149MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
150module_param_named(log_level, ipr_log_level, uint, 0);
151MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
152module_param_named(testmode, ipr_testmode, int, 0);
153MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
154module_param_named(fastfail, ipr_fastfail, int, 0);
155MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
156module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
157MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
158module_param_named(enable_cache, ipr_enable_cache, int, 0);
159MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160module_param_named(debug, ipr_debug, int, 0);
161MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
162module_param_named(auto_create, ipr_auto_create, int, 0);
163MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164MODULE_LICENSE("GPL");
165MODULE_VERSION(IPR_DRIVER_VERSION);
166
167/*  A constant array of IOASCs/URCs/Error Messages */
168static const
169struct ipr_error_table_t ipr_error_table[] = {
170	{0x00000000, 1, 1,
171	"8155: An unknown error was received"},
172	{0x00330000, 0, 0,
173	"Soft underlength error"},
174	{0x005A0000, 0, 0,
175	"Command to be cancelled not found"},
176	{0x00808000, 0, 0,
177	"Qualified success"},
178	{0x01080000, 1, 1,
179	"FFFE: Soft device bus error recovered by the IOA"},
180	{0x01170600, 0, 1,
181	"FFF9: Device sector reassign successful"},
182	{0x01170900, 0, 1,
183	"FFF7: Media error recovered by device rewrite procedures"},
184	{0x01180200, 0, 1,
185	"7001: IOA sector reassignment successful"},
186	{0x01180500, 0, 1,
187	"FFF9: Soft media error. Sector reassignment recommended"},
188	{0x01180600, 0, 1,
189	"FFF7: Media error recovered by IOA rewrite procedures"},
190	{0x01418000, 0, 1,
191	"FF3D: Soft PCI bus error recovered by the IOA"},
192	{0x01440000, 1, 1,
193	"FFF6: Device hardware error recovered by the IOA"},
194	{0x01448100, 0, 1,
195	"FFF6: Device hardware error recovered by the device"},
196	{0x01448200, 1, 1,
197	"FF3D: Soft IOA error recovered by the IOA"},
198	{0x01448300, 0, 1,
199	"FFFA: Undefined device response recovered by the IOA"},
200	{0x014A0000, 1, 1,
201	"FFF6: Device bus error, message or command phase"},
202	{0x015D0000, 0, 1,
203	"FFF6: Failure prediction threshold exceeded"},
204	{0x015D9200, 0, 1,
205	"8009: Impending cache battery pack failure"},
206	{0x02040400, 0, 0,
207	"34FF: Disk device format in progress"},
208	{0x023F0000, 0, 0,
209	"Synchronization required"},
210	{0x024E0000, 0, 0,
211	"Not ready, IOA shutdown"},
212	{0x025A0000, 0, 0,
213	"Not ready, IOA has been shutdown"},
214	{0x02670100, 0, 1,
215	"3020: Storage subsystem configuration error"},
216	{0x03110B00, 0, 0,
217	"FFF5: Medium error, data unreadable, recommend reassign"},
218	{0x03110C00, 0, 0,
219	"7000: Medium error, data unreadable, do not reassign"},
220	{0x03310000, 0, 1,
221	"FFF3: Disk media format bad"},
222	{0x04050000, 0, 1,
223	"3002: Addressed device failed to respond to selection"},
224	{0x04080000, 1, 1,
225	"3100: Device bus error"},
226	{0x04080100, 0, 1,
227	"3109: IOA timed out a device command"},
228	{0x04088000, 0, 0,
229	"3120: SCSI bus is not operational"},
230	{0x04118000, 0, 1,
231	"9000: IOA reserved area data check"},
232	{0x04118100, 0, 1,
233	"9001: IOA reserved area invalid data pattern"},
234	{0x04118200, 0, 1,
235	"9002: IOA reserved area LRC error"},
236	{0x04320000, 0, 1,
237	"102E: Out of alternate sectors for disk storage"},
238	{0x04330000, 1, 1,
239	"FFF4: Data transfer underlength error"},
240	{0x04338000, 1, 1,
241	"FFF4: Data transfer overlength error"},
242	{0x043E0100, 0, 1,
243	"3400: Logical unit failure"},
244	{0x04408500, 0, 1,
245	"FFF4: Device microcode is corrupt"},
246	{0x04418000, 1, 1,
247	"8150: PCI bus error"},
248	{0x04430000, 1, 0,
249	"Unsupported device bus message received"},
250	{0x04440000, 1, 1,
251	"FFF4: Disk device problem"},
252	{0x04448200, 1, 1,
253	"8150: Permanent IOA failure"},
254	{0x04448300, 0, 1,
255	"3010: Disk device returned wrong response to IOA"},
256	{0x04448400, 0, 1,
257	"8151: IOA microcode error"},
258	{0x04448500, 0, 0,
259	"Device bus status error"},
260	{0x04448600, 0, 1,
261	"8157: IOA error requiring IOA reset to recover"},
262	{0x04490000, 0, 0,
263	"Message reject received from the device"},
264	{0x04449200, 0, 1,
265	"8008: A permanent cache battery pack failure occurred"},
266	{0x0444A000, 0, 1,
267	"9090: Disk unit has been modified after the last known status"},
268	{0x0444A200, 0, 1,
269	"9081: IOA detected device error"},
270	{0x0444A300, 0, 1,
271	"9082: IOA detected device error"},
272	{0x044A0000, 1, 1,
273	"3110: Device bus error, message or command phase"},
274	{0x04670400, 0, 1,
275	"9091: Incorrect hardware configuration change has been detected"},
276	{0x04678000, 0, 1,
277	"9073: Invalid multi-adapter configuration"},
278	{0x046E0000, 0, 1,
279	"FFF4: Command to logical unit failed"},
280	{0x05240000, 1, 0,
281	"Illegal request, invalid request type or request packet"},
282	{0x05250000, 0, 0,
283	"Illegal request, invalid resource handle"},
284	{0x05258000, 0, 0,
285	"Illegal request, commands not allowed to this device"},
286	{0x05258100, 0, 0,
287	"Illegal request, command not allowed to a secondary adapter"},
288	{0x05260000, 0, 0,
289	"Illegal request, invalid field in parameter list"},
290	{0x05260100, 0, 0,
291	"Illegal request, parameter not supported"},
292	{0x05260200, 0, 0,
293	"Illegal request, parameter value invalid"},
294	{0x052C0000, 0, 0,
295	"Illegal request, command sequence error"},
296	{0x052C8000, 1, 0,
297	"Illegal request, dual adapter support not enabled"},
298	{0x06040500, 0, 1,
299	"9031: Array protection temporarily suspended, protection resuming"},
300	{0x06040600, 0, 1,
301	"9040: Array protection temporarily suspended, protection resuming"},
302	{0x06290000, 0, 1,
303	"FFFB: SCSI bus was reset"},
304	{0x06290500, 0, 0,
305	"FFFE: SCSI bus transition to single ended"},
306	{0x06290600, 0, 0,
307	"FFFE: SCSI bus transition to LVD"},
308	{0x06298000, 0, 1,
309	"FFFB: SCSI bus was reset by another initiator"},
310	{0x063F0300, 0, 1,
311	"3029: A device replacement has occurred"},
312	{0x064C8000, 0, 1,
313	"9051: IOA cache data exists for a missing or failed device"},
314	{0x064C8100, 0, 1,
315	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
316	{0x06670100, 0, 1,
317	"9025: Disk unit is not supported at its physical location"},
318	{0x06670600, 0, 1,
319	"3020: IOA detected a SCSI bus configuration error"},
320	{0x06678000, 0, 1,
321	"3150: SCSI bus configuration error"},
322	{0x06678100, 0, 1,
323	"9074: Asymmetric advanced function disk configuration"},
324	{0x06690200, 0, 1,
325	"9041: Array protection temporarily suspended"},
326	{0x06698200, 0, 1,
327	"9042: Corrupt array parity detected on specified device"},
328	{0x066B0200, 0, 1,
329	"9030: Array no longer protected due to missing or failed disk unit"},
330	{0x066B8000, 0, 1,
331	"9071: Link operational transition"},
332	{0x066B8100, 0, 1,
333	"9072: Link not operational transition"},
334	{0x066B8200, 0, 1,
335	"9032: Array exposed but still protected"},
336	{0x07270000, 0, 0,
337	"Failure due to other device"},
338	{0x07278000, 0, 1,
339	"9008: IOA does not support functions expected by devices"},
340	{0x07278100, 0, 1,
341	"9010: Cache data associated with attached devices cannot be found"},
342	{0x07278200, 0, 1,
343	"9011: Cache data belongs to devices other than those attached"},
344	{0x07278400, 0, 1,
345	"9020: Array missing 2 or more devices with only 1 device present"},
346	{0x07278500, 0, 1,
347	"9021: Array missing 2 or more devices with 2 or more devices present"},
348	{0x07278600, 0, 1,
349	"9022: Exposed array is missing a required device"},
350	{0x07278700, 0, 1,
351	"9023: Array member(s) not at required physical locations"},
352	{0x07278800, 0, 1,
353	"9024: Array not functional due to present hardware configuration"},
354	{0x07278900, 0, 1,
355	"9026: Array not functional due to present hardware configuration"},
356	{0x07278A00, 0, 1,
357	"9027: Array is missing a device and parity is out of sync"},
358	{0x07278B00, 0, 1,
359	"9028: Maximum number of arrays already exist"},
360	{0x07278C00, 0, 1,
361	"9050: Required cache data cannot be located for a disk unit"},
362	{0x07278D00, 0, 1,
363	"9052: Cache data exists for a device that has been modified"},
364	{0x07278F00, 0, 1,
365	"9054: IOA resources not available due to previous problems"},
366	{0x07279100, 0, 1,
367	"9092: Disk unit requires initialization before use"},
368	{0x07279200, 0, 1,
369	"9029: Incorrect hardware configuration change has been detected"},
370	{0x07279600, 0, 1,
371	"9060: One or more disk pairs are missing from an array"},
372	{0x07279700, 0, 1,
373	"9061: One or more disks are missing from an array"},
374	{0x07279800, 0, 1,
375	"9062: One or more disks are missing from an array"},
376	{0x07279900, 0, 1,
377	"9063: Maximum number of functional arrays has been exceeded"},
378	{0x0B260000, 0, 0,
379	"Aborted command, invalid descriptor"},
380	{0x0B5A0000, 0, 0,
381	"Command terminated by host"}
382};
383
384static const struct ipr_ses_table_entry ipr_ses_table[] = {
385	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
386	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
387	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
388	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
389	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
390	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
391	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
392	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
393	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
394	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
395	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
396	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
397	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
398};
399
400/*
401 *  Function Prototypes
402 */
403static int ipr_reset_alert(struct ipr_cmnd *);
404static void ipr_process_ccn(struct ipr_cmnd *);
405static void ipr_process_error(struct ipr_cmnd *);
406static void ipr_reset_ioa_job(struct ipr_cmnd *);
407static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
408				   enum ipr_shutdown_type);
409
410#ifdef CONFIG_SCSI_IPR_TRACE
411/**
412 * ipr_trc_hook - Add a trace entry to the driver trace
413 * @ipr_cmd:	ipr command struct
414 * @type:		trace type
415 * @add_data:	additional data
416 *
417 * Return value:
418 * 	none
419 **/
420static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
421			 u8 type, u32 add_data)
422{
423	struct ipr_trace_entry *trace_entry;
424	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
425
426	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
427	trace_entry->time = jiffies;
428	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
429	trace_entry->type = type;
430	trace_entry->cmd_index = ipr_cmd->cmd_index;
431	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
432	trace_entry->u.add_data = add_data;
433}
434#else
435#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
436#endif
437
438/**
439 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
440 * @ipr_cmd:	ipr command struct
441 *
442 * Return value:
443 * 	none
444 **/
445static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
446{
447	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
448	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
449
450	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
451	ioarcb->write_data_transfer_length = 0;
452	ioarcb->read_data_transfer_length = 0;
453	ioarcb->write_ioadl_len = 0;
454	ioarcb->read_ioadl_len = 0;
455	ioasa->ioasc = 0;
456	ioasa->residual_data_len = 0;
457
458	ipr_cmd->scsi_cmd = NULL;
459	ipr_cmd->sense_buffer[0] = 0;
460	ipr_cmd->dma_use_sg = 0;
461}
462
463/**
464 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
465 * @ipr_cmd:	ipr command struct
466 *
467 * Return value:
468 * 	none
469 **/
470static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
471{
472	ipr_reinit_ipr_cmnd(ipr_cmd);
473	ipr_cmd->u.scratch = 0;
474	ipr_cmd->sibling = NULL;
475	init_timer(&ipr_cmd->timer);
476}
477
478/**
479 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
480 * @ioa_cfg:	ioa config struct
481 *
482 * Return value:
483 * 	pointer to ipr command struct
484 **/
485static
486struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
487{
488	struct ipr_cmnd *ipr_cmd;
489
490	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
491	list_del(&ipr_cmd->queue);
492	ipr_init_ipr_cmnd(ipr_cmd);
493
494	return ipr_cmd;
495}
496
497/**
498 * ipr_unmap_sglist - Unmap scatterlist if mapped
499 * @ioa_cfg:	ioa config struct
500 * @ipr_cmd:	ipr command struct
501 *
502 * Return value:
503 * 	nothing
504 **/
505static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
506			     struct ipr_cmnd *ipr_cmd)
507{
508	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
509
510	if (ipr_cmd->dma_use_sg) {
511		if (scsi_cmd->use_sg > 0) {
512			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
513				     scsi_cmd->use_sg,
514				     scsi_cmd->sc_data_direction);
515		} else {
516			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
517					 scsi_cmd->request_bufflen,
518					 scsi_cmd->sc_data_direction);
519		}
520	}
521}
522
523/**
524 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
525 * @ioa_cfg:	ioa config struct
526 * @clr_ints:     interrupts to clear
527 *
528 * This function masks all interrupts on the adapter, then clears the
529 * interrupts specified in the mask
530 *
531 * Return value:
532 * 	none
533 **/
534static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
535					  u32 clr_ints)
536{
537	volatile u32 int_reg;
538
539	/* Stop new interrupts */
540	ioa_cfg->allow_interrupts = 0;
541
542	/* Set interrupt mask to stop all new interrupts */
543	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
544
545	/* Clear any pending interrupts */
546	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
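	/* Read back the interrupt register so the posted mask/clear MMIO writes reach the adapter before we return */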
547	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
548}
549
550/**
551 * ipr_save_pcix_cmd_reg - Save PCI-X command register
552 * @ioa_cfg:	ioa config struct
553 *
554 * Return value:
555 * 	0 on success / -EIO on failure
556 **/
557static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
558{
559	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
560
561	if (pcix_cmd_reg == 0) {
562		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
563		return -EIO;
564	}
565
566	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
567				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
568		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
569		return -EIO;
570	}
571
572	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
573	return 0;
574}
575
576/**
577 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
578 * @ioa_cfg:	ioa config struct
579 *
580 * Return value:
581 * 	0 on success / -EIO on failure
582 **/
583static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
584{
585	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
586
587	if (pcix_cmd_reg) {
588		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
589					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
590			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
591			return -EIO;
592		}
593	} else {
594		dev_err(&ioa_cfg->pdev->dev,
595			"Failed to setup PCI-X command register\n");
596		return -EIO;
597	}
598
599	return 0;
600}
601
602/**
603 * ipr_scsi_eh_done - mid-layer done function for aborted ops
604 * @ipr_cmd:	ipr command struct
605 *
606 * This function is invoked by the interrupt handler for
607 * ops generated by the SCSI mid-layer which are being aborted.
608 *
609 * Return value:
610 * 	none
611 **/
612static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
613{
614	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
615	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
616
617	scsi_cmd->result |= (DID_ERROR << 16);
618
619	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
620	scsi_cmd->scsi_done(scsi_cmd);
621	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
622}
623
624/**
625 * ipr_fail_all_ops - Fails all outstanding ops.
626 * @ioa_cfg:	ioa config struct
627 *
628 * This function fails all outstanding ops.
629 *
630 * Return value:
631 * 	none
632 **/
633static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
634{
635	struct ipr_cmnd *ipr_cmd, *temp;
636
637	ENTER;
638	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
639		list_del(&ipr_cmd->queue);
640
641		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
642		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
643
644		if (ipr_cmd->scsi_cmd)
645			ipr_cmd->done = ipr_scsi_eh_done;
646
647		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
648		del_timer(&ipr_cmd->timer);
649		ipr_cmd->done(ipr_cmd);
650	}
651
652	LEAVE;
653}
654
655/**
656 * ipr_do_req -  Send driver initiated requests.
657 * @ipr_cmd:		ipr command struct
658 * @done:			done function
659 * @timeout_func:	timeout function
660 * @timeout:		timeout value
661 *
662 * This function sends the specified command to the adapter with the
663 * timeout given. The done function is invoked on command completion.
664 *
665 * Return value:
666 * 	none
667 **/
668static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
669		       void (*done) (struct ipr_cmnd *),
670		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
671{
672	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
673
674	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
675
676	ipr_cmd->done = done;
677
678	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
679	ipr_cmd->timer.expires = jiffies + timeout;
680	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
681
682	add_timer(&ipr_cmd->timer);
683
684	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
685
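	/* Make sure the IOARCB updates are visible in memory before handing its bus address to the adapter below */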
686	mb();
687	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
688	       ioa_cfg->regs.ioarrin_reg);
689}
690
691/**
692 * ipr_internal_cmd_done - Op done function for an internally generated op.
693 * @ipr_cmd:	ipr command struct
694 *
695 * This function is the op done function for an internally generated,
696 * blocking op. It simply wakes the sleeping thread.
697 *
698 * Return value:
699 * 	none
700 **/
701static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
702{
703	if (ipr_cmd->sibling)
704		ipr_cmd->sibling = NULL;
705	else
706		complete(&ipr_cmd->completion);
707}
708
709/**
710 * ipr_send_blocking_cmd - Send command and sleep on its completion.
711 * @ipr_cmd:	ipr command struct
712 * @timeout_func:	function to invoke if command times out
713 * @timeout:	timeout
714 *
715 * Return value:
716 * 	none
717 **/
718static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
719				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
720				  u32 timeout)
721{
722	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
723
724	init_completion(&ipr_cmd->completion);
725	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
726
727	spin_unlock_irq(ioa_cfg->host->host_lock);
728	wait_for_completion(&ipr_cmd->completion);
729	spin_lock_irq(ioa_cfg->host->host_lock);
730}
731
732/**
733 * ipr_send_hcam - Send an HCAM to the adapter.
734 * @ioa_cfg:	ioa config struct
735 * @type:		HCAM type
736 * @hostrcb:	hostrcb struct
737 *
738 * This function will send a Host Controlled Async command to the adapter.
739 * If HCAMs are currently not allowed to be issued to the adapter, it will
740 * place the hostrcb on the free queue.
741 *
742 * Return value:
743 * 	none
744 **/
745static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
746			  struct ipr_hostrcb *hostrcb)
747{
748	struct ipr_cmnd *ipr_cmd;
749	struct ipr_ioarcb *ioarcb;
750
751	if (ioa_cfg->allow_cmds) {
752		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
753		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
754		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
755
756		ipr_cmd->u.hostrcb = hostrcb;
757		ioarcb = &ipr_cmd->ioarcb;
758
759		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
760		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
761		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
762		ioarcb->cmd_pkt.cdb[1] = type;
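		/* CDB bytes 7-8 carry the HCAM buffer length, most significant byte first */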
763		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
764		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
765
766		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
767		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
768		ipr_cmd->ioadl[0].flags_and_data_len =
769			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
770		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
771
772		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
773			ipr_cmd->done = ipr_process_ccn;
774		else
775			ipr_cmd->done = ipr_process_error;
776
777		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
778
779		mb();
780		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
781		       ioa_cfg->regs.ioarrin_reg);
782	} else {
783		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
784	}
785}
786
787/**
788 * ipr_init_res_entry - Initialize a resource entry struct.
789 * @res:	resource entry struct
790 *
791 * Return value:
792 * 	none
793 **/
794static void ipr_init_res_entry(struct ipr_resource_entry *res)
795{
796	res->needs_sync_complete = 0;
797	res->in_erp = 0;
798	res->add_to_ml = 0;
799	res->del_from_ml = 0;
800	res->resetting_device = 0;
801	res->sdev = NULL;
802}
803
804/**
805 * ipr_handle_config_change - Handle a config change from the adapter
806 * @ioa_cfg:	ioa config struct
807 * @hostrcb:	hostrcb
808 *
809 * Return value:
810 * 	none
811 **/
812static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
813			      struct ipr_hostrcb *hostrcb)
814{
815	struct ipr_resource_entry *res = NULL;
816	struct ipr_config_table_entry *cfgte;
817	u32 is_ndn = 1;
818
819	cfgte = &hostrcb->hcam.u.ccn.cfgte;
820
821	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
822		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
823			    sizeof(cfgte->res_addr))) {
824			is_ndn = 0;
825			break;
826		}
827	}
828
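	/* No entry on the used resource queue matched the reported resource address, so treat this as a newly reported device */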
829	if (is_ndn) {
830		if (list_empty(&ioa_cfg->free_res_q)) {
831			ipr_send_hcam(ioa_cfg,
832				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
833				      hostrcb);
834			return;
835		}
836
837		res = list_entry(ioa_cfg->free_res_q.next,
838				 struct ipr_resource_entry, queue);
839
840		list_del(&res->queue);
841		ipr_init_res_entry(res);
842		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
843	}
844
845	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
846
847	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
848		if (res->sdev) {
849			res->del_from_ml = 1;
850			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
851			if (ioa_cfg->allow_ml_add_del)
852				schedule_work(&ioa_cfg->work_q);
853		} else
854			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
855	} else if (!res->sdev) {
856		res->add_to_ml = 1;
857		if (ioa_cfg->allow_ml_add_del)
858			schedule_work(&ioa_cfg->work_q);
859	}
860
861	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
862}
863
864/**
865 * ipr_process_ccn - Op done function for a CCN.
866 * @ipr_cmd:	ipr command struct
867 *
868 * This function is the op done function for a configuration
869 * change notification host controlled async from the adapter.
870 *
871 * Return value:
872 * 	none
873 **/
874static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
875{
876	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
877	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
878	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
879
880	list_del(&hostrcb->queue);
881	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
882
883	if (ioasc) {
884		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
885			dev_err(&ioa_cfg->pdev->dev,
886				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
887
888		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
889	} else {
890		ipr_handle_config_change(ioa_cfg, hostrcb);
891	}
892}
893
894/**
895 * ipr_log_vpd - Log the passed VPD to the error log.
896 * @vpd:		vendor/product id/sn struct
897 *
898 * Return value:
899 * 	none
900 **/
901static void ipr_log_vpd(struct ipr_vpd *vpd)
902{
903	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
904		    + IPR_SERIAL_NUM_LEN];
905
906	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
907	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
908	       IPR_PROD_ID_LEN);
909	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
910	ipr_err("Vendor/Product ID: %s\n", buffer);
911
912	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
913	buffer[IPR_SERIAL_NUM_LEN] = '\0';
914	ipr_err("    Serial Number: %s\n", buffer);
915}
916
917/**
918 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
919 * @vpd:		vendor/product id/sn/wwn struct
920 *
921 * Return value:
922 * 	none
923 **/
924static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
925{
926	ipr_log_vpd(&vpd->vpd);
927	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
928		be32_to_cpu(vpd->wwid[1]));
929}
930
931/**
932 * ipr_log_enhanced_cache_error - Log a cache error.
933 * @ioa_cfg:	ioa config struct
934 * @hostrcb:	hostrcb struct
935 *
936 * Return value:
937 * 	none
938 **/
939static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
940					 struct ipr_hostrcb *hostrcb)
941{
942	struct ipr_hostrcb_type_12_error *error =
943		&hostrcb->hcam.u.error.u.type_12_error;
944
945	ipr_err("-----Current Configuration-----\n");
946	ipr_err("Cache Directory Card Information:\n");
947	ipr_log_ext_vpd(&error->ioa_vpd);
948	ipr_err("Adapter Card Information:\n");
949	ipr_log_ext_vpd(&error->cfc_vpd);
950
951	ipr_err("-----Expected Configuration-----\n");
952	ipr_err("Cache Directory Card Information:\n");
953	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
954	ipr_err("Adapter Card Information:\n");
955	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
956
957	ipr_err("Additional IOA Data: %08X %08X %08X\n",
958		     be32_to_cpu(error->ioa_data[0]),
959		     be32_to_cpu(error->ioa_data[1]),
960		     be32_to_cpu(error->ioa_data[2]));
961}
962
963/**
964 * ipr_log_cache_error - Log a cache error.
965 * @ioa_cfg:	ioa config struct
966 * @hostrcb:	hostrcb struct
967 *
968 * Return value:
969 * 	none
970 **/
971static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
972				struct ipr_hostrcb *hostrcb)
973{
974	struct ipr_hostrcb_type_02_error *error =
975		&hostrcb->hcam.u.error.u.type_02_error;
976
977	ipr_err("-----Current Configuration-----\n");
978	ipr_err("Cache Directory Card Information:\n");
979	ipr_log_vpd(&error->ioa_vpd);
980	ipr_err("Adapter Card Information:\n");
981	ipr_log_vpd(&error->cfc_vpd);
982
983	ipr_err("-----Expected Configuration-----\n");
984	ipr_err("Cache Directory Card Information:\n");
985	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
986	ipr_err("Adapter Card Information:\n");
987	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
988
989	ipr_err("Additional IOA Data: %08X %08X %08X\n",
990		     be32_to_cpu(error->ioa_data[0]),
991		     be32_to_cpu(error->ioa_data[1]),
992		     be32_to_cpu(error->ioa_data[2]));
993}
994
995/**
996 * ipr_log_enhanced_config_error - Log a configuration error.
997 * @ioa_cfg:	ioa config struct
998 * @hostrcb:	hostrcb struct
999 *
1000 * Return value:
1001 * 	none
1002 **/
1003static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1004					  struct ipr_hostrcb *hostrcb)
1005{
1006	int errors_logged, i;
1007	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1008	struct ipr_hostrcb_type_13_error *error;
1009
1010	error = &hostrcb->hcam.u.error.u.type_13_error;
1011	errors_logged = be32_to_cpu(error->errors_logged);
1012
1013	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1014		be32_to_cpu(error->errors_detected), errors_logged);
1015
1016	dev_entry = error->dev;
1017
1018	for (i = 0; i < errors_logged; i++, dev_entry++) {
1019		ipr_err_separator;
1020
1021		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1022		ipr_log_ext_vpd(&dev_entry->vpd);
1023
1024		ipr_err("-----New Device Information-----\n");
1025		ipr_log_ext_vpd(&dev_entry->new_vpd);
1026
1027		ipr_err("Cache Directory Card Information:\n");
1028		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1029
1030		ipr_err("Adapter Card Information:\n");
1031		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1032	}
1033}
1034
1035/**
1036 * ipr_log_config_error - Log a configuration error.
1037 * @ioa_cfg:	ioa config struct
1038 * @hostrcb:	hostrcb struct
1039 *
1040 * Return value:
1041 * 	none
1042 **/
1043static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1044				 struct ipr_hostrcb *hostrcb)
1045{
1046	int errors_logged, i;
1047	struct ipr_hostrcb_device_data_entry *dev_entry;
1048	struct ipr_hostrcb_type_03_error *error;
1049
1050	error = &hostrcb->hcam.u.error.u.type_03_error;
1051	errors_logged = be32_to_cpu(error->errors_logged);
1052
1053	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1054		be32_to_cpu(error->errors_detected), errors_logged);
1055
1056	dev_entry = error->dev;
1057
1058	for (i = 0; i < errors_logged; i++, dev_entry++) {
1059		ipr_err_separator;
1060
1061		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1062		ipr_log_vpd(&dev_entry->vpd);
1063
1064		ipr_err("-----New Device Information-----\n");
1065		ipr_log_vpd(&dev_entry->new_vpd);
1066
1067		ipr_err("Cache Directory Card Information:\n");
1068		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1069
1070		ipr_err("Adapter Card Information:\n");
1071		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1072
1073		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1074			be32_to_cpu(dev_entry->ioa_data[0]),
1075			be32_to_cpu(dev_entry->ioa_data[1]),
1076			be32_to_cpu(dev_entry->ioa_data[2]),
1077			be32_to_cpu(dev_entry->ioa_data[3]),
1078			be32_to_cpu(dev_entry->ioa_data[4]));
1079	}
1080}
1081
1082/**
1083 * ipr_log_enhanced_array_error - Log an array configuration error.
1084 * @ioa_cfg:	ioa config struct
1085 * @hostrcb:	hostrcb struct
1086 *
1087 * Return value:
1088 * 	none
1089 **/
1090static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1091					 struct ipr_hostrcb *hostrcb)
1092{
1093	int i, num_entries;
1094	struct ipr_hostrcb_type_14_error *error;
1095	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1096	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1097
1098	error = &hostrcb->hcam.u.error.u.type_14_error;
1099
1100	ipr_err_separator;
1101
1102	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1103		error->protection_level,
1104		ioa_cfg->host->host_no,
1105		error->last_func_vset_res_addr.bus,
1106		error->last_func_vset_res_addr.target,
1107		error->last_func_vset_res_addr.lun);
1108
1109	ipr_err_separator;
1110
1111	array_entry = error->array_member;
1112	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1113			    ARRAY_SIZE(error->array_member));
1114
1115	for (i = 0; i < num_entries; i++, array_entry++) {
1116		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1117			continue;
1118
1119		if (be32_to_cpu(error->exposed_mode_adn) == i)
1120			ipr_err("Exposed Array Member %d:\n", i);
1121		else
1122			ipr_err("Array Member %d:\n", i);
1123
1124		ipr_log_ext_vpd(&array_entry->vpd);
1125		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1126		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1127				 "Expected Location");
1128
1129		ipr_err_separator;
1130	}
1131}
1132
1133/**
1134 * ipr_log_array_error - Log an array configuration error.
1135 * @ioa_cfg:	ioa config struct
1136 * @hostrcb:	hostrcb struct
1137 *
1138 * Return value:
1139 * 	none
1140 **/
1141static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1142				struct ipr_hostrcb *hostrcb)
1143{
1144	int i;
1145	struct ipr_hostrcb_type_04_error *error;
1146	struct ipr_hostrcb_array_data_entry *array_entry;
1147	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1148
1149	error = &hostrcb->hcam.u.error.u.type_04_error;
1150
1151	ipr_err_separator;
1152
1153	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1154		error->protection_level,
1155		ioa_cfg->host->host_no,
1156		error->last_func_vset_res_addr.bus,
1157		error->last_func_vset_res_addr.target,
1158		error->last_func_vset_res_addr.lun);
1159
1160	ipr_err_separator;
1161
1162	array_entry = error->array_member;
1163
1164	for (i = 0; i < 18; i++) {
1165		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1166			continue;
1167
1168		if (be32_to_cpu(error->exposed_mode_adn) == i)
1169			ipr_err("Exposed Array Member %d:\n", i);
1170		else
1171			ipr_err("Array Member %d:\n", i);
1172
1173		ipr_log_vpd(&array_entry->vpd);
1174
1175		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1176		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1177				 "Expected Location");
1178
1179		ipr_err_separator;
1180
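		/* This overlay splits the 18 members across two arrays; after the first 10 entries, continue in array_member2 */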
1181		if (i == 9)
1182			array_entry = error->array_member2;
1183		else
1184			array_entry++;
1185	}
1186}
1187
1188/**
1189 * ipr_log_hex_data - Log additional hex IOA error data.
1190 * @data:		IOA error data
1191 * @len:		data length
1192 *
1193 * Return value:
1194 * 	none
1195 **/
1196static void ipr_log_hex_data(u32 *data, int len)
1197{
1198	int i;
1199
1200	if (len == 0)
1201		return;
1202
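	/* Dump four 32-bit words per line, prefixed with the byte offset of the first word */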
1203	for (i = 0; i < len / 4; i += 4) {
1204		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1205			be32_to_cpu(data[i]),
1206			be32_to_cpu(data[i+1]),
1207			be32_to_cpu(data[i+2]),
1208			be32_to_cpu(data[i+3]));
1209	}
1210}
1211
1212/**
1213 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1214 * @ioa_cfg:	ioa config struct
1215 * @hostrcb:	hostrcb struct
1216 *
1217 * Return value:
1218 * 	none
1219 **/
1220static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1221					    struct ipr_hostrcb *hostrcb)
1222{
1223	struct ipr_hostrcb_type_17_error *error;
1224
1225	error = &hostrcb->hcam.u.error.u.type_17_error;
1226	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1227
1228	ipr_err("%s\n", error->failure_reason);
1229	ipr_err("Remote Adapter VPD:\n");
1230	ipr_log_ext_vpd(&error->vpd);
1231	ipr_log_hex_data(error->data,
1232			 be32_to_cpu(hostrcb->hcam.length) -
1233			 (offsetof(struct ipr_hostrcb_error, u) +
1234			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1235}
1236
1237/**
1238 * ipr_log_dual_ioa_error - Log a dual adapter error.
1239 * @ioa_cfg:	ioa config struct
1240 * @hostrcb:	hostrcb struct
1241 *
1242 * Return value:
1243 * 	none
1244 **/
1245static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1246				   struct ipr_hostrcb *hostrcb)
1247{
1248	struct ipr_hostrcb_type_07_error *error;
1249
1250	error = &hostrcb->hcam.u.error.u.type_07_error;
1251	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1252
1253	ipr_err("%s\n", error->failure_reason);
1254	ipr_err("Remote Adapter VPD:\n");
1255	ipr_log_vpd(&error->vpd);
1256	ipr_log_hex_data(error->data,
1257			 be32_to_cpu(hostrcb->hcam.length) -
1258			 (offsetof(struct ipr_hostrcb_error, u) +
1259			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1260}
1261
1262/**
1263 * ipr_log_generic_error - Log an adapter error.
1264 * @ioa_cfg:	ioa config struct
1265 * @hostrcb:	hostrcb struct
1266 *
1267 * Return value:
1268 * 	none
1269 **/
1270static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1271				  struct ipr_hostrcb *hostrcb)
1272{
1273	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1274			 be32_to_cpu(hostrcb->hcam.length));
1275}
1276
1277/**
1278 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1279 * @ioasc:	IOASC
1280 *
1281 * This function will return the index into the ipr_error_table
1282 * for the specified IOASC. If the IOASC is not in the table,
1283 * 0 will be returned, which points to the entry used for unknown errors.
1284 *
1285 * Return value:
1286 * 	index into the ipr_error_table
1287 **/
1288static u32 ipr_get_error(u32 ioasc)
1289{
1290	int i;
1291
1292	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1293		if (ipr_error_table[i].ioasc == ioasc)
1294			return i;
1295
1296	return 0;
1297}
1298
1299/**
1300 * ipr_handle_log_data - Log an adapter error.
1301 * @ioa_cfg:	ioa config struct
1302 * @hostrcb:	hostrcb struct
1303 *
1304 * This function logs an adapter error to the system.
1305 *
1306 * Return value:
1307 * 	none
1308 **/
1309static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1310				struct ipr_hostrcb *hostrcb)
1311{
1312	u32 ioasc;
1313	int error_index;
1314
1315	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1316		return;
1317
1318	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1319		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1320
1321	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1322
1323	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1324	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1325		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1326		scsi_report_bus_reset(ioa_cfg->host,
1327				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1328	}
1329
1330	error_index = ipr_get_error(ioasc);
1331
1332	if (!ipr_error_table[error_index].log_hcam)
1333		return;
1334
1335	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1336		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1337			   "%s\n", ipr_error_table[error_index].error);
1338	} else {
1339		dev_err(&ioa_cfg->pdev->dev, "%s\n",
1340			ipr_error_table[error_index].error);
1341	}
1342
1343	/* Set indication we have logged an error */
1344	ioa_cfg->errors_logged++;
1345
1346	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1347		return;
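	/* Clamp the length the adapter reported so the overlay log routines cannot read past the hostrcb buffer */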
1348	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1349		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1350
1351	switch (hostrcb->hcam.overlay_id) {
1352	case IPR_HOST_RCB_OVERLAY_ID_2:
1353		ipr_log_cache_error(ioa_cfg, hostrcb);
1354		break;
1355	case IPR_HOST_RCB_OVERLAY_ID_3:
1356		ipr_log_config_error(ioa_cfg, hostrcb);
1357		break;
1358	case IPR_HOST_RCB_OVERLAY_ID_4:
1359	case IPR_HOST_RCB_OVERLAY_ID_6:
1360		ipr_log_array_error(ioa_cfg, hostrcb);
1361		break;
1362	case IPR_HOST_RCB_OVERLAY_ID_7:
1363		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1364		break;
1365	case IPR_HOST_RCB_OVERLAY_ID_12:
1366		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1367		break;
1368	case IPR_HOST_RCB_OVERLAY_ID_13:
1369		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1370		break;
1371	case IPR_HOST_RCB_OVERLAY_ID_14:
1372	case IPR_HOST_RCB_OVERLAY_ID_16:
1373		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1374		break;
1375	case IPR_HOST_RCB_OVERLAY_ID_17:
1376		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1377		break;
1378	case IPR_HOST_RCB_OVERLAY_ID_1:
1379	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1380	default:
1381		ipr_log_generic_error(ioa_cfg, hostrcb);
1382		break;
1383	}
1384}
1385
1386/**
1387 * ipr_process_error - Op done function for an adapter error log.
1388 * @ipr_cmd:	ipr command struct
1389 *
1390 * This function is the op done function for an error log host
1391 * controlled async from the adapter. It will log the error and
1392 * send the HCAM back to the adapter.
1393 *
1394 * Return value:
1395 * 	none
1396 **/
1397static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1398{
1399	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1400	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1401	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1402
1403	list_del(&hostrcb->queue);
1404	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1405
1406	if (!ioasc) {
1407		ipr_handle_log_data(ioa_cfg, hostrcb);
1408	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1409		dev_err(&ioa_cfg->pdev->dev,
1410			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1411	}
1412
1413	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1414}
1415
1416/**
1417 * ipr_timeout -  An internally generated op has timed out.
1418 * @ipr_cmd:	ipr command struct
1419 *
1420 * This function blocks host requests and initiates an
1421 * adapter reset.
1422 *
1423 * Return value:
1424 * 	none
1425 **/
1426static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1427{
1428	unsigned long lock_flags = 0;
1429	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1430
1431	ENTER;
1432	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1433
1434	ioa_cfg->errors_logged++;
1435	dev_err(&ioa_cfg->pdev->dev,
1436		"Adapter being reset due to command timeout.\n");
1437
1438	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1439		ioa_cfg->sdt_state = GET_DUMP;
1440
1441	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1442		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1443
1444	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1445	LEAVE;
1446}
1447
1448/**
1449 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1450 * @ipr_cmd:	ipr command struct
1451 *
1452 * This function blocks host requests and initiates an
1453 * adapter reset.
1454 *
1455 * Return value:
1456 * 	none
1457 **/
1458static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1459{
1460	unsigned long lock_flags = 0;
1461	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1462
1463	ENTER;
1464	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1465
1466	ioa_cfg->errors_logged++;
1467	dev_err(&ioa_cfg->pdev->dev,
1468		"Adapter timed out transitioning to operational.\n");
1469
1470	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1471		ioa_cfg->sdt_state = GET_DUMP;
1472
1473	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1474		if (ipr_fastfail)
1475			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1476		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1477	}
1478
1479	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1480	LEAVE;
1481}
1482
1483/**
1484 * ipr_reset_reload - Reset/Reload the IOA
1485 * @ioa_cfg:		ioa config struct
1486 * @shutdown_type:	shutdown type
1487 *
1488 * This function resets the adapter and re-initializes it.
1489 * This function assumes that all new host commands have been stopped.
1490 * Return value:
1491 * 	SUCCESS / FAILED
1492 **/
1493static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1494			    enum ipr_shutdown_type shutdown_type)
1495{
1496	if (!ioa_cfg->in_reset_reload)
1497		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1498
1499	spin_unlock_irq(ioa_cfg->host->host_lock);
1500	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1501	spin_lock_irq(ioa_cfg->host->host_lock);
1502
1503	/* If we got hit with a host reset while we were already resetting
1504	 * the adapter for some reason, and that reset failed, fail the host reset too. */
1505	if (ioa_cfg->ioa_is_dead) {
1506		ipr_trace;
1507		return FAILED;
1508	}
1509
1510	return SUCCESS;
1511}
1512
1513/**
1514 * ipr_find_ses_entry - Find matching SES in SES table
1515 * @res:	resource entry struct of SES
1516 *
1517 * Return value:
1518 * 	pointer to SES table entry / NULL on failure
1519 **/
1520static const struct ipr_ses_table_entry *
1521ipr_find_ses_entry(struct ipr_resource_entry *res)
1522{
1523	int i, j, matches;
1524	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1525
1526	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
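		/* An 'X' in compare_product_id_byte marks a product ID byte that must match; any other character is a wildcard */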
1527		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1528			if (ste->compare_product_id_byte[j] == 'X') {
1529				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1530					matches++;
1531				else
1532					break;
1533			} else
1534				matches++;
1535		}
1536
1537		if (matches == IPR_PROD_ID_LEN)
1538			return ste;
1539	}
1540
1541	return NULL;
1542}
1543
1544/**
1545 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1546 * @ioa_cfg:	ioa config struct
1547 * @bus:		SCSI bus
1548 * @bus_width:	bus width
1549 *
1550 * Return value:
1551 *	SCSI bus speed in units of 100 kHz (e.g. 1600 = 160 MHz).
1552 *	For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
1553 *	twice the bus clock rate (e.g. for a wide enabled bus, a max of
1554 *	160 MHz means a max of 320 MB/sec).
1555 **/
1556static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1557{
1558	struct ipr_resource_entry *res;
1559	const struct ipr_ses_table_entry *ste;
1560	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1561
1562	/* Loop through each config table entry in the config table buffer */
1563	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1564		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1565			continue;
1566
1567		if (bus != res->cfgte.res_addr.bus)
1568			continue;
1569
1570		if (!(ste = ipr_find_ses_entry(res)))
1571			continue;
1572
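		/* Convert the SES table limit (MB/sec) to a bus clock in 100 kHz units for this bus width: e.g. 160 MB/sec on a 16-bit bus gives 800, i.e. 80 MHz */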
1573		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1574	}
1575
1576	return max_xfer_rate;
1577}
1578
1579/**
1580 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1581 * @ioa_cfg:		ioa config struct
1582 * @max_delay:		max delay in micro-seconds to wait
1583 *
1584 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1585 *
1586 * Return value:
1587 * 	0 on success / other on failure
1588 **/
1589static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1590{
1591	volatile u32 pcii_reg;
1592	int delay = 1;
1593
1594	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1595	while (delay < max_delay) {
1596		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1597
1598		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1599			return 0;
1600
1601		/* udelay cannot be used if delay is more than a few milliseconds */
1602		if ((delay / 1000) > MAX_UDELAY_MS)
1603			mdelay(delay / 1000);
1604		else
1605			udelay(delay);
1606
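		/* Double the polling interval each pass (exponential backoff) until max_delay is exceeded */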
1607		delay += delay;
1608	}
1609	return -EIO;
1610}
1611
1612/**
1613 * ipr_get_ldump_data_section - Dump IOA memory
1614 * @ioa_cfg:			ioa config struct
1615 * @start_addr:			adapter address to dump
1616 * @dest:				destination kernel buffer
1617 * @length_in_words:	length to dump in 4 byte words
1618 *
1619 * Return value:
1620 * 	0 on success / -EIO on failure
1621 **/
1622static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1623				      u32 start_addr,
1624				      __be32 *dest, u32 length_in_words)
1625{
1626	volatile u32 temp_pcii_reg;
1627	int i, delay = 0;
1628
1629	/* Write IOA interrupt reg starting LDUMP state  */
1630	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1631	       ioa_cfg->regs.set_uproc_interrupt_reg);
1632
1633	/* Wait for IO debug acknowledge */
1634	if (ipr_wait_iodbg_ack(ioa_cfg,
1635			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1636		dev_err(&ioa_cfg->pdev->dev,
1637			"IOA dump long data transfer timeout\n");
1638		return -EIO;
1639	}
1640
1641	/* Signal LDUMP interlocked - clear IO debug ack */
1642	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1643	       ioa_cfg->regs.clr_interrupt_reg);
1644
1645	/* Write Mailbox with starting address */
1646	writel(start_addr, ioa_cfg->ioa_mailbox);
1647
1648	/* Signal address valid - clear IOA Reset alert */
1649	writel(IPR_UPROCI_RESET_ALERT,
1650	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1651
1652	for (i = 0; i < length_in_words; i++) {
1653		/* Wait for IO debug acknowledge */
1654		if (ipr_wait_iodbg_ack(ioa_cfg,
1655				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1656			dev_err(&ioa_cfg->pdev->dev,
1657				"IOA dump short data transfer timeout\n");
1658			return -EIO;
1659		}
1660
1661		/* Read data from mailbox and increment destination pointer */
1662		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1663		dest++;
1664
1665		/* For all but the last word of data, signal data received */
1666		if (i < (length_in_words - 1)) {
1667			/* Signal dump data received - Clear IO debug Ack */
1668			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1669			       ioa_cfg->regs.clr_interrupt_reg);
1670		}
1671	}
1672
1673	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1674	writel(IPR_UPROCI_RESET_ALERT,
1675	       ioa_cfg->regs.set_uproc_interrupt_reg);
1676
1677	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1678	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1679
1680	/* Signal dump data received - Clear IO debug Ack */
1681	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1682	       ioa_cfg->regs.clr_interrupt_reg);
1683
1684	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1685	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1686		temp_pcii_reg =
1687		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1688
1689		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1690			return 0;
1691
1692		udelay(10);
1693		delay += 10;
1694	}
1695
1696	return 0;
1697}
1698
1699#ifdef CONFIG_SCSI_IPR_DUMP
1700/**
1701 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1702 * @ioa_cfg:		ioa config struct
1703 * @pci_address:	adapter address
1704 * @length:			length of data to copy
1705 *
1706 * Copy data from PCI adapter to kernel buffer.
1707 * Note: length MUST be a 4 byte multiple
1708 * Return value:
1709 * 	0 on success / other on failure
1710 **/
1711static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1712			unsigned long pci_address, u32 length)
1713{
1714	int bytes_copied = 0;
1715	int cur_len, rc, rem_len, rem_page_len;
1716	__be32 *page;
1717	unsigned long lock_flags = 0;
1718	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1719
1720	while (bytes_copied < length &&
1721	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1722		if (ioa_dump->page_offset >= PAGE_SIZE ||
1723		    ioa_dump->page_offset == 0) {
1724			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1725
1726			if (!page) {
1727				ipr_trace;
1728				return bytes_copied;
1729			}
1730
1731			ioa_dump->page_offset = 0;
1732			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1733			ioa_dump->next_page_index++;
1734		} else
1735			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1736
1737		rem_len = length - bytes_copied;
1738		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1739		cur_len = min(rem_len, rem_page_len);
1740
1741		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1742		if (ioa_cfg->sdt_state == ABORT_DUMP) {
1743			rc = -EIO;
1744		} else {
1745			rc = ipr_get_ldump_data_section(ioa_cfg,
1746							pci_address + bytes_copied,
1747							&page[ioa_dump->page_offset / 4],
1748							(cur_len / sizeof(u32)));
1749		}
1750		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1751
1752		if (!rc) {
1753			ioa_dump->page_offset += cur_len;
1754			bytes_copied += cur_len;
1755		} else {
1756			ipr_trace;
1757			break;
1758		}
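		/* Give up the CPU between sections; copying a large adapter dump can take a while */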
1759		schedule();
1760	}
1761
1762	return bytes_copied;
1763}
1764
1765/**
1766 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1767 * @hdr:	dump entry header struct
1768 *
1769 * Return value:
1770 * 	nothing
1771 **/
1772static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1773{
1774	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1775	hdr->num_elems = 1;
1776	hdr->offset = sizeof(*hdr);
1777	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1778}
1779
1780/**
1781 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1782 * @ioa_cfg:	ioa config struct
1783 * @driver_dump:	driver dump struct
1784 *
1785 * Return value:
1786 * 	nothing
1787 **/
1788static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1789				   struct ipr_driver_dump *driver_dump)
1790{
1791	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1792
1793	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1794	driver_dump->ioa_type_entry.hdr.len =
1795		sizeof(struct ipr_dump_ioa_type_entry) -
1796		sizeof(struct ipr_dump_entry_header);
1797	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1798	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1799	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1800	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1801		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1802		ucode_vpd->minor_release[1];
1803	driver_dump->hdr.num_entries++;
1804}
1805
1806/**
1807 * ipr_dump_version_data - Fill in the driver version in the dump.
1808 * @ioa_cfg:	ioa config struct
1809 * @driver_dump:	driver dump struct
1810 *
1811 * Return value:
1812 * 	nothing
1813 **/
1814static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1815				  struct ipr_driver_dump *driver_dump)
1816{
1817	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1818	driver_dump->version_entry.hdr.len =
1819		sizeof(struct ipr_dump_version_entry) -
1820		sizeof(struct ipr_dump_entry_header);
1821	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1822	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1823	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1824	driver_dump->hdr.num_entries++;
1825}
1826
1827/**
1828 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1829 * @ioa_cfg:	ioa config struct
1830 * @driver_dump:	driver dump struct
1831 *
1832 * Return value:
1833 * 	nothing
1834 **/
1835static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1836				   struct ipr_driver_dump *driver_dump)
1837{
1838	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1839	driver_dump->trace_entry.hdr.len =
1840		sizeof(struct ipr_dump_trace_entry) -
1841		sizeof(struct ipr_dump_entry_header);
1842	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1843	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1844	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1845	driver_dump->hdr.num_entries++;
1846}
1847
1848/**
1849 * ipr_dump_location_data - Fill in the IOA location in the dump.
1850 * @ioa_cfg:	ioa config struct
1851 * @driver_dump:	driver dump struct
1852 *
1853 * Return value:
1854 * 	nothing
1855 **/
1856static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1857				   struct ipr_driver_dump *driver_dump)
1858{
1859	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1860	driver_dump->location_entry.hdr.len =
1861		sizeof(struct ipr_dump_location_entry) -
1862		sizeof(struct ipr_dump_entry_header);
1863	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1864	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1865	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1866	driver_dump->hdr.num_entries++;
1867}
1868
1869/**
1870 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1871 * @ioa_cfg:	ioa config struct
1872 * @dump:		dump struct
1873 *
1874 * Return value:
1875 * 	nothing
1876 **/
1877static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1878{
1879	unsigned long start_addr, sdt_word;
1880	unsigned long lock_flags = 0;
1881	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1882	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1883	u32 num_entries, start_off, end_off;
1884	u32 bytes_to_copy, bytes_copied, rc;
1885	struct ipr_sdt *sdt;
1886	int i;
1887
1888	ENTER;
1889
1890	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1891
1892	if (ioa_cfg->sdt_state != GET_DUMP) {
1893		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1894		return;
1895	}
1896
1897	start_addr = readl(ioa_cfg->ioa_mailbox);
1898
1899	if (!ipr_sdt_is_fmt2(start_addr)) {
1900		dev_err(&ioa_cfg->pdev->dev,
1901			"Invalid dump table format: %lx\n", start_addr);
1902		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1903		return;
1904	}
1905
1906	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1907
1908	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1909
1910	/* Initialize the overall dump header */
1911	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1912	driver_dump->hdr.num_entries = 1;
1913	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1914	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1915	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1916	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1917
1918	ipr_dump_version_data(ioa_cfg, driver_dump);
1919	ipr_dump_location_data(ioa_cfg, driver_dump);
1920	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1921	ipr_dump_trace_data(ioa_cfg, driver_dump);
1922
1923	/* Update dump_header */
1924	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1925
1926	/* IOA Dump entry */
1927	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1928	ioa_dump->format = IPR_SDT_FMT2;
1929	ioa_dump->hdr.len = 0;
1930	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1931	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1932
1933	/* First entries in the sdt are actually a list of dump addresses and
1934	 * lengths to gather the real dump data. sdt represents the pointer
1935	 * to the IOA-generated dump table. Dump data will be extracted based
1936	 * on entries in this table. */
1937	sdt = &ioa_dump->sdt;
1938
1939	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1940					sizeof(struct ipr_sdt) / sizeof(__be32));
1941
1942	/* Smart Dump table is ready to use and the first entry is valid */
1943	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1944		dev_err(&ioa_cfg->pdev->dev,
1945			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1946			rc, be32_to_cpu(sdt->hdr.state));
1947		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1948		ioa_cfg->sdt_state = DUMP_OBTAINED;
1949		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1950		return;
1951	}
1952
1953	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1954
1955	if (num_entries > IPR_NUM_SDT_ENTRIES)
1956		num_entries = IPR_NUM_SDT_ENTRIES;
1957
1958	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1959
1960	for (i = 0; i < num_entries; i++) {
1961		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1962			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1963			break;
1964		}
1965
1966		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1967			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1968			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1969			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1970
1971			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1972				bytes_to_copy = end_off - start_off;
1973				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1974					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1975					continue;
1976				}
1977
1978				/* Copy data from adapter to driver buffers */
1979				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1980							    bytes_to_copy);
1981
1982				ioa_dump->hdr.len += bytes_copied;
1983
1984				if (bytes_copied != bytes_to_copy) {
1985					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1986					break;
1987				}
1988			}
1989		}
1990	}
1991
1992	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1993
1994	/* Update dump_header */
1995	driver_dump->hdr.len += ioa_dump->hdr.len;
1996	wmb();
1997	ioa_cfg->sdt_state = DUMP_OBTAINED;
1998	LEAVE;
1999}
2000
2001#else
2002#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2003#endif
2004
2005/**
2006 * ipr_release_dump - Free adapter dump memory
2007 * @kref:	kref struct
2008 *
2009 * Return value:
2010 *	nothing
2011 **/
2012static void ipr_release_dump(struct kref *kref)
2013{
2014	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2015	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2016	unsigned long lock_flags = 0;
2017	int i;
2018
2019	ENTER;
2020	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2021	ioa_cfg->dump = NULL;
2022	ioa_cfg->sdt_state = INACTIVE;
2023	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2024
2025	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2026		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2027
2028	kfree(dump);
2029	LEAVE;
2030}
2031
2032/**
2033 * ipr_worker_thread - Worker thread
2034 * @data:		ioa config struct
2035 *
2036 * Called at task level from a work thread. This function takes care
2037 * of adding and removing devices to and from the mid-layer as configuration
2038 * changes are detected by the adapter.
2039 *
2040 * Return value:
2041 * 	nothing
2042 **/
2043static void ipr_worker_thread(void *data)
2044{
2045	unsigned long lock_flags;
2046	struct ipr_resource_entry *res;
2047	struct scsi_device *sdev;
2048	struct ipr_dump *dump;
2049	struct ipr_ioa_cfg *ioa_cfg = data;
2050	u8 bus, target, lun;
2051	int did_work;
2052
2053	ENTER;
2054	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2055
2056	if (ioa_cfg->sdt_state == GET_DUMP) {
2057		dump = ioa_cfg->dump;
2058		if (!dump) {
2059			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2060			return;
2061		}
2062		kref_get(&dump->kref);
2063		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2064		ipr_get_ioa_dump(ioa_cfg, dump);
2065		kref_put(&dump->kref, ipr_release_dump);
2066
2067		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2068		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2069			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2070		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2071		return;
2072	}
2073
2074restart:
2075	do {
2076		did_work = 0;
2077		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2078			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2079			return;
2080		}
2081
2082		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2083			if (res->del_from_ml && res->sdev) {
2084				did_work = 1;
2085				sdev = res->sdev;
2086				if (!scsi_device_get(sdev)) {
2087					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2088					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2089					scsi_remove_device(sdev);
2090					scsi_device_put(sdev);
2091					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092				}
2093				break;
2094			}
2095		}
2096	} while(did_work);
2097
2098	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2099		if (res->add_to_ml) {
2100			bus = res->cfgte.res_addr.bus;
2101			target = res->cfgte.res_addr.target;
2102			lun = res->cfgte.res_addr.lun;
2103			res->add_to_ml = 0;
2104			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2105			scsi_add_device(ioa_cfg->host, bus, target, lun);
2106			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2107			goto restart;
2108		}
2109	}
2110
2111	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2112	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2113	LEAVE;
2114}
2115
2116#ifdef CONFIG_SCSI_IPR_TRACE
2117/**
2118 * ipr_read_trace - Dump the adapter trace
2119 * @kobj:		kobject struct
2120 * @buf:		buffer
2121 * @off:		offset
2122 * @count:		buffer size
2123 *
2124 * Return value:
2125 *	number of bytes printed to buffer
2126 **/
2127static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2128			      loff_t off, size_t count)
2129{
2130	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2131	struct Scsi_Host *shost = class_to_shost(cdev);
2132	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2133	unsigned long lock_flags = 0;
2134	int size = IPR_TRACE_SIZE;
2135	char *src = (char *)ioa_cfg->trace;
2136
2137	if (off > size)
2138		return 0;
2139	if (off + count > size) {
2140		size -= off;
2141		count = size;
2142	}
2143
2144	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2145	memcpy(buf, &src[off], count);
2146	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2147	return count;
2148}
2149
2150static struct bin_attribute ipr_trace_attr = {
2151	.attr =	{
2152		.name = "trace",
2153		.mode = S_IRUGO,
2154	},
2155	.size = 0,
2156	.read = ipr_read_trace,
2157};
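/*
 * Usage sketch for the trace attribute above (the sysfs path is an
 * assumption based on the usual scsi_host class device layout, where <N>
 * is the SCSI host number assigned to this adapter):
 *
 *	cat /sys/class/scsi_host/host<N>/trace > ipr_trace.bin
 *
 * reads the in-memory adapter trace buffer via ipr_read_trace().
 */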
2158#endif
2159
2160static const struct {
2161	enum ipr_cache_state state;
2162	char *name;
2163} cache_state [] = {
2164	{ CACHE_NONE, "none" },
2165	{ CACHE_DISABLED, "disabled" },
2166	{ CACHE_ENABLED, "enabled" }
2167};
2168
2169/**
2170 * ipr_show_write_caching - Show the write caching attribute
2171 * @class_dev:	class device struct
2172 * @buf:		buffer
2173 *
2174 * Return value:
2175 *	number of bytes printed to buffer
2176 **/
2177static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2178{
2179	struct Scsi_Host *shost = class_to_shost(class_dev);
2180	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2181	unsigned long lock_flags = 0;
2182	int i, len = 0;
2183
2184	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2185	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2186		if (cache_state[i].state == ioa_cfg->cache_state) {
2187			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2188			break;
2189		}
2190	}
2191	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2192	return len;
2193}
2194
2195
2196/**
2197 * ipr_store_write_caching - Enable/disable adapter write cache
2198 * @class_dev:	class_device struct
2199 * @buf:		buffer
2200 * @count:		buffer size
2201 *
2202 * This function will enable/disable adapter write cache.
2203 *
2204 * Return value:
2205 * 	count on success / other on failure
2206 **/
2207static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2208					const char *buf, size_t count)
2209{
2210	struct Scsi_Host *shost = class_to_shost(class_dev);
2211	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2212	unsigned long lock_flags = 0;
2213	enum ipr_cache_state new_state = CACHE_INVALID;
2214	int i;
2215
2216	if (!capable(CAP_SYS_ADMIN))
2217		return -EACCES;
2218	if (ioa_cfg->cache_state == CACHE_NONE)
2219		return -EINVAL;
2220
2221	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2222		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2223			new_state = cache_state[i].state;
2224			break;
2225		}
2226	}
2227
2228	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2229		return -EINVAL;
2230
2231	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2232	if (ioa_cfg->cache_state == new_state) {
2233		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2234		return count;
2235	}
2236
2237	ioa_cfg->cache_state = new_state;
2238	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2239		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2240	if (!ioa_cfg->in_reset_reload)
2241		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2242	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2243	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2244
2245	return count;
2246}
2247
2248static struct class_device_attribute ipr_ioa_cache_attr = {
2249	.attr = {
2250		.name =		"write_cache",
2251		.mode =		S_IRUGO | S_IWUSR,
2252	},
2253	.show = ipr_show_write_caching,
2254	.store = ipr_store_write_caching
2255};
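/*
 * Usage sketch for the write_cache attribute (path assumed to follow the
 * standard scsi_host class device layout):
 *
 *	cat /sys/class/scsi_host/host<N>/write_cache      -> none/disabled/enabled
 *	echo disabled > /sys/class/scsi_host/host<N>/write_cache
 *
 * A store that changes the setting triggers a normal shutdown and adapter
 * reset, so the write returns only after the reset/reload has completed.
 */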
2256
2257/**
2258 * ipr_show_fw_version - Show the firmware version
2259 * @class_dev:	class device struct
2260 * @buf:		buffer
2261 *
2262 * Return value:
2263 *	number of bytes printed to buffer
2264 **/
2265static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2266{
2267	struct Scsi_Host *shost = class_to_shost(class_dev);
2268	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2269	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2270	unsigned long lock_flags = 0;
2271	int len;
2272
2273	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2274	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2275		       ucode_vpd->major_release, ucode_vpd->card_type,
2276		       ucode_vpd->minor_release[0],
2277		       ucode_vpd->minor_release[1]);
2278	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2279	return len;
2280}
2281
2282static struct class_device_attribute ipr_fw_version_attr = {
2283	.attr = {
2284		.name =		"fw_version",
2285		.mode =		S_IRUGO,
2286	},
2287	.show = ipr_show_fw_version,
2288};
2289
2290/**
2291 * ipr_show_log_level - Show the adapter's error logging level
2292 * @class_dev:	class device struct
2293 * @buf:		buffer
2294 *
2295 * Return value:
2296 * 	number of bytes printed to buffer
2297 **/
2298static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2299{
2300	struct Scsi_Host *shost = class_to_shost(class_dev);
2301	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2302	unsigned long lock_flags = 0;
2303	int len;
2304
2305	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2306	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2307	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2308	return len;
2309}
2310
2311/**
2312 * ipr_store_log_level - Change the adapter's error logging level
2313 * @class_dev:	class device struct
2314 * @buf:		buffer
2315 * @count:		buffer size
2316 * Return value:
2317 * 	number of bytes consumed from buffer
2318 **/
2319static ssize_t ipr_store_log_level(struct class_device *class_dev,
2320				   const char *buf, size_t count)
2321{
2322	struct Scsi_Host *shost = class_to_shost(class_dev);
2323	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2324	unsigned long lock_flags = 0;
2325
2326	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2328	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2329	return strlen(buf);
2330}
2331
2332static struct class_device_attribute ipr_log_level_attr = {
2333	.attr = {
2334		.name =		"log_level",
2335		.mode =		S_IRUGO | S_IWUSR,
2336	},
2337	.show = ipr_show_log_level,
2338	.store = ipr_store_log_level
2339};
2340
2341/**
2342 * ipr_store_diagnostics - IOA Diagnostics interface
2343 * @class_dev:	class_device struct
2344 * @buf:		buffer
2345 * @count:		buffer size
2346 *
2347 * This function will reset the adapter and wait a reasonable
2348 * amount of time for any errors that the adapter might log.
2349 *
2350 * Return value:
2351 * 	count on success / other on failure
2352 **/
2353static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2354				     const char *buf, size_t count)
2355{
2356	struct Scsi_Host *shost = class_to_shost(class_dev);
2357	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2358	unsigned long lock_flags = 0;
2359	int rc = count;
2360
2361	if (!capable(CAP_SYS_ADMIN))
2362		return -EACCES;
2363
2364	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2365	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2366	ioa_cfg->errors_logged = 0;
2367	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2368
2369	if (ioa_cfg->in_reset_reload) {
2370		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2371		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2372
2373		/* Wait for a second for any errors to be logged */
2374		msleep(1000);
2375	} else {
2376		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2377		return -EIO;
2378	}
2379
2380	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2381	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2382		rc = -EIO;
2383	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2384
2385	return rc;
2386}
2387
2388static struct class_device_attribute ipr_diagnostics_attr = {
2389	.attr = {
2390		.name =		"run_diagnostics",
2391		.mode =		S_IWUSR,
2392	},
2393	.store = ipr_store_diagnostics
2394};
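/*
 * Usage sketch (path assumed): writing anything to the attribute resets the
 * adapter and the store fails if any errors are logged while it comes back
 * up, e.g.
 *
 *	echo 1 > /sys/class/scsi_host/host<N>/run_diagnostics || echo "diagnostics failed"
 */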
2395
2396/**
2397 * ipr_show_adapter_state - Show the adapter's state
2398 * @class_dev:	class device struct
2399 * @buf:		buffer
2400 *
2401 * Return value:
2402 * 	number of bytes printed to buffer
2403 **/
2404static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2405{
2406	struct Scsi_Host *shost = class_to_shost(class_dev);
2407	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2408	unsigned long lock_flags = 0;
2409	int len;
2410
2411	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2412	if (ioa_cfg->ioa_is_dead)
2413		len = snprintf(buf, PAGE_SIZE, "offline\n");
2414	else
2415		len = snprintf(buf, PAGE_SIZE, "online\n");
2416	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2417	return len;
2418}
2419
2420/**
2421 * ipr_store_adapter_state - Change adapter state
2422 * @class_dev:	class_device struct
2423 * @buf:		buffer
2424 * @count:		buffer size
2425 *
2426 * This function will change the adapter's state.
2427 *
2428 * Return value:
2429 * 	count on success / other on failure
2430 **/
2431static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2432				       const char *buf, size_t count)
2433{
2434	struct Scsi_Host *shost = class_to_shost(class_dev);
2435	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2436	unsigned long lock_flags;
2437	int result = count;
2438
2439	if (!capable(CAP_SYS_ADMIN))
2440		return -EACCES;
2441
2442	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2444		ioa_cfg->ioa_is_dead = 0;
2445		ioa_cfg->reset_retries = 0;
2446		ioa_cfg->in_ioa_bringdown = 0;
2447		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2448	}
2449	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2450	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2451
2452	return result;
2453}
2454
2455static struct class_device_attribute ipr_ioa_state_attr = {
2456	.attr = {
2457		.name =		"state",
2458		.mode =		S_IRUGO | S_IWUSR,
2459	},
2460	.show = ipr_show_adapter_state,
2461	.store = ipr_store_adapter_state
2462};
2463
2464/**
2465 * ipr_store_reset_adapter - Reset the adapter
2466 * @class_dev:	class_device struct
2467 * @buf:		buffer
2468 * @count:		buffer size
2469 *
2470 * This function will reset the adapter.
2471 *
2472 * Return value:
2473 * 	count on success / other on failure
2474 **/
2475static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2476				       const char *buf, size_t count)
2477{
2478	struct Scsi_Host *shost = class_to_shost(class_dev);
2479	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2480	unsigned long lock_flags;
2481	int result = count;
2482
2483	if (!capable(CAP_SYS_ADMIN))
2484		return -EACCES;
2485
2486	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2487	if (!ioa_cfg->in_reset_reload)
2488		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2489	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2490	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2491
2492	return result;
2493}
2494
2495static struct class_device_attribute ipr_ioa_reset_attr = {
2496	.attr = {
2497		.name =		"reset_host",
2498		.mode =		S_IWUSR,
2499	},
2500	.store = ipr_store_reset_adapter
2501};
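/*
 * Usage sketch (path assumed):
 *
 *	echo 1 > /sys/class/scsi_host/host<N>/reset_host
 *
 * issues a normal shutdown followed by an adapter reset and returns once
 * the reset/reload sequence has finished.
 */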
2502
2503/**
2504 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2505 * @buf_len:		buffer length
2506 *
2507 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2508 * list to use for microcode download
2509 *
2510 * Return value:
2511 * 	pointer to sglist / NULL on failure
2512 **/
2513static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2514{
2515	int sg_size, order, bsize_elem, num_elem, i, j;
2516	struct ipr_sglist *sglist;
2517	struct scatterlist *scatterlist;
2518	struct page *page;
2519
2520	/* Get the minimum size per scatter/gather element */
2521	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2522
2523	/* Get the actual size per element */
2524	order = get_order(sg_size);
2525
2526	/* Determine the actual number of bytes per element */
2527	bsize_elem = PAGE_SIZE * (1 << order);
2528
2529	/* Determine the actual number of sg entries needed */
2530	if (buf_len % bsize_elem)
2531		num_elem = (buf_len / bsize_elem) + 1;
2532	else
2533		num_elem = buf_len / bsize_elem;
2534
2535	/* Allocate a scatter/gather list for the DMA */
2536	sglist = kzalloc(sizeof(struct ipr_sglist) +
2537			 (sizeof(struct scatterlist) * (num_elem - 1)),
2538			 GFP_KERNEL);
2539
2540	if (sglist == NULL) {
2541		ipr_trace;
2542		return NULL;
2543	}
2544
2545	scatterlist = sglist->scatterlist;
2546
2547	sglist->order = order;
2548	sglist->num_sg = num_elem;
2549
2550	/* Allocate a bunch of sg elements */
2551	for (i = 0; i < num_elem; i++) {
2552		page = alloc_pages(GFP_KERNEL, order);
2553		if (!page) {
2554			ipr_trace;
2555
2556			/* Free up what we already allocated */
2557			for (j = i - 1; j >= 0; j--)
2558				__free_pages(scatterlist[j].page, order);
2559			kfree(sglist);
2560			return NULL;
2561		}
2562
2563		scatterlist[i].page = page;
2564	}
2565
2566	return sglist;
2567}
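/*
 * Sizing sketch for ipr_alloc_ucode_buffer() above, assuming 4K pages and an
 * IPR_MAX_SGLIST of 64 (the actual constant lives in ipr.h):
 *
 *	buf_len  = 2 MB  ->  sg_size = 2 MB / 63 ~= 33 KB
 *	order    = get_order(33 KB) = 4  ->  bsize_elem = 64 KB per element
 *	num_elem = ceil(2 MB / 64 KB) = 32 scatter/gather entries
 *
 * i.e. the image is split into the smallest number of equal-order page
 * allocations that still fits within the adapter's scatter/gather limit.
 */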
2568
2569/**
2570 * ipr_free_ucode_buffer - Frees a microcode download buffer
2571 * @sglist:		scatter/gather list pointer
2572 *
2573 * Free a DMA'able ucode download buffer previously allocated with
2574 * ipr_alloc_ucode_buffer
2575 *
2576 * Return value:
2577 * 	nothing
2578 **/
2579static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2580{
2581	int i;
2582
2583	for (i = 0; i < sglist->num_sg; i++)
2584		__free_pages(sglist->scatterlist[i].page, sglist->order);
2585
2586	kfree(sglist);
2587}
2588
2589/**
2590 * ipr_copy_ucode_buffer - Copy microcode image to the download buffer
2591 * @sglist:		scatter/gather list pointer
2592 * @buffer:		buffer pointer
2593 * @len:		buffer length
2594 *
2595 * Copy a microcode image into a scatter/gather buffer allocated by
2596 * ipr_alloc_ucode_buffer
2597 *
2598 * Return value:
2599 * 	0 on success / other on failure
2600 **/
2601static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2602				 u8 *buffer, u32 len)
2603{
2604	int bsize_elem, i, result = 0;
2605	struct scatterlist *scatterlist;
2606	void *kaddr;
2607
2608	/* Determine the actual number of bytes per element */
2609	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2610
2611	scatterlist = sglist->scatterlist;
2612
2613	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2614		kaddr = kmap(scatterlist[i].page);
2615		memcpy(kaddr, buffer, bsize_elem);
2616		kunmap(scatterlist[i].page);
2617
2618		scatterlist[i].length = bsize_elem;
2619
2620		if (result != 0) {
2621			ipr_trace;
2622			return result;
2623		}
2624	}
2625
2626	if (len % bsize_elem) {
2627		kaddr = kmap(scatterlist[i].page);
2628		memcpy(kaddr, buffer, len % bsize_elem);
2629		kunmap(scatterlist[i].page);
2630
2631		scatterlist[i].length = len % bsize_elem;
2632	}
2633
2634	sglist->buffer_len = len;
2635	return result;
2636}
2637
2638/**
2639 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2640 * @ipr_cmd:	ipr command struct
2641 * @sglist:		scatter/gather list
2642 *
2643 * Builds a microcode download IOA data list (IOADL).
2644 *
2645 **/
2646static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2647				  struct ipr_sglist *sglist)
2648{
2649	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2650	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2651	struct scatterlist *scatterlist = sglist->scatterlist;
2652	int i;
2653
2654	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2655	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2656	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2657	ioarcb->write_ioadl_len =
2658		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2659
2660	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2661		ioadl[i].flags_and_data_len =
2662			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2663		ioadl[i].address =
2664			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2665	}
2666
2667	ioadl[i-1].flags_and_data_len |=
2668		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2669}
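/*
 * Each IOADL descriptor built above carries a flags/length word and a 32-bit
 * DMA address for one scatter/gather element; the final descriptor is tagged
 * with IPR_IOADL_FLAGS_LAST so the adapter knows where the list ends.
 */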
2670
2671/**
2672 * ipr_update_ioa_ucode - Update IOA's microcode
2673 * @ioa_cfg:	ioa config struct
2674 * @sglist:		scatter/gather list
2675 *
2676 * Initiate an adapter reset to update the IOA's microcode
2677 *
2678 * Return value:
2679 * 	0 on success / -EIO on failure
2680 **/
2681static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2682				struct ipr_sglist *sglist)
2683{
2684	unsigned long lock_flags;
2685
2686	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687
2688	if (ioa_cfg->ucode_sglist) {
2689		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2690		dev_err(&ioa_cfg->pdev->dev,
2691			"Microcode download already in progress\n");
2692		return -EIO;
2693	}
2694
2695	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2696					sglist->num_sg, DMA_TO_DEVICE);
2697
2698	if (!sglist->num_dma_sg) {
2699		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2700		dev_err(&ioa_cfg->pdev->dev,
2701			"Failed to map microcode download buffer!\n");
2702		return -EIO;
2703	}
2704
2705	ioa_cfg->ucode_sglist = sglist;
2706	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2707	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2708	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2709
2710	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2711	ioa_cfg->ucode_sglist = NULL;
2712	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713	return 0;
2714}
2715
2716/**
2717 * ipr_store_update_fw - Update the firmware on the adapter
2718 * @class_dev:	class_device struct
2719 * @buf:		buffer
2720 * @count:		buffer size
2721 *
2722 * This function will update the firmware on the adapter.
2723 *
2724 * Return value:
2725 * 	count on success / other on failure
2726 **/
2727static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2728				       const char *buf, size_t count)
2729{
2730	struct Scsi_Host *shost = class_to_shost(class_dev);
2731	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2732	struct ipr_ucode_image_header *image_hdr;
2733	const struct firmware *fw_entry;
2734	struct ipr_sglist *sglist;
2735	char fname[100];
2736	char *src;
2737	int len, result, dnld_size;
2738
2739	if (!capable(CAP_SYS_ADMIN))
2740		return -EACCES;
2741
	snprintf(fname, sizeof(fname), "%s", buf);
	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
2744
2745	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2746		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2747		return -EIO;
2748	}
2749
2750	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2751
2752	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2753	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2754	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2755		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2756		release_firmware(fw_entry);
2757		return -EINVAL;
2758	}
2759
2760	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2761	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2762	sglist = ipr_alloc_ucode_buffer(dnld_size);
2763
2764	if (!sglist) {
2765		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2766		release_firmware(fw_entry);
2767		return -ENOMEM;
2768	}
2769
2770	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2771
2772	if (result) {
2773		dev_err(&ioa_cfg->pdev->dev,
2774			"Microcode buffer copy to DMA buffer failed\n");
2775		goto out;
2776	}
2777
2778	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2779
2780	if (!result)
2781		result = count;
2782out:
2783	ipr_free_ucode_buffer(sglist);
2784	release_firmware(fw_entry);
2785	return result;
2786}
2787
2788static struct class_device_attribute ipr_update_fw_attr = {
2789	.attr = {
2790		.name =		"update_fw",
2791		.mode =		S_IWUSR,
2792	},
2793	.store = ipr_store_update_fw
2794};
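/*
 * Usage sketch (the path and file name are only illustrative): place the
 * microcode image where request_firmware() / the firmware loading agent can
 * find it, typically /lib/firmware, then
 *
 *	echo ibm-ucode.img > /sys/class/scsi_host/host<N>/update_fw
 *
 * The write returns only after the image has been downloaded and the
 * adapter has completed its reset/reload.
 */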
2795
2796static struct class_device_attribute *ipr_ioa_attrs[] = {
2797	&ipr_fw_version_attr,
2798	&ipr_log_level_attr,
2799	&ipr_diagnostics_attr,
2800	&ipr_ioa_state_attr,
2801	&ipr_ioa_reset_attr,
2802	&ipr_update_fw_attr,
2803	&ipr_ioa_cache_attr,
2804	NULL,
2805};
2806
2807#ifdef CONFIG_SCSI_IPR_DUMP
2808/**
2809 * ipr_read_dump - Dump the adapter
2810 * @kobj:		kobject struct
2811 * @buf:		buffer
2812 * @off:		offset
2813 * @count:		buffer size
2814 *
2815 * Return value:
2816 *	number of bytes printed to buffer
2817 **/
2818static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2819			      loff_t off, size_t count)
2820{
2821	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2822	struct Scsi_Host *shost = class_to_shost(cdev);
2823	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2824	struct ipr_dump *dump;
2825	unsigned long lock_flags = 0;
2826	char *src;
2827	int len;
2828	size_t rc = count;
2829
2830	if (!capable(CAP_SYS_ADMIN))
2831		return -EACCES;
2832
2833	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2834	dump = ioa_cfg->dump;
2835
2836	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2837		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2838		return 0;
2839	}
2840	kref_get(&dump->kref);
2841	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2842
2843	if (off > dump->driver_dump.hdr.len) {
2844		kref_put(&dump->kref, ipr_release_dump);
2845		return 0;
2846	}
2847
2848	if (off + count > dump->driver_dump.hdr.len) {
2849		count = dump->driver_dump.hdr.len - off;
2850		rc = count;
2851	}
2852
2853	if (count && off < sizeof(dump->driver_dump)) {
2854		if (off + count > sizeof(dump->driver_dump))
2855			len = sizeof(dump->driver_dump) - off;
2856		else
2857			len = count;
2858		src = (u8 *)&dump->driver_dump + off;
2859		memcpy(buf, src, len);
2860		buf += len;
2861		off += len;
2862		count -= len;
2863	}
2864
2865	off -= sizeof(dump->driver_dump);
2866
2867	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2868		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2869			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2870		else
2871			len = count;
2872		src = (u8 *)&dump->ioa_dump + off;
2873		memcpy(buf, src, len);
2874		buf += len;
2875		off += len;
2876		count -= len;
2877	}
2878
2879	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2880
2881	while (count) {
2882		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2883			len = PAGE_ALIGN(off) - off;
2884		else
2885			len = count;
2886		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2887		src += off & ~PAGE_MASK;
2888		memcpy(buf, src, len);
2889		buf += len;
2890		off += len;
2891		count -= len;
2892	}
2893
2894	kref_put(&dump->kref, ipr_release_dump);
2895	return rc;
2896}
2897
2898/**
2899 * ipr_alloc_dump - Prepare for adapter dump
2900 * @ioa_cfg:	ioa config struct
2901 *
2902 * Return value:
2903 *	0 on success / other on failure
2904 **/
2905static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2906{
2907	struct ipr_dump *dump;
2908	unsigned long lock_flags = 0;
2909
2910	ENTER;
2911	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2912
2913	if (!dump) {
2914		ipr_err("Dump memory allocation failed\n");
2915		return -ENOMEM;
2916	}
2917
2918	kref_init(&dump->kref);
2919	dump->ioa_cfg = ioa_cfg;
2920
2921	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2922
2923	if (INACTIVE != ioa_cfg->sdt_state) {
2924		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2925		kfree(dump);
2926		return 0;
2927	}
2928
2929	ioa_cfg->dump = dump;
2930	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2931	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2932		ioa_cfg->dump_taken = 1;
2933		schedule_work(&ioa_cfg->work_q);
2934	}
2935	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2936
2937	LEAVE;
2938	return 0;
2939}
2940
2941/**
2942 * ipr_free_dump - Free adapter dump memory
2943 * @ioa_cfg:	ioa config struct
2944 *
2945 * Return value:
2946 *	0 on success / other on failure
2947 **/
2948static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2949{
2950	struct ipr_dump *dump;
2951	unsigned long lock_flags = 0;
2952
2953	ENTER;
2954
2955	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2956	dump = ioa_cfg->dump;
2957	if (!dump) {
2958		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2959		return 0;
2960	}
2961
2962	ioa_cfg->dump = NULL;
2963	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964
2965	kref_put(&dump->kref, ipr_release_dump);
2966
2967	LEAVE;
2968	return 0;
2969}
2970
2971/**
2972 * ipr_write_dump - Setup dump state of adapter
2973 * @kobj:		kobject struct
2974 * @buf:		buffer
2975 * @off:		offset
2976 * @count:		buffer size
2977 *
2978 * Return value:
2979 *	count on success / other on failure
2980 **/
2981static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2982			      loff_t off, size_t count)
2983{
2984	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2985	struct Scsi_Host *shost = class_to_shost(cdev);
2986	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2987	int rc;
2988
2989	if (!capable(CAP_SYS_ADMIN))
2990		return -EACCES;
2991
2992	if (buf[0] == '1')
2993		rc = ipr_alloc_dump(ioa_cfg);
2994	else if (buf[0] == '0')
2995		rc = ipr_free_dump(ioa_cfg);
2996	else
2997		return -EINVAL;
2998
2999	if (rc)
3000		return rc;
3001	else
3002		return count;
3003}
3004
3005static struct bin_attribute ipr_dump_attr = {
3006	.attr =	{
3007		.name = "dump",
3008		.mode = S_IRUSR | S_IWUSR,
3009	},
3010	.size = 0,
3011	.read = ipr_read_dump,
3012	.write = ipr_write_dump
3013};
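/*
 * Usage sketch for the dump attribute (path assumed):
 *
 *	echo 1 > /sys/class/scsi_host/host<N>/dump	# allocate/arm the dump
 *	cat /sys/class/scsi_host/host<N>/dump > ioa_dump.bin
 *	echo 0 > /sys/class/scsi_host/host<N>/dump	# free the dump memory
 *
 * Reads return data only once the driver has actually obtained a dump
 * (sdt_state == DUMP_OBTAINED).
 */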
3014#else
3015static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3016#endif
3017
3018/**
3019 * ipr_change_queue_depth - Change the device's queue depth
3020 * @sdev:	scsi device struct
3021 * @qdepth:	depth to set
3022 *
3023 * Return value:
3024 * 	actual depth set
3025 **/
3026static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3027{
3028	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3029	return sdev->queue_depth;
3030}
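/*
 * The SCSI midlayer invokes ipr_change_queue_depth() when user space writes
 * the per-device queue_depth attribute it creates for hosts that provide
 * this hook, e.g. (exact path assumed):
 *
 *	echo 16 > /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 */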
3031
3032/**
3033 * ipr_change_queue_type - Change the device's queue type
3034 * @sdev:		scsi device struct
3035 * @tag_type:	type of tags to use
3036 *
3037 * Return value:
3038 * 	actual queue type set
3039 **/
3040static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3041{
3042	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3043	struct ipr_resource_entry *res;
3044	unsigned long lock_flags = 0;
3045
3046	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3047	res = (struct ipr_resource_entry *)sdev->hostdata;
3048
3049	if (res) {
3050		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3051			/*
3052			 * We don't bother quiescing the device here since the
3053			 * adapter firmware does it for us.
3054			 */
3055			scsi_set_tag_type(sdev, tag_type);
3056
3057			if (tag_type)
3058				scsi_activate_tcq(sdev, sdev->queue_depth);
3059			else
3060				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3061		} else
3062			tag_type = 0;
3063	} else
3064		tag_type = 0;
3065
3066	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067	return tag_type;
3068}
3069
3070/**
3071 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3072 * @dev:	device struct
3073 * @buf:	buffer
3074 *
3075 * Return value:
3076 * 	number of bytes printed to buffer
3077 **/
3078static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3079{
3080	struct scsi_device *sdev = to_scsi_device(dev);
3081	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3082	struct ipr_resource_entry *res;
3083	unsigned long lock_flags = 0;
3084	ssize_t len = -ENXIO;
3085
3086	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3087	res = (struct ipr_resource_entry *)sdev->hostdata;
3088	if (res)
3089		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3090	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091	return len;
3092}
3093
3094static struct device_attribute ipr_adapter_handle_attr = {
3095	.attr = {
3096		.name = 	"adapter_handle",
3097		.mode =		S_IRUSR,
3098	},
3099	.show = ipr_show_adapter_handle
3100};
3101
3102static struct device_attribute *ipr_dev_attrs[] = {
3103	&ipr_adapter_handle_attr,
3104	NULL,
3105};
3106
3107/**
3108 * ipr_biosparam - Return the HSC mapping
3109 * @sdev:			scsi device struct
3110 * @block_device:	block device pointer
3111 * @capacity:		capacity of the device
3112 * @parm:			Array containing returned HSC values.
3113 *
3114 * This function generates the HSC parms that fdisk uses.
3115 * We want to make sure we return something that places partitions
3116 * on 4k boundaries for best performance with the IOA.
3117 *
3118 * Return value:
3119 * 	0 on success
3120 **/
3121static int ipr_biosparam(struct scsi_device *sdev,
3122			 struct block_device *block_device,
3123			 sector_t capacity, int *parm)
3124{
3125	int heads, sectors;
3126	sector_t cylinders;
3127
3128	heads = 128;
3129	sectors = 32;
3130
3131	cylinders = capacity;
3132	sector_div(cylinders, (128 * 32));
3133
3134	/* return result */
3135	parm[0] = heads;
3136	parm[1] = sectors;
3137	parm[2] = cylinders;
3138
3139	return 0;
3140}
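/*
 * Worked example of the geometry returned above: with 128 heads and 32
 * sectors/track, one cylinder is 128 * 32 = 4096 sectors (2 MB with 512-byte
 * sectors), so partitioning tools that align partitions to cylinder
 * boundaries automatically place them on 4k boundaries. A hypothetical
 * 71687372-sector disk would report roughly 71687372 / 4096 = 17501
 * cylinders.
 */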
3141
3142/**
3143 * ipr_slave_destroy - Unconfigure a SCSI device
3144 * @sdev:	scsi device struct
3145 *
3146 * Return value:
3147 * 	nothing
3148 **/
3149static void ipr_slave_destroy(struct scsi_device *sdev)
3150{
3151	struct ipr_resource_entry *res;
3152	struct ipr_ioa_cfg *ioa_cfg;
3153	unsigned long lock_flags = 0;
3154
3155	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3156
3157	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3158	res = (struct ipr_resource_entry *) sdev->hostdata;
3159	if (res) {
3160		sdev->hostdata = NULL;
3161		res->sdev = NULL;
3162	}
3163	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3164}
3165
3166/**
3167 * ipr_slave_configure - Configure a SCSI device
3168 * @sdev:	scsi device struct
3169 *
3170 * This function configures the specified scsi device.
3171 *
3172 * Return value:
3173 * 	0 on success
3174 **/
3175static int ipr_slave_configure(struct scsi_device *sdev)
3176{
3177	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3178	struct ipr_resource_entry *res;
3179	unsigned long lock_flags = 0;
3180
3181	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3182	res = sdev->hostdata;
3183	if (res) {
3184		if (ipr_is_af_dasd_device(res))
3185			sdev->type = TYPE_RAID;
3186		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3187			sdev->scsi_level = 4;
3188			sdev->no_uld_attach = 1;
3189		}
3190		if (ipr_is_vset_device(res)) {
3191			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3192			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3193		}
3194		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3195			sdev->allow_restart = 1;
3196		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3197	}
3198	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3199	return 0;
3200}
3201
3202/**
3203 * ipr_slave_alloc - Prepare for commands to a device.
3204 * @sdev:	scsi device struct
3205 *
3206 * This function saves a pointer to the resource entry
3207 * in the scsi device struct if the device exists. We
3208 * can then use this pointer in ipr_queuecommand when
3209 * handling new commands.
3210 *
3211 * Return value:
3212 * 	0 on success / -ENXIO if device does not exist
3213 **/
3214static int ipr_slave_alloc(struct scsi_device *sdev)
3215{
3216	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3217	struct ipr_resource_entry *res;
3218	unsigned long lock_flags;
3219	int rc = -ENXIO;
3220
3221	sdev->hostdata = NULL;
3222
3223	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224
3225	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3226		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3227		    (res->cfgte.res_addr.target == sdev->id) &&
3228		    (res->cfgte.res_addr.lun == sdev->lun)) {
3229			res->sdev = sdev;
3230			res->add_to_ml = 0;
3231			res->in_erp = 0;
3232			sdev->hostdata = res;
3233			if (!ipr_is_naca_model(res))
3234				res->needs_sync_complete = 1;
3235			rc = 0;
3236			break;
3237		}
3238	}
3239
3240	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241
3242	return rc;
3243}
3244
3245/**
3246 * ipr_eh_host_reset - Reset the host adapter
3247 * @scsi_cmd:	scsi command struct
3248 *
3249 * Return value:
3250 * 	SUCCESS / FAILED
3251 **/
3252static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3253{
3254	struct ipr_ioa_cfg *ioa_cfg;
3255	int rc;
3256
3257	ENTER;
3258	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3259
3260	dev_err(&ioa_cfg->pdev->dev,
3261		"Adapter being reset as a result of error recovery.\n");
3262
3263	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3264		ioa_cfg->sdt_state = GET_DUMP;
3265
3266	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3267
3268	LEAVE;
3269	return rc;
3270}
3271
3272static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3273{
3274	int rc;
3275
3276	spin_lock_irq(cmd->device->host->host_lock);
3277	rc = __ipr_eh_host_reset(cmd);
3278	spin_unlock_irq(cmd->device->host->host_lock);
3279
3280	return rc;
3281}
3282
3283/**
3284 * ipr_eh_dev_reset - Reset the device
3285 * @scsi_cmd:	scsi command struct
3286 *
3287 * This function issues a device reset to the affected device.
3288 * A LUN reset will be sent to the device first. If that does
3289 * not work, a target reset will be sent.
3290 *
3291 * Return value:
3292 *	SUCCESS / FAILED
3293 **/
3294static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3295{
3296	struct ipr_cmnd *ipr_cmd;
3297	struct ipr_ioa_cfg *ioa_cfg;
3298	struct ipr_resource_entry *res;
3299	struct ipr_cmd_pkt *cmd_pkt;
3300	u32 ioasc;
3301
3302	ENTER;
3303	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3304	res = scsi_cmd->device->hostdata;
3305
3306	if (!res)
3307		return FAILED;
3308
3309	/*
3310	 * If we are currently going through reset/reload, return failed. This will force the
3311	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3312	 * reset to complete
3313	 */
3314	if (ioa_cfg->in_reset_reload)
3315		return FAILED;
3316	if (ioa_cfg->ioa_is_dead)
3317		return FAILED;
3318
3319	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3320		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3321			if (ipr_cmd->scsi_cmd)
3322				ipr_cmd->done = ipr_scsi_eh_done;
3323		}
3324	}
3325
3326	res->resetting_device = 1;
3327
3328	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3329
3330	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3331	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3332	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3333	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3334
3335	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3336	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3337
3338	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3339
3340	res->resetting_device = 0;
3341
3342	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3343
3344	LEAVE;
3345	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3346}
3347
3348static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3349{
3350	int rc;
3351
3352	spin_lock_irq(cmd->device->host->host_lock);
3353	rc = __ipr_eh_dev_reset(cmd);
3354	spin_unlock_irq(cmd->device->host->host_lock);
3355
3356	return rc;
3357}
3358
3359/**
3360 * ipr_bus_reset_done - Op done function for bus reset.
3361 * @ipr_cmd:	ipr command struct
3362 *
3363 * This function is the op done function for a bus reset
3364 *
3365 * Return value:
3366 * 	none
3367 **/
3368static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3369{
3370	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3371	struct ipr_resource_entry *res;
3372
3373	ENTER;
3374	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3375		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3376			    sizeof(res->cfgte.res_handle))) {
3377			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3378			break;
3379		}
3380	}
3381
3382	/*
3383	 * If abort has not completed, indicate the reset has, else call the
3384	 * abort's done function to wake the sleeping eh thread
3385	 */
3386	if (ipr_cmd->sibling->sibling)
3387		ipr_cmd->sibling->sibling = NULL;
3388	else
3389		ipr_cmd->sibling->done(ipr_cmd->sibling);
3390
3391	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3392	LEAVE;
3393}
3394
3395/**
3396 * ipr_abort_timeout - An abort task has timed out
3397 * @ipr_cmd:	ipr command struct
3398 *
3399 * This function handles the case where an abort task times out. If this
3400 * happens we issue a bus reset since we have resources tied
3401 * up that must be freed before returning to the midlayer.
3402 *
3403 * Return value:
3404 *	none
3405 **/
3406static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3407{
3408	struct ipr_cmnd *reset_cmd;
3409	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3410	struct ipr_cmd_pkt *cmd_pkt;
3411	unsigned long lock_flags = 0;
3412
3413	ENTER;
3414	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3416		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3417		return;
3418	}
3419
3420	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3421	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3422	ipr_cmd->sibling = reset_cmd;
3423	reset_cmd->sibling = ipr_cmd;
3424	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3425	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3426	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3427	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3428	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3429
3430	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3431	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432	LEAVE;
3433}
3434
3435/**
3436 * ipr_cancel_op - Cancel specified op
3437 * @scsi_cmd:	scsi command struct
3438 *
3439 * This function cancels specified op.
3440 *
3441 * Return value:
3442 *	SUCCESS / FAILED
3443 **/
3444static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3445{
3446	struct ipr_cmnd *ipr_cmd;
3447	struct ipr_ioa_cfg *ioa_cfg;
3448	struct ipr_resource_entry *res;
3449	struct ipr_cmd_pkt *cmd_pkt;
3450	u32 ioasc;
3451	int op_found = 0;
3452
3453	ENTER;
3454	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3455	res = scsi_cmd->device->hostdata;
3456
3457	/* If we are currently going through reset/reload, return failed.
3458	 * This will force the mid-layer to call ipr_eh_host_reset,
3459	 * which will then go to sleep and wait for the reset to complete
3460	 */
3461	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3462		return FAILED;
3463	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3464		return FAILED;
3465
3466	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3467		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3468			ipr_cmd->done = ipr_scsi_eh_done;
3469			op_found = 1;
3470			break;
3471		}
3472	}
3473
3474	if (!op_found)
3475		return SUCCESS;
3476
3477	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3478	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3479	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3480	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3481	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3482	ipr_cmd->u.sdev = scsi_cmd->device;
3483
3484	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3485		    scsi_cmd->cmnd[0]);
3486	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3487	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3488
3489	/*
3490	 * If the abort task timed out and we sent a bus reset, we will get
3491 * one of the following responses to the abort
3492	 */
3493	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3494		ioasc = 0;
3495		ipr_trace;
3496	}
3497
3498	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3499	if (!ipr_is_naca_model(res))
3500		res->needs_sync_complete = 1;
3501
3502	LEAVE;
3503	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3504}
3505
3506/**
3507 * ipr_eh_abort - Abort a single op
3508 * @scsi_cmd:	scsi command struct
3509 *
3510 * Return value:
3511 * 	SUCCESS / FAILED
3512 **/
3513static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3514{
3515	unsigned long flags;
3516	int rc;
3517
3518	ENTER;
3519
3520	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3521	rc = ipr_cancel_op(scsi_cmd);
3522	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3523
3524	LEAVE;
3525	return rc;
3526}
3527
3528/**
3529 * ipr_handle_other_interrupt - Handle "other" interrupts
3530 * @ioa_cfg:	ioa config struct
3531 * @int_reg:	interrupt register
3532 *
3533 * Return value:
3534 * 	IRQ_NONE / IRQ_HANDLED
3535 **/
3536static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3537					      volatile u32 int_reg)
3538{
3539	irqreturn_t rc = IRQ_HANDLED;
3540
3541	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3542		/* Mask the interrupt */
3543		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3544
3545		/* Clear the interrupt */
3546		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3547		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3548
3549		list_del(&ioa_cfg->reset_cmd->queue);
3550		del_timer(&ioa_cfg->reset_cmd->timer);
3551		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3552	} else {
3553		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3554			ioa_cfg->ioa_unit_checked = 1;
3555		else
3556			dev_err(&ioa_cfg->pdev->dev,
3557				"Permanent IOA failure. 0x%08X\n", int_reg);
3558
3559		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3560			ioa_cfg->sdt_state = GET_DUMP;
3561
3562		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3563		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3564	}
3565
3566	return rc;
3567}
3568
3569/**
3570 * ipr_isr - Interrupt service routine
3571 * @irq:	irq number
3572 * @devp:	pointer to ioa config struct
3573 * @regs:	pt_regs struct
3574 *
3575 * Return value:
3576 * 	IRQ_NONE / IRQ_HANDLED
3577 **/
3578static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3579{
3580	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3581	unsigned long lock_flags = 0;
3582	volatile u32 int_reg, int_mask_reg;
3583	u32 ioasc;
3584	u16 cmd_index;
3585	struct ipr_cmnd *ipr_cmd;
3586	irqreturn_t rc = IRQ_NONE;
3587
3588	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3589
3590	/* If interrupts are disabled, ignore the interrupt */
3591	if (!ioa_cfg->allow_interrupts) {
3592		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3593		return IRQ_NONE;
3594	}
3595
3596	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3597	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3598
3599	/* If an interrupt on the adapter did not occur, ignore it */
3600	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3601		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3602		return IRQ_NONE;
3603	}
3604
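	/*
	 * Walk the host request/response queue (HRRQ): the adapter posts one
	 * response handle per completed command, and an entry is valid only
	 * while its toggle bit matches ioa_cfg->toggle_bit. When the queue
	 * wraps, the driver flips its copy of the toggle bit (see the
	 * hrrq_end handling below) so stale entries are not re-processed.
	 */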
3605	while (1) {
3606		ipr_cmd = NULL;
3607
3608		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3609		       ioa_cfg->toggle_bit) {
3610
3611			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3612				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3613
3614			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3615				ioa_cfg->errors_logged++;
3616				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3617
3618				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3619					ioa_cfg->sdt_state = GET_DUMP;
3620
3621				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3622				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3623				return IRQ_HANDLED;
3624			}
3625
3626			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3627
3628			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3629
3630			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3631
3632			list_del(&ipr_cmd->queue);
3633			del_timer(&ipr_cmd->timer);
3634			ipr_cmd->done(ipr_cmd);
3635
3636			rc = IRQ_HANDLED;
3637
3638			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3639				ioa_cfg->hrrq_curr++;
3640			} else {
3641				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3642				ioa_cfg->toggle_bit ^= 1u;
3643			}
3644		}
3645
3646		if (ipr_cmd != NULL) {
3647			/* Clear the PCI interrupt */
3648			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3649			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3650		} else
3651			break;
3652	}
3653
3654	if (unlikely(rc == IRQ_NONE))
3655		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3656
3657	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658	return rc;
3659}
3660
3661/**
3662 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3663 * @ioa_cfg:	ioa config struct
3664 * @ipr_cmd:	ipr command struct
3665 *
3666 * Return value:
3667 * 	0 on success / -1 on failure
3668 **/
3669static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3670			   struct ipr_cmnd *ipr_cmd)
3671{
3672	int i;
3673	struct scatterlist *sglist;
3674	u32 length;
3675	u32 ioadl_flags = 0;
3676	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3677	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3678	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3679
3680	length = scsi_cmd->request_bufflen;
3681
3682	if (length == 0)
3683		return 0;
3684
3685	if (scsi_cmd->use_sg) {
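		/*
		 * Map the mid-layer scatter/gather list and build one IOADL
		 * descriptor per mapped segment. The IOARCB carries the
		 * transfer direction flags and total transfer length.
		 */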
3686		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3687						 scsi_cmd->request_buffer,
3688						 scsi_cmd->use_sg,
3689						 scsi_cmd->sc_data_direction);
3690
3691		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3692			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3693			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3694			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3695			ioarcb->write_ioadl_len =
3696				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3697		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3698			ioadl_flags = IPR_IOADL_FLAGS_READ;
3699			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3700			ioarcb->read_ioadl_len =
3701				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3702		}
3703
3704		sglist = scsi_cmd->request_buffer;
3705
3706		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3707			ioadl[i].flags_and_data_len =
3708				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3709			ioadl[i].address =
3710				cpu_to_be32(sg_dma_address(&sglist[i]));
3711		}
3712
3713		if (likely(ipr_cmd->dma_use_sg)) {
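			/* Flag the final descriptor as the end of the IOADL */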
3714			ioadl[i-1].flags_and_data_len |=
3715				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3716			return 0;
3717		} else
3718			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3719	} else {
3720		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3721			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3722			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3723			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3724			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3725		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3726			ioadl_flags = IPR_IOADL_FLAGS_READ;
3727			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3728			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3729		}
3730
3731		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3732						     scsi_cmd->request_buffer, length,
3733						     scsi_cmd->sc_data_direction);
3734
3735		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3736			ipr_cmd->dma_use_sg = 1;
3737			ioadl[0].flags_and_data_len =
3738				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3739			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3740			return 0;
3741		} else
3742			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3743	}
3744
3745	return -1;
3746}
3747
3748/**
3749 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3750 * @scsi_cmd:	scsi command struct
3751 *
3752 * Return value:
3753 * 	task attributes
3754 **/
3755static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3756{
3757	u8 tag[2];
3758	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3759
3760	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3761		switch (tag[0]) {
3762		case MSG_SIMPLE_TAG:
3763			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3764			break;
3765		case MSG_HEAD_TAG:
3766			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3767			break;
3768		case MSG_ORDERED_TAG:
3769			rc = IPR_FLAGS_LO_ORDERED_TASK;
3770			break;
3771		};
3772	}
3773
3774	return rc;
3775}
3776
3777/**
3778 * ipr_erp_done - Process completion of ERP for a device
3779 * @ipr_cmd:		ipr command struct
3780 *
3781 * This function copies the sense buffer into the scsi_cmd
 * struct and calls the scsi_done function.
3783 *
3784 * Return value:
3785 * 	nothing
3786 **/
3787static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3788{
3789	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3790	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3791	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3792	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3793
3794	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3795		scsi_cmd->result |= (DID_ERROR << 16);
3796		scmd_printk(KERN_ERR, scsi_cmd,
3797			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3798	} else {
3799		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3800		       SCSI_SENSE_BUFFERSIZE);
3801	}
3802
3803	if (res) {
3804		if (!ipr_is_naca_model(res))
3805			res->needs_sync_complete = 1;
3806		res->in_erp = 0;
3807	}
3808	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3809	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3810	scsi_cmd->scsi_done(scsi_cmd);
3811}
3812
3813/**
3814 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3815 * @ipr_cmd:	ipr command struct
3816 *
3817 * Return value:
3818 * 	none
3819 **/
3820static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3821{
3822	struct ipr_ioarcb *ioarcb;
3823	struct ipr_ioasa *ioasa;
3824
3825	ioarcb = &ipr_cmd->ioarcb;
3826	ioasa = &ipr_cmd->ioasa;
3827
3828	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3829	ioarcb->write_data_transfer_length = 0;
3830	ioarcb->read_data_transfer_length = 0;
3831	ioarcb->write_ioadl_len = 0;
3832	ioarcb->read_ioadl_len = 0;
3833	ioasa->ioasc = 0;
3834	ioasa->residual_data_len = 0;
3835}
3836
3837/**
3838 * ipr_erp_request_sense - Send request sense to a device
3839 * @ipr_cmd:	ipr command struct
3840 *
3841 * This function sends a request sense to a device as a result
3842 * of a check condition.
3843 *
3844 * Return value:
3845 * 	nothing
3846 **/
3847static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3848{
3849	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3850	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3851
3852	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3853		ipr_erp_done(ipr_cmd);
3854		return;
3855	}
3856
3857	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3858
3859	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3860	cmd_pkt->cdb[0] = REQUEST_SENSE;
3861	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3862	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3863	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3864	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3865
3866	ipr_cmd->ioadl[0].flags_and_data_len =
3867		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3868	ipr_cmd->ioadl[0].address =
3869		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3870
3871	ipr_cmd->ioarcb.read_ioadl_len =
3872		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3873	ipr_cmd->ioarcb.read_data_transfer_length =
3874		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3875
3876	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3877		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3878}
3879
3880/**
3881 * ipr_erp_cancel_all - Send cancel all to a device
3882 * @ipr_cmd:	ipr command struct
3883 *
3884 * This function sends a cancel all to a device to clear the
3885 * queue. If we are running TCQ on the device, QERR is set to 1,
3886 * which means all outstanding ops have been dropped on the floor.
3887 * Cancel all will return them to us.
3888 *
3889 * Return value:
3890 * 	nothing
3891 **/
3892static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3893{
3894	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3895	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3896	struct ipr_cmd_pkt *cmd_pkt;
3897
3898	res->in_erp = 1;
3899
3900	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3901
3902	if (!scsi_get_tag_type(scsi_cmd->device)) {
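		/*
		 * Nothing can be queued on an untagged device, so skip the
		 * cancel all and go straight to the request sense.
		 */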
3903		ipr_erp_request_sense(ipr_cmd);
3904		return;
3905	}
3906
3907	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3908	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3909	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3910
3911	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3912		   IPR_CANCEL_ALL_TIMEOUT);
3913}
3914
3915/**
3916 * ipr_dump_ioasa - Dump contents of IOASA
3917 * @ioa_cfg:	ioa config struct
3918 * @ipr_cmd:	ipr command struct
3919 * @res:		resource entry struct
3920 *
3921 * This function is invoked by the interrupt handler when ops
3922 * fail. It will log the IOASA if appropriate. Only called
3923 * for GPDD ops.
3924 *
3925 * Return value:
3926 * 	none
3927 **/
3928static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3929			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3930{
3931	int i;
3932	u16 data_len;
3933	u32 ioasc;
3934	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3935	__be32 *ioasa_data = (__be32 *)ioasa;
3936	int error_index;
3937
3938	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3939
3940	if (0 == ioasc)
3941		return;
3942
3943	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3944		return;
3945
3946	error_index = ipr_get_error(ioasc);
3947
3948	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3949		/* Don't log an error if the IOA already logged one */
3950		if (ioasa->ilid != 0)
3951			return;
3952
3953		if (ipr_error_table[error_index].log_ioasa == 0)
3954			return;
3955	}
3956
3957	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
3958
3959	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3960		data_len = sizeof(struct ipr_ioasa);
3961	else
3962		data_len = be16_to_cpu(ioasa->ret_stat_len);
3963
3964	ipr_err("IOASA Dump:\n");
3965
3966	for (i = 0; i < data_len / 4; i += 4) {
3967		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3968			be32_to_cpu(ioasa_data[i]),
3969			be32_to_cpu(ioasa_data[i+1]),
3970			be32_to_cpu(ioasa_data[i+2]),
3971			be32_to_cpu(ioasa_data[i+3]));
3972	}
3973}
3974
3975/**
3976 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
3979 *
3980 * Return value:
3981 * 	none
3982 **/
3983static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3984{
3985	u32 failing_lba;
3986	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3987	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3988	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3989	u32 ioasc = be32_to_cpu(ioasa->ioasc);
3990
3991	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3992
3993	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3994		return;
3995
3996	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3997
3998	if (ipr_is_vset_device(res) &&
3999	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4000	    ioasa->u.vset.failing_lba_hi != 0) {
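		/*
		 * The failing LBA will not fit in fixed format sense data,
		 * so build descriptor format sense (response code 0x72)
		 * with an information descriptor carrying the full LBA.
		 */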
4001		sense_buf[0] = 0x72;
4002		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4003		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4004		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4005
4006		sense_buf[7] = 12;
4007		sense_buf[8] = 0;
4008		sense_buf[9] = 0x0A;
4009		sense_buf[10] = 0x80;
4010
4011		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4012
4013		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4014		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4015		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4016		sense_buf[15] = failing_lba & 0x000000ff;
4017
4018		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4019
4020		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4021		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4022		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4023		sense_buf[19] = failing_lba & 0x000000ff;
4024	} else {
4025		sense_buf[0] = 0x70;
4026		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4027		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4028		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4029
4030		/* Illegal request */
4031		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4032		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4033			sense_buf[7] = 10;	/* additional length */
4034
4035			/* IOARCB was in error */
4036			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4037				sense_buf[15] = 0xC0;
4038			else	/* Parameter data was invalid */
4039				sense_buf[15] = 0x80;
4040
4041			sense_buf[16] =
4042			    ((IPR_FIELD_POINTER_MASK &
4043			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4044			sense_buf[17] =
4045			    (IPR_FIELD_POINTER_MASK &
4046			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4047		} else {
4048			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4049				if (ipr_is_vset_device(res))
4050					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4051				else
4052					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4053
4054				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4055				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4056				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4057				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4058				sense_buf[6] = failing_lba & 0x000000ff;
4059			}
4060
4061			sense_buf[7] = 6;	/* additional length */
4062		}
4063	}
4064}
4065
4066/**
4067 * ipr_get_autosense - Copy autosense data to sense buffer
4068 * @ipr_cmd:	ipr command struct
4069 *
4070 * This function copies the autosense buffer to the buffer
4071 * in the scsi_cmd, if there is autosense available.
4072 *
4073 * Return value:
4074 *	1 if autosense was available / 0 if not
4075 **/
4076static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4077{
4078	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4079
4080	if ((be32_to_cpu(ioasa->ioasc_specific) &
4081	     (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4082		return 0;
4083
4084	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4085	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4086		   SCSI_SENSE_BUFFERSIZE));
4087	return 1;
4088}
4089
4090/**
4091 * ipr_erp_start - Process an error response for a SCSI op
4092 * @ioa_cfg:	ioa config struct
4093 * @ipr_cmd:	ipr command struct
4094 *
4095 * This function determines whether or not to initiate ERP
4096 * on the affected device.
4097 *
4098 * Return value:
4099 * 	nothing
4100 **/
4101static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4102			      struct ipr_cmnd *ipr_cmd)
4103{
4104	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4105	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4106	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4107
4108	if (!res) {
4109		ipr_scsi_eh_done(ipr_cmd);
4110		return;
4111	}
4112
4113	if (ipr_is_gscsi(res))
4114		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4115	else
4116		ipr_gen_sense(ipr_cmd);
4117
4118	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4119	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4120		if (ipr_is_naca_model(res))
4121			scsi_cmd->result |= (DID_ABORT << 16);
4122		else
4123			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4124		break;
4125	case IPR_IOASC_IR_RESOURCE_HANDLE:
4126	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4127		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4128		break;
4129	case IPR_IOASC_HW_SEL_TIMEOUT:
4130		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4131		if (!ipr_is_naca_model(res))
4132			res->needs_sync_complete = 1;
4133		break;
4134	case IPR_IOASC_SYNC_REQUIRED:
4135		if (!res->in_erp)
4136			res->needs_sync_complete = 1;
4137		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4138		break;
4139	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4140	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4141		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4142		break;
4143	case IPR_IOASC_BUS_WAS_RESET:
4144	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4145		/*
		 * Report the bus reset and ask for a retry. The device
		 * will return a check condition/unit attention on the
		 * next command.
4148		 */
4149		if (!res->resetting_device)
4150			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4151		scsi_cmd->result |= (DID_ERROR << 16);
4152		if (!ipr_is_naca_model(res))
4153			res->needs_sync_complete = 1;
4154		break;
4155	case IPR_IOASC_HW_DEV_BUS_STATUS:
4156		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4157		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4158			if (!ipr_get_autosense(ipr_cmd)) {
4159				if (!ipr_is_naca_model(res)) {
4160					ipr_erp_cancel_all(ipr_cmd);
4161					return;
4162				}
4163			}
4164		}
4165		if (!ipr_is_naca_model(res))
4166			res->needs_sync_complete = 1;
4167		break;
4168	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4169		break;
4170	default:
4171		scsi_cmd->result |= (DID_ERROR << 16);
4172		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4173			res->needs_sync_complete = 1;
4174		break;
4175	}
4176
4177	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4178	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4179	scsi_cmd->scsi_done(scsi_cmd);
4180}
4181
4182/**
4183 * ipr_scsi_done - mid-layer done function
4184 * @ipr_cmd:	ipr command struct
4185 *
4186 * This function is invoked by the interrupt handler for
4187 * ops generated by the SCSI mid-layer
4188 *
4189 * Return value:
4190 * 	none
4191 **/
4192static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4193{
4194	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4195	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4196	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4197
4198	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4199
4200	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4201		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4202		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4203		scsi_cmd->scsi_done(scsi_cmd);
4204	} else
4205		ipr_erp_start(ioa_cfg, ipr_cmd);
4206}
4207
4208/**
4209 * ipr_queuecommand - Queue a mid-layer request
4210 * @scsi_cmd:	scsi command struct
4211 * @done:		done function
4212 *
4213 * This function queues a request generated by the mid-layer.
4214 *
4215 * Return value:
4216 *	0 on success
4218 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4219 **/
4220static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4221			    void (*done) (struct scsi_cmnd *))
4222{
4223	struct ipr_ioa_cfg *ioa_cfg;
4224	struct ipr_resource_entry *res;
4225	struct ipr_ioarcb *ioarcb;
4226	struct ipr_cmnd *ipr_cmd;
4227	int rc = 0;
4228
4229	scsi_cmd->scsi_done = done;
4230	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4231	res = scsi_cmd->device->hostdata;
4232	scsi_cmd->result = (DID_OK << 16);
4233
4234	/*
	 * We are currently blocking all devices due to a host reset.
	 * We have told the mid-layer to stop giving us new requests,
	 * but ERP ops don't count. FIXME
4238	 */
4239	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4240		return SCSI_MLQUEUE_HOST_BUSY;
4241
4242	/*
4243	 * FIXME - Create scsi_set_host_offline interface
4244	 *  and the ioa_is_dead check can be removed
4245	 */
4246	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4247		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4248		scsi_cmd->result = (DID_NO_CONNECT << 16);
4249		scsi_cmd->scsi_done(scsi_cmd);
4250		return 0;
4251	}
4252
4253	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4254	ioarcb = &ipr_cmd->ioarcb;
4255	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4256
4257	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4258	ipr_cmd->scsi_cmd = scsi_cmd;
4259	ioarcb->res_handle = res->cfgte.res_handle;
4260	ipr_cmd->done = ipr_scsi_done;
4261	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4262
4263	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4264		if (scsi_cmd->underflow == 0)
4265			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4266
4267		if (res->needs_sync_complete) {
4268			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4269			res->needs_sync_complete = 0;
4270		}
4271
4272		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4273		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4274		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4275		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4276	}
4277
4278	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4279	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4280		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4281
4282	if (likely(rc == 0))
4283		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4284
4285	if (likely(rc == 0)) {
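		/*
		 * Make sure the IOARCB is written to memory before its bus
		 * address is handed to the adapter via the IOARRIN register.
		 */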
4286		mb();
4287		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4288		       ioa_cfg->regs.ioarrin_reg);
4289	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
4292	}
4293
4294	return 0;
4295}
4296
4297/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
4300 *
4301 * Return value:
4302 * 	pointer to buffer with description string
4303 **/
4304static const char * ipr_ioa_info(struct Scsi_Host *host)
4305{
4306	static char buffer[512];
4307	struct ipr_ioa_cfg *ioa_cfg;
4308	unsigned long lock_flags = 0;
4309
4310	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4311
4312	spin_lock_irqsave(host->host_lock, lock_flags);
4313	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4314	spin_unlock_irqrestore(host->host_lock, lock_flags);
4315
4316	return buffer;
4317}
4318
4319static struct scsi_host_template driver_template = {
4320	.module = THIS_MODULE,
4321	.name = "IPR",
4322	.info = ipr_ioa_info,
4323	.queuecommand = ipr_queuecommand,
4324	.eh_abort_handler = ipr_eh_abort,
4325	.eh_device_reset_handler = ipr_eh_dev_reset,
4326	.eh_host_reset_handler = ipr_eh_host_reset,
4327	.slave_alloc = ipr_slave_alloc,
4328	.slave_configure = ipr_slave_configure,
4329	.slave_destroy = ipr_slave_destroy,
4330	.change_queue_depth = ipr_change_queue_depth,
4331	.change_queue_type = ipr_change_queue_type,
4332	.bios_param = ipr_biosparam,
4333	.can_queue = IPR_MAX_COMMANDS,
4334	.this_id = -1,
4335	.sg_tablesize = IPR_MAX_SGLIST,
4336	.max_sectors = IPR_IOA_MAX_SECTORS,
4337	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4338	.use_clustering = ENABLE_CLUSTERING,
4339	.shost_attrs = ipr_ioa_attrs,
4340	.sdev_attrs = ipr_dev_attrs,
4341	.proc_name = IPR_NAME
4342};
4343
4344#ifdef CONFIG_PPC_PSERIES
4345static const u16 ipr_blocked_processors[] = {
4346	PV_NORTHSTAR,
4347	PV_PULSAR,
4348	PV_POWER4,
4349	PV_ICESTAR,
4350	PV_SSTAR,
4351	PV_POWER4p,
4352	PV_630,
4353	PV_630p
4354};
4355
4356/**
4357 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4358 * @ioa_cfg:	ioa cfg struct
4359 *
4360 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4361 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
4363 *
4364 * Return value:
4365 * 	1 if adapter is not supported / 0 if adapter is supported
4366 **/
4367static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4368{
4369	u8 rev_id;
4370	int i;
4371
4372	if (ioa_cfg->type == 0x5702) {
4373		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4374					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4375			if (rev_id < 4) {
4376				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4377					if (__is_processor(ipr_blocked_processors[i]))
4378						return 1;
4379				}
4380			}
4381		}
4382	}
4383	return 0;
4384}
4385#else
4386#define ipr_invalid_adapter(ioa_cfg) 0
4387#endif
4388
4389/**
4390 * ipr_ioa_bringdown_done - IOA bring down completion.
4391 * @ipr_cmd:	ipr command struct
4392 *
4393 * This function processes the completion of an adapter bring down.
4394 * It wakes any reset sleepers.
4395 *
4396 * Return value:
4397 * 	IPR_RC_JOB_RETURN
4398 **/
4399static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4400{
4401	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4402
4403	ENTER;
4404	ioa_cfg->in_reset_reload = 0;
4405	ioa_cfg->reset_retries = 0;
4406	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4407	wake_up_all(&ioa_cfg->reset_wait_q);
4408
4409	spin_unlock_irq(ioa_cfg->host->host_lock);
4410	scsi_unblock_requests(ioa_cfg->host);
4411	spin_lock_irq(ioa_cfg->host->host_lock);
4412	LEAVE;
4413
4414	return IPR_RC_JOB_RETURN;
4415}
4416
4417/**
4418 * ipr_ioa_reset_done - IOA reset completion.
4419 * @ipr_cmd:	ipr command struct
4420 *
4421 * This function processes the completion of an adapter reset.
4422 * It schedules any necessary mid-layer add/removes and
4423 * wakes any reset sleepers.
4424 *
4425 * Return value:
4426 * 	IPR_RC_JOB_RETURN
4427 **/
4428static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4429{
4430	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4431	struct ipr_resource_entry *res;
4432	struct ipr_hostrcb *hostrcb, *temp;
4433	int i = 0;
4434
4435	ENTER;
4436	ioa_cfg->in_reset_reload = 0;
4437	ioa_cfg->allow_cmds = 1;
4438	ioa_cfg->reset_cmd = NULL;
4439	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4440
4441	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4442		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4443			ipr_trace;
4444			break;
4445		}
4446	}
4447	schedule_work(&ioa_cfg->work_q);
4448
4449	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4450		list_del(&hostrcb->queue);
4451		if (i++ < IPR_NUM_LOG_HCAMS)
4452			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4453		else
4454			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4455	}
4456
4457	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4458
4459	ioa_cfg->reset_retries = 0;
4460	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4461	wake_up_all(&ioa_cfg->reset_wait_q);
4462
4463	spin_unlock_irq(ioa_cfg->host->host_lock);
4464	scsi_unblock_requests(ioa_cfg->host);
4465	spin_lock_irq(ioa_cfg->host->host_lock);
4466
4467	if (!ioa_cfg->allow_cmds)
4468		scsi_block_requests(ioa_cfg->host);
4469
4470	LEAVE;
4471	return IPR_RC_JOB_RETURN;
4472}
4473
4474/**
4475 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4476 * @supported_dev:	supported device struct
4477 * @vpids:			vendor product id struct
4478 *
4479 * Return value:
4480 * 	none
4481 **/
4482static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4483				 struct ipr_std_inq_vpids *vpids)
4484{
4485	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4486	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4487	supported_dev->num_records = 1;
4488	supported_dev->data_length =
4489		cpu_to_be16(sizeof(struct ipr_supported_device));
4490	supported_dev->reserved = 0;
4491}
4492
4493/**
4494 * ipr_set_supported_devs - Send Set Supported Devices for a device
4495 * @ipr_cmd:	ipr command struct
4496 *
 * This function sends a Set Supported Devices to the adapter
4498 *
4499 * Return value:
4500 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4501 **/
4502static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4503{
4504	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4505	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4506	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4507	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4508	struct ipr_resource_entry *res = ipr_cmd->u.res;
4509
4510	ipr_cmd->job_step = ipr_ioa_reset_done;
4511
4512	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
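		/*
		 * Issue one Set Supported Devices per disk. The reset job
		 * re-enters this function (job_step is pointed back at it
		 * below) to pick up the next disk; once no disks remain we
		 * fall through to ipr_ioa_reset_done.
		 */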
4513		if (!ipr_is_scsi_disk(res))
4514			continue;
4515
4516		ipr_cmd->u.res = res;
4517		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4518
4519		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4520		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4521		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4522
4523		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4524		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4525		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4526
4527		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4528							sizeof(struct ipr_supported_device));
4529		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4530					     offsetof(struct ipr_misc_cbs, supp_dev));
4531		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4532		ioarcb->write_data_transfer_length =
4533			cpu_to_be32(sizeof(struct ipr_supported_device));
4534
4535		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4536			   IPR_SET_SUP_DEVICE_TIMEOUT);
4537
4538		ipr_cmd->job_step = ipr_set_supported_devs;
4539		return IPR_RC_JOB_RETURN;
4540	}
4541
4542	return IPR_RC_JOB_CONTINUE;
4543}
4544
4545/**
4546 * ipr_setup_write_cache - Disable write cache if needed
4547 * @ipr_cmd:	ipr command struct
4548 *
 * This function sets up the adapter's write cache to the desired setting.
4550 *
4551 * Return value:
4552 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4553 **/
4554static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4555{
4556	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4557
4558	ipr_cmd->job_step = ipr_set_supported_devs;
4559	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4560				    struct ipr_resource_entry, queue);
4561
4562	if (ioa_cfg->cache_state != CACHE_DISABLED)
4563		return IPR_RC_JOB_CONTINUE;
4564
4565	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4566	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4567	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4568	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4569
4570	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4571
4572	return IPR_RC_JOB_RETURN;
4573}
4574
4575/**
4576 * ipr_get_mode_page - Locate specified mode page
4577 * @mode_pages:	mode page buffer
4578 * @page_code:	page code to find
4579 * @len:		minimum required length for mode page
4580 *
4581 * Return value:
4582 * 	pointer to mode page / NULL on failure
4583 **/
4584static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4585			       u32 page_code, u32 len)
4586{
4587	struct ipr_mode_page_hdr *mode_hdr;
4588	u32 page_length;
4589	u32 length;
4590
4591	if (!mode_pages || (mode_pages->hdr.length == 0))
4592		return NULL;
4593
4594	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4595	mode_hdr = (struct ipr_mode_page_hdr *)
4596		(mode_pages->data + mode_pages->hdr.block_desc_len);
4597
4598	while (length) {
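		/*
		 * Step through the mode page headers until the requested
		 * page code is found or the returned data is exhausted.
		 */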
4599		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4600			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4601				return mode_hdr;
4602			break;
4603		} else {
4604			page_length = (sizeof(struct ipr_mode_page_hdr) +
4605				       mode_hdr->page_length);
4606			length -= page_length;
4607			mode_hdr = (struct ipr_mode_page_hdr *)
4608				((unsigned long)mode_hdr + page_length);
4609		}
4610	}
4611	return NULL;
4612}
4613
4614/**
4615 * ipr_check_term_power - Check for term power errors
4616 * @ioa_cfg:	ioa config struct
4617 * @mode_pages:	IOAFP mode pages buffer
4618 *
4619 * Check the IOAFP's mode page 28 for term power errors
4620 *
4621 * Return value:
4622 * 	nothing
4623 **/
4624static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4625				 struct ipr_mode_pages *mode_pages)
4626{
4627	int i;
4628	int entry_length;
4629	struct ipr_dev_bus_entry *bus;
4630	struct ipr_mode_page28 *mode_page;
4631
4632	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4633				      sizeof(struct ipr_mode_page28));
4634
4635	entry_length = mode_page->entry_length;
4636
4637	bus = mode_page->bus;
4638
4639	for (i = 0; i < mode_page->num_entries; i++) {
4640		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4641			dev_err(&ioa_cfg->pdev->dev,
4642				"Term power is absent on scsi bus %d\n",
4643				bus->res_addr.bus);
4644		}
4645
4646		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4647	}
4648}
4649
4650/**
4651 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4652 * @ioa_cfg:	ioa config struct
4653 *
4654 * Looks through the config table checking for SES devices. If
4655 * the SES device is in the SES table indicating a maximum SCSI
4656 * bus speed, the speed is limited for the bus.
4657 *
4658 * Return value:
4659 * 	none
4660 **/
4661static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4662{
4663	u32 max_xfer_rate;
4664	int i;
4665
4666	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4667		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4668						       ioa_cfg->bus_attr[i].bus_width);
4669
4670		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4671			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4672	}
4673}
4674
4675/**
4676 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4677 * @ioa_cfg:	ioa config struct
4678 * @mode_pages:	mode page 28 buffer
4679 *
4680 * Updates mode page 28 based on driver configuration
4681 *
4682 * Return value:
4683 * 	none
4684 **/
4685static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4686					  	struct ipr_mode_pages *mode_pages)
4687{
4688	int i, entry_length;
4689	struct ipr_dev_bus_entry *bus;
4690	struct ipr_bus_attributes *bus_attr;
4691	struct ipr_mode_page28 *mode_page;
4692
4693	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4694				      sizeof(struct ipr_mode_page28));
4695
4696	entry_length = mode_page->entry_length;
4697
4698	/* Loop for each device bus entry */
4699	for (i = 0, bus = mode_page->bus;
4700	     i < mode_page->num_entries;
4701	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4702		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4703			dev_err(&ioa_cfg->pdev->dev,
4704				"Invalid resource address reported: 0x%08X\n",
4705				IPR_GET_PHYS_LOC(bus->res_addr));
4706			continue;
4707		}
4708
4709		bus_attr = &ioa_cfg->bus_attr[i];
4710		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4711		bus->bus_width = bus_attr->bus_width;
4712		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4713		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4714		if (bus_attr->qas_enabled)
4715			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4716		else
4717			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4718	}
4719}
4720
4721/**
4722 * ipr_build_mode_select - Build a mode select command
4723 * @ipr_cmd:	ipr command struct
4724 * @res_handle:	resource handle to send command to
 * @parm:		Byte 1 of Mode Select command
4726 * @dma_addr:	DMA buffer address
4727 * @xfer_len:	data transfer length
4728 *
4729 * Return value:
4730 * 	none
4731 **/
4732static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4733				  __be32 res_handle, u8 parm, u32 dma_addr,
4734				  u8 xfer_len)
4735{
4736	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4737	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4738
4739	ioarcb->res_handle = res_handle;
4740	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4741	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4742	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4743	ioarcb->cmd_pkt.cdb[1] = parm;
4744	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4745
4746	ioadl->flags_and_data_len =
4747		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4748	ioadl->address = cpu_to_be32(dma_addr);
4749	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4750	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4751}
4752
4753/**
4754 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4755 * @ipr_cmd:	ipr command struct
4756 *
4757 * This function sets up the SCSI bus attributes and sends
4758 * a Mode Select for Page 28 to activate them.
4759 *
4760 * Return value:
4761 * 	IPR_RC_JOB_RETURN
4762 **/
4763static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4764{
4765	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4766	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4767	int length;
4768
4769	ENTER;
4770	ipr_scsi_bus_speed_limit(ioa_cfg);
4771	ipr_check_term_power(ioa_cfg, mode_pages);
4772	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4773	length = mode_pages->hdr.length + 1;
4774	mode_pages->hdr.length = 0;
4775
4776	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4777			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4778			      length);
4779
4780	ipr_cmd->job_step = ipr_setup_write_cache;
4781	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4782
4783	LEAVE;
4784	return IPR_RC_JOB_RETURN;
4785}
4786
4787/**
4788 * ipr_build_mode_sense - Builds a mode sense command
4789 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
4791 * @parm:		Byte 2 of mode sense command
4792 * @dma_addr:	DMA address of mode sense buffer
4793 * @xfer_len:	Size of DMA buffer
4794 *
4795 * Return value:
4796 * 	none
4797 **/
4798static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4799				 __be32 res_handle,
4800				 u8 parm, u32 dma_addr, u8 xfer_len)
4801{
4802	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4803	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4804
4805	ioarcb->res_handle = res_handle;
4806	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4807	ioarcb->cmd_pkt.cdb[2] = parm;
4808	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4809	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4810
4811	ioadl->flags_and_data_len =
4812		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4813	ioadl->address = cpu_to_be32(dma_addr);
4814	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4815	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4816}
4817
4818/**
4819 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4820 * @ipr_cmd:	ipr command struct
4821 *
4822 * This function handles the failure of an IOA bringup command.
4823 *
4824 * Return value:
4825 * 	IPR_RC_JOB_RETURN
4826 **/
4827static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4828{
4829	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4830	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4831
4832	dev_err(&ioa_cfg->pdev->dev,
4833		"0x%02X failed with IOASC: 0x%08X\n",
4834		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4835
4836	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4837	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4838	return IPR_RC_JOB_RETURN;
4839}
4840
4841/**
4842 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4843 * @ipr_cmd:	ipr command struct
4844 *
4845 * This function handles the failure of a Mode Sense to the IOAFP.
4846 * Some adapters do not handle all mode pages.
4847 *
4848 * Return value:
4849 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4850 **/
4851static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4852{
4853	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4854
4855	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4856		ipr_cmd->job_step = ipr_setup_write_cache;
4857		return IPR_RC_JOB_CONTINUE;
4858	}
4859
4860	return ipr_reset_cmd_failed(ipr_cmd);
4861}
4862
4863/**
4864 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4865 * @ipr_cmd:	ipr command struct
4866 *
 * This function sends a Page 28 mode sense to the IOA to
4868 * retrieve SCSI bus attributes.
4869 *
4870 * Return value:
4871 * 	IPR_RC_JOB_RETURN
4872 **/
4873static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4874{
4875	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4876
4877	ENTER;
4878	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4879			     0x28, ioa_cfg->vpd_cbs_dma +
4880			     offsetof(struct ipr_misc_cbs, mode_pages),
4881			     sizeof(struct ipr_mode_pages));
4882
4883	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4884	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4885
4886	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4887
4888	LEAVE;
4889	return IPR_RC_JOB_RETURN;
4890}
4891
4892/**
4893 * ipr_init_res_table - Initialize the resource table
4894 * @ipr_cmd:	ipr command struct
4895 *
4896 * This function looks through the existing resource table, comparing
4897 * it with the config table. This function will take care of old/new
4898 * devices and schedule adding/removing them from the mid-layer
4899 * as appropriate.
4900 *
4901 * Return value:
4902 * 	IPR_RC_JOB_CONTINUE
4903 **/
4904static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4905{
4906	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4907	struct ipr_resource_entry *res, *temp;
4908	struct ipr_config_table_entry *cfgte;
4909	int found, i;
4910	LIST_HEAD(old_res);
4911
4912	ENTER;
4913	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4914		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4915
4916	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4917		list_move_tail(&res->queue, &old_res);
4918
4919	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
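		/*
		 * Match each config table entry against the previous
		 * resource list by resource address. Entries with no match
		 * are new devices; give them a free resource entry and flag
		 * them for addition to the mid-layer.
		 */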
4920		cfgte = &ioa_cfg->cfg_table->dev[i];
4921		found = 0;
4922
4923		list_for_each_entry_safe(res, temp, &old_res, queue) {
4924			if (!memcmp(&res->cfgte.res_addr,
4925				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4926				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4927				found = 1;
4928				break;
4929			}
4930		}
4931
4932		if (!found) {
4933			if (list_empty(&ioa_cfg->free_res_q)) {
4934				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4935				break;
4936			}
4937
4938			found = 1;
4939			res = list_entry(ioa_cfg->free_res_q.next,
4940					 struct ipr_resource_entry, queue);
4941			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4942			ipr_init_res_entry(res);
4943			res->add_to_ml = 1;
4944		}
4945
4946		if (found)
4947			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4948	}
4949
4950	list_for_each_entry_safe(res, temp, &old_res, queue) {
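		/*
		 * Whatever is left on old_res is no longer reported by the
		 * adapter. Devices known to the mid-layer are flagged for
		 * removal; the rest go back on the free list.
		 */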
4951		if (res->sdev) {
4952			res->del_from_ml = 1;
4953			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
4954			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4955		} else {
4956			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4957		}
4958	}
4959
4960	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4961
4962	LEAVE;
4963	return IPR_RC_JOB_CONTINUE;
4964}
4965
4966/**
4967 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4968 * @ipr_cmd:	ipr command struct
4969 *
4970 * This function sends a Query IOA Configuration command
4971 * to the adapter to retrieve the IOA configuration table.
4972 *
4973 * Return value:
4974 * 	IPR_RC_JOB_RETURN
4975 **/
4976static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4977{
4978	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4979	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4980	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4981	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4982
4983	ENTER;
4984	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4985		 ucode_vpd->major_release, ucode_vpd->card_type,
4986		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4987	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4988	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4989
4990	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4991	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4992	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4993
4994	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4995	ioarcb->read_data_transfer_length =
4996		cpu_to_be32(sizeof(struct ipr_config_table));
4997
4998	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4999	ioadl->flags_and_data_len =
5000		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5001
5002	ipr_cmd->job_step = ipr_init_res_table;
5003
5004	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5005
5006	LEAVE;
5007	return IPR_RC_JOB_RETURN;
5008}
5009
5010/**
5011 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry command flags (CDB byte 1)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry data buffer
 * @xfer_len:	size of the inquiry data buffer
 *
 * This utility function sends an inquiry to the adapter.
5015 *
5016 * Return value:
5017 * 	none
5018 **/
5019static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5020			      u32 dma_addr, u8 xfer_len)
5021{
5022	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5023	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5024
5025	ENTER;
5026	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5027	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5028
5029	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5030	ioarcb->cmd_pkt.cdb[1] = flags;
5031	ioarcb->cmd_pkt.cdb[2] = page;
5032	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5033
5034	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5035	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5036
5037	ioadl->address = cpu_to_be32(dma_addr);
5038	ioadl->flags_and_data_len =
5039		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5040
5041	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5042	LEAVE;
5043}
5044
5045/**
5046 * ipr_inquiry_page_supported - Is the given inquiry page supported
5047 * @page0:		inquiry page 0 buffer
5048 * @page:		page code.
5049 *
5050 * This function determines if the specified inquiry page is supported.
5051 *
5052 * Return value:
5053 *	1 if page is supported / 0 if not
5054 **/
5055static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5056{
5057	int i;
5058
5059	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5060		if (page0->page[i] == page)
5061			return 1;
5062
5063	return 0;
5064}
5065
5066/**
5067 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5068 * @ipr_cmd:	ipr command struct
5069 *
5070 * This function sends a Page 3 inquiry to the adapter
5071 * to retrieve software VPD information.
5072 *
5073 * Return value:
5074 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5075 **/
5076static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5077{
5078	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5079	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5080
5081	ENTER;
5082
5083	if (!ipr_inquiry_page_supported(page0, 1))
5084		ioa_cfg->cache_state = CACHE_NONE;
5085
5086	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5087
5088	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5089			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5090			  sizeof(struct ipr_inquiry_page3));
5091
5092	LEAVE;
5093	return IPR_RC_JOB_RETURN;
5094}
5095
5096/**
5097 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5098 * @ipr_cmd:	ipr command struct
5099 *
5100 * This function sends a Page 0 inquiry to the adapter
5101 * to retrieve supported inquiry pages.
5102 *
5103 * Return value:
5104 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5105 **/
5106static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5107{
5108	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5109	char type[5];
5110
5111	ENTER;
5112
5113	/* Grab the type out of the VPD and store it away */
5114	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5115	type[4] = '\0';
5116	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5117
5118	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5119
5120	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5121			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5122			  sizeof(struct ipr_inquiry_page0));
5123
5124	LEAVE;
5125	return IPR_RC_JOB_RETURN;
5126}
5127
5128/**
5129 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5130 * @ipr_cmd:	ipr command struct
5131 *
5132 * This function sends a standard inquiry to the adapter.
5133 *
5134 * Return value:
5135 * 	IPR_RC_JOB_RETURN
5136 **/
5137static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5138{
5139	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5140
5141	ENTER;
5142	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5143
5144	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5145			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5146			  sizeof(struct ipr_ioa_vpd));
5147
5148	LEAVE;
5149	return IPR_RC_JOB_RETURN;
5150}
5151
5152/**
5153 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5154 * @ipr_cmd:	ipr command struct
5155 *
 * This function sends an Identify Host Request Response Queue
5157 * command to establish the HRRQ with the adapter.
5158 *
5159 * Return value:
5160 * 	IPR_RC_JOB_RETURN
5161 **/
5162static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5163{
5164	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5165	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5166
5167	ENTER;
5168	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5169
5170	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5171	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5172
5173	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
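	/*
	 * CDB bytes 2-5 carry the host RRQ DMA address and bytes 7-8 its
	 * length in bytes (one 32-bit entry per command block).
	 */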
5174	ioarcb->cmd_pkt.cdb[2] =
5175		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5176	ioarcb->cmd_pkt.cdb[3] =
5177		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5178	ioarcb->cmd_pkt.cdb[4] =
5179		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5180	ioarcb->cmd_pkt.cdb[5] =
5181		((u32) ioa_cfg->host_rrq_dma) & 0xff;
5182	ioarcb->cmd_pkt.cdb[7] =
5183		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5184	ioarcb->cmd_pkt.cdb[8] =
5185		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5186
5187	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5188
5189	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5190
5191	LEAVE;
5192	return IPR_RC_JOB_RETURN;
5193}
5194
5195/**
5196 * ipr_reset_timer_done - Adapter reset timer function
5197 * @ipr_cmd:	ipr command struct
5198 *
5199 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct no longer points to this command block, we are
 * doing nested adapter resets and fail_all_ops will take care
 * of freeing the command block.
5204 *
5205 * Return value:
5206 * 	none
5207 **/
5208static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5209{
5210	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5211	unsigned long lock_flags = 0;
5212
5213	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5214
5215	if (ioa_cfg->reset_cmd == ipr_cmd) {
5216		list_del(&ipr_cmd->queue);
5217		ipr_cmd->done(ipr_cmd);
5218	}
5219
5220	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5221}
5222
5223/**
5224 * ipr_reset_start_timer - Start a timer for adapter reset job
5225 * @ipr_cmd:	ipr command struct
5226 * @timeout:	timeout value
5227 *
5228 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct no longer points to this command block, we are
 * doing nested adapter resets and fail_all_ops will take care
 * of freeing the command block.
5233 *
5234 * Return value:
5235 * 	none
5236 **/
5237static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5238				  unsigned long timeout)
5239{
5240	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5241	ipr_cmd->done = ipr_reset_ioa_job;
5242
5243	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5244	ipr_cmd->timer.expires = jiffies + timeout;
5245	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5246	add_timer(&ipr_cmd->timer);
5247}
5248
5249/**
5250 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5251 * @ioa_cfg:	ioa cfg struct
5252 *
5253 * Return value:
5254 * 	nothing
5255 **/
5256static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5257{
5258	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5259
5260	/* Initialize Host RRQ pointers */
5261	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5262	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5263	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
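	/*
	 * An HRRQ entry is owned by the host while its toggle bit matches
	 * this value; ipr_isr flips it each time hrrq_curr wraps back to
	 * hrrq_start.
	 */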
5264	ioa_cfg->toggle_bit = 1;
5265
5266	/* Zero out config table */
5267	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5268}
5269
5270/**
5271 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5272 * @ipr_cmd:	ipr command struct
5273 *
5274 * This function reinitializes some control blocks and
5275 * enables destructive diagnostics on the adapter.
5276 *
5277 * Return value:
5278 * 	IPR_RC_JOB_RETURN
5279 **/
5280static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5281{
5282	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5283	volatile u32 int_reg;
5284
5285	ENTER;
5286	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5287	ipr_init_ioa_mem(ioa_cfg);
5288
5289	ioa_cfg->allow_interrupts = 1;
5290	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5291
5292	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
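		/*
		 * The adapter has already transitioned to operational state;
		 * just unmask the error and HRRQ interrupts and let the
		 * reset job continue.
		 */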
5293		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5294		       ioa_cfg->regs.clr_interrupt_mask_reg);
5295		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5296		return IPR_RC_JOB_CONTINUE;
5297	}
5298
5299	/* Enable destructive diagnostics on IOA */
5300	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5301
5302	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5303	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5304
5305	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5306
5307	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5308	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5309	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5310	ipr_cmd->done = ipr_reset_ioa_job;
5311	add_timer(&ipr_cmd->timer);
5312	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5313
5314	LEAVE;
5315	return IPR_RC_JOB_RETURN;
5316}
5317
5318/**
5319 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5320 * @ipr_cmd:	ipr command struct
5321 *
5322 * This function is invoked when an adapter dump has run out
5323 * of processing time.
5324 *
5325 * Return value:
5326 * 	IPR_RC_JOB_CONTINUE
5327 **/
5328static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5329{
5330	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5331
5332	if (ioa_cfg->sdt_state == GET_DUMP)
5333		ioa_cfg->sdt_state = ABORT_DUMP;
5334
5335	ipr_cmd->job_step = ipr_reset_alert;
5336
5337	return IPR_RC_JOB_CONTINUE;
5338}
5339
5340/**
5341 * ipr_unit_check_no_data - Log a unit check/no data error log
5342 * @ioa_cfg:		ioa config struct
5343 *
5344 * Logs an error indicating the adapter unit checked, but for some
5345 * reason, we were unable to fetch the unit check buffer.
5346 *
5347 * Return value:
5348 * 	nothing
5349 **/
5350static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5351{
5352	ioa_cfg->errors_logged++;
5353	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5354}
5355
5356/**
5357 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5358 * @ioa_cfg:		ioa config struct
5359 *
5360 * Fetches the unit check buffer from the adapter by clocking the data
5361 * through the mailbox register.
5362 *
5363 * Return value:
5364 * 	nothing
5365 **/
5366static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5367{
5368	unsigned long mailbox;
5369	struct ipr_hostrcb *hostrcb;
5370	struct ipr_uc_sdt sdt;
5371	int rc, length;
5372
5373	mailbox = readl(ioa_cfg->ioa_mailbox);
5374
5375	if (!ipr_sdt_is_fmt2(mailbox)) {
5376		ipr_unit_check_no_data(ioa_cfg);
5377		return;
5378	}
5379
5380	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5381	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5382					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5383
5384	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5385	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5386		ipr_unit_check_no_data(ioa_cfg);
5387		return;
5388	}
5389
5390	/* Find length of the first sdt entry (UC buffer) */
5391	length = (be32_to_cpu(sdt.entry[0].end_offset) -
5392		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5393
5394	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5395			     struct ipr_hostrcb, queue);
5396	list_del(&hostrcb->queue);
5397	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5398
5399	rc = ipr_get_ldump_data_section(ioa_cfg,
5400					be32_to_cpu(sdt.entry[0].bar_str_offset),
5401					(__be32 *)&hostrcb->hcam,
5402					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5403
5404	if (!rc)
5405		ipr_handle_log_data(ioa_cfg, hostrcb);
5406	else
5407		ipr_unit_check_no_data(ioa_cfg);
5408
5409	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5410}
5411
5412/**
5413 * ipr_reset_restore_cfg_space - Restore PCI config space.
5414 * @ipr_cmd:	ipr command struct
5415 *
5416 * Description: This function restores the saved PCI config space of
5417 * the adapter, fails all outstanding ops back to the callers, and
5418 * fetches the dump/unit check if applicable to this reset.
5419 *
5420 * Return value:
5421 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5422 **/
5423static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5424{
5425	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5426	int rc;
5427
5428	ENTER;
5429	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5430	rc = pci_restore_state(ioa_cfg->pdev);
5431
5432	if (rc != PCIBIOS_SUCCESSFUL) {
5433		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5434		return IPR_RC_JOB_CONTINUE;
5435	}
5436
5437	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5438		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5439		return IPR_RC_JOB_CONTINUE;
5440	}
5441
5442	ipr_fail_all_ops(ioa_cfg);
5443
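	/*
	 * If the adapter unit checked during the reset, fetch its error
	 * buffer now, then alert/reset it again before re-enabling it.
	 */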
5444	if (ioa_cfg->ioa_unit_checked) {
5445		ioa_cfg->ioa_unit_checked = 0;
5446		ipr_get_unit_check_buffer(ioa_cfg);
5447		ipr_cmd->job_step = ipr_reset_alert;
5448		ipr_reset_start_timer(ipr_cmd, 0);
5449		return IPR_RC_JOB_RETURN;
5450	}
5451
5452	if (ioa_cfg->in_ioa_bringdown) {
5453		ipr_cmd->job_step = ipr_ioa_bringdown_done;
5454	} else {
5455		ipr_cmd->job_step = ipr_reset_enable_ioa;
5456
5457		if (GET_DUMP == ioa_cfg->sdt_state) {
5458			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5459			ipr_cmd->job_step = ipr_reset_wait_for_dump;
5460			schedule_work(&ioa_cfg->work_q);
5461			return IPR_RC_JOB_RETURN;
5462		}
5463	}
5464
5465	LEAVE;
5466	return IPR_RC_JOB_CONTINUE;
5467}
5468
5469/**
5470 * ipr_reset_start_bist - Run BIST on the adapter.
5471 * @ipr_cmd:	ipr command struct
5472 *
5473 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5474 *
5475 * Return value:
5476 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5477 **/
5478static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5479{
5480	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5481	int rc;
5482
5483	ENTER;
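	/*
	 * Block user config space access while BIST runs, then kick off the
	 * adapter's built-in self test via the PCI BIST register.
	 */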
5484	pci_block_user_cfg_access(ioa_cfg->pdev);
5485	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5486
5487	if (rc != PCIBIOS_SUCCESSFUL) {
5488		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5489		rc = IPR_RC_JOB_CONTINUE;
5490	} else {
5491		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5492		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5493		rc = IPR_RC_JOB_RETURN;
5494	}
5495
5496	LEAVE;
5497	return rc;
5498}
5499
5500/**
5501 * ipr_reset_allowed - Query whether or not IOA can be reset
5502 * @ioa_cfg:	ioa config struct
5503 *
5504 * Return value:
5505 * 	0 if reset not allowed / non-zero if reset is allowed
5506 **/
5507static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5508{
5509	volatile u32 temp_reg;
5510
5511	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5512	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5513}
5514
5515/**
5516 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5517 * @ipr_cmd:	ipr command struct
5518 *
5519 * Description: This function waits for adapter permission to run BIST,
5520 * then runs BIST. If the adapter does not give permission after a
5521 * reasonable time, we will reset the adapter anyway. Resetting the
5522 * adapter without warning it carries the risk of losing its
5523 * persistent error log: if the adapter is reset while it is writing
5524 * to its flash, that flash segment will have bad ECC and be
5525 * zeroed.
5526 *
5527 * Return value:
5528 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5529 **/
5530static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5531{
5532	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5533	int rc = IPR_RC_JOB_RETURN;
5534
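	/* Poll for permission until the adapter allows the reset or time runs out */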
5535	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5536		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5537		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5538	} else {
5539		ipr_cmd->job_step = ipr_reset_start_bist;
5540		rc = IPR_RC_JOB_CONTINUE;
5541	}
5542
5543	return rc;
5544}
5545
5546/**
5547 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5548 * @ipr_cmd:	ipr command struct
5549 *
5550 * Description: This function alerts the adapter that it will be reset.
5551 * If memory space is not currently enabled, proceed directly
5552 * to running BIST on the adapter. The timer must always be started
5553 * so we guarantee we do not run BIST from ipr_isr.
5554 *
5555 * Return value:
5556 * 	IPR_RC_JOB_RETURN
5557 **/
5558static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5559{
5560	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5561	u16 cmd_reg;
5562	int rc;
5563
5564	ENTER;
5565	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5566
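	/*
	 * The reset alert is raised via an MMIO write, so it can only be
	 * used when memory space is enabled; otherwise go straight to BIST.
	 */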
5567	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5568		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5569		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5570		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5571	} else {
5572		ipr_cmd->job_step = ipr_reset_start_bist;
5573	}
5574
5575	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5576	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5577
5578	LEAVE;
5579	return IPR_RC_JOB_RETURN;
5580}
5581
5582/**
5583 * ipr_reset_ucode_download_done - Microcode download completion
5584 * @ipr_cmd:	ipr command struct
5585 *
5586 * Description: This function unmaps the microcode download buffer.
5587 *
5588 * Return value:
5589 * 	IPR_RC_JOB_CONTINUE
5590 **/
5591static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5592{
5593	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5594	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5595
5596	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5597		     sglist->num_sg, DMA_TO_DEVICE);
5598
5599	ipr_cmd->job_step = ipr_reset_alert;
5600	return IPR_RC_JOB_CONTINUE;
5601}
5602
5603/**
5604 * ipr_reset_ucode_download - Download microcode to the adapter
5605 * @ipr_cmd:	ipr command struct
5606 *
5607 * Description: This function checks to see if there is microcode
5608 * to download to the adapter. If there is, a download is performed.
5609 *
5610 * Return value:
5611 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5612 **/
5613static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5614{
5615	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5616	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5617
5618	ENTER;
5619	ipr_cmd->job_step = ipr_reset_alert;
5620
5621	if (!sglist)
5622		return IPR_RC_JOB_CONTINUE;
5623
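	/*
	 * Build a SCSI WRITE BUFFER (download microcode and save) CDB.
	 * Bytes 6-8 of the CDB carry the 24-bit image length, MSB first.
	 */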
5624	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5625	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5626	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5627	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5628	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5629	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5630	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5631
5632	ipr_build_ucode_ioadl(ipr_cmd, sglist);
5633	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5634
5635	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5636		   IPR_WRITE_BUFFER_TIMEOUT);
5637
5638	LEAVE;
5639	return IPR_RC_JOB_RETURN;
5640}
5641
5642/**
5643 * ipr_reset_shutdown_ioa - Shutdown the adapter
5644 * @ipr_cmd:	ipr command struct
5645 *
5646 * Description: This function issues an adapter shutdown of the
5647 * specified type to the specified adapter as part of the
5648 * adapter reset job.
5649 *
5650 * Return value:
5651 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5652 **/
5653static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5654{
5655	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5656	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5657	unsigned long timeout;
5658	int rc = IPR_RC_JOB_CONTINUE;
5659
5660	ENTER;
5661	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5662		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5663		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5664		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5665		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5666
5667		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5668			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5669		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5670			timeout = IPR_INTERNAL_TIMEOUT;
5671		else
5672			timeout = IPR_SHUTDOWN_TIMEOUT;
5673
5674		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5675
5676		rc = IPR_RC_JOB_RETURN;
5677		ipr_cmd->job_step = ipr_reset_ucode_download;
5678	} else
5679		ipr_cmd->job_step = ipr_reset_alert;
5680
5681	LEAVE;
5682	return rc;
5683}
5684
5685/**
5686 * ipr_reset_ioa_job - Adapter reset job
5687 * @ipr_cmd:	ipr command struct
5688 *
5689 * Description: This function is the job router for the adapter reset job.
5690 *
5691 * Return value:
5692 * 	none
5693 **/
5694static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5695{
5696	u32 rc, ioasc;
5697	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5698
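	/*
	 * Run job steps back to back while they return IPR_RC_JOB_CONTINUE;
	 * a step that starts asynchronous work (timer, adapter command)
	 * returns IPR_RC_JOB_RETURN and re-enters here from its completion.
	 */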
5699	do {
5700		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5701
5702		if (ioa_cfg->reset_cmd != ipr_cmd) {
5703			/*
5704			 * We are doing nested adapter resets and this is
5705			 * not the current reset job.
5706			 */
5707			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5708			return;
5709		}
5710
5711		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5712			rc = ipr_cmd->job_step_failed(ipr_cmd);
5713			if (rc == IPR_RC_JOB_RETURN)
5714				return;
5715		}
5716
5717		ipr_reinit_ipr_cmnd(ipr_cmd);
5718		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5719		rc = ipr_cmd->job_step(ipr_cmd);
5720	} while(rc == IPR_RC_JOB_CONTINUE);
5721}
5722
5723/**
5724 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5725 * @ioa_cfg:		ioa config struct
5726 * @job_step:		first job step of reset job
5727 * @shutdown_type:	shutdown type
5728 *
5729 * Description: This function will initiate the reset of the given adapter
5730 * starting at the selected job step.
5731 * If the caller needs to wait on the completion of the reset,
5732 * the caller must sleep on the reset_wait_q.
5733 *
5734 * Return value:
5735 * 	none
5736 **/
5737static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5738				    int (*job_step) (struct ipr_cmnd *),
5739				    enum ipr_shutdown_type shutdown_type)
5740{
5741	struct ipr_cmnd *ipr_cmd;
5742
5743	ioa_cfg->in_reset_reload = 1;
5744	ioa_cfg->allow_cmds = 0;
5745	scsi_block_requests(ioa_cfg->host);
5746
5747	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5748	ioa_cfg->reset_cmd = ipr_cmd;
5749	ipr_cmd->job_step = job_step;
5750	ipr_cmd->u.shutdown_type = shutdown_type;
5751
5752	ipr_reset_ioa_job(ipr_cmd);
5753}
5754
5755/**
5756 * ipr_initiate_ioa_reset - Initiate an adapter reset
5757 * @ioa_cfg:		ioa config struct
5758 * @shutdown_type:	shutdown type
5759 *
5760 * Description: This function will initiate the reset of the given adapter.
5761 * If the caller needs to wait on the completion of the reset,
5762 * the caller must sleep on the reset_wait_q.
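 *
 * A typical caller pattern (modelled on other callers in this driver) is:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);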
5763 *
5764 * Return value:
5765 * 	none
5766 **/
5767static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5768				   enum ipr_shutdown_type shutdown_type)
5769{
5770	if (ioa_cfg->ioa_is_dead)
5771		return;
5772
5773	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5774		ioa_cfg->sdt_state = ABORT_DUMP;
5775
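	/*
	 * If we have already retried the maximum number of times, give up:
	 * mark the adapter dead and fail everything back to the midlayer.
	 */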
5776	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5777		dev_err(&ioa_cfg->pdev->dev,
5778			"IOA taken offline - error recovery failed\n");
5779
5780		ioa_cfg->reset_retries = 0;
5781		ioa_cfg->ioa_is_dead = 1;
5782
5783		if (ioa_cfg->in_ioa_bringdown) {
5784			ioa_cfg->reset_cmd = NULL;
5785			ioa_cfg->in_reset_reload = 0;
5786			ipr_fail_all_ops(ioa_cfg);
5787			wake_up_all(&ioa_cfg->reset_wait_q);
5788
5789			spin_unlock_irq(ioa_cfg->host->host_lock);
5790			scsi_unblock_requests(ioa_cfg->host);
5791			spin_lock_irq(ioa_cfg->host->host_lock);
5792			return;
5793		} else {
5794			ioa_cfg->in_ioa_bringdown = 1;
5795			shutdown_type = IPR_SHUTDOWN_NONE;
5796		}
5797	}
5798
5799	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5800				shutdown_type);
5801}
5802
5803/**
5804 * ipr_reset_freeze - Hold off all I/O activity
5805 * @ipr_cmd:	ipr command struct
5806 *
5807 * Description: If the PCI slot is frozen, hold off all I/O
5808 * activity; then, as soon as the slot is available again,
5809 * initiate an adapter reset.
5810 */
5811static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5812{
5813	/* Disallow new interrupts, avoid loop */
5814	ipr_cmd->ioa_cfg->allow_interrupts = 0;
5815	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5816	ipr_cmd->done = ipr_reset_ioa_job;
5817	return IPR_RC_JOB_RETURN;
5818}
5819
5820/**
5821 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5822 * @pdev:	PCI device struct
5823 *
5824 * Description: This routine is called to tell us that the PCI bus
5825 * is down. Can't do anything here, except put the device driver
5826 * into a holding pattern, waiting for the PCI bus to come back.
5827 */
5828static void ipr_pci_frozen(struct pci_dev *pdev)
5829{
5830	unsigned long flags = 0;
5831	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5832
5833	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5834	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5835	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5836}
5837
5838/**
5839 * ipr_pci_slot_reset - Called when PCI slot has been reset.
5840 * @pdev:	PCI device struct
5841 *
5842 * Description: This routine is called by the pci error recovery
5843 * code after the PCI slot has been reset, just before we
5844 * should resume normal operations.
5845 */
5846static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5847{
5848	unsigned long flags = 0;
5849	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5850
5851	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5852	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5853	                                 IPR_SHUTDOWN_NONE);
5854	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5855	return PCI_ERS_RESULT_RECOVERED;
5856}
5857
5858/**
5859 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5860 * @pdev:	PCI device struct
5861 *
5862 * Description: This routine is called when the PCI bus has
5863 * permanently failed.
5864 */
5865static void ipr_pci_perm_failure(struct pci_dev *pdev)
5866{
5867	unsigned long flags = 0;
5868	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5869
5870	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
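	/*
	 * Force the retry count to the maximum so the reset path marks the
	 * adapter dead and simply brings it down rather than recovering it.
	 */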
5871	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5872		ioa_cfg->sdt_state = ABORT_DUMP;
5873	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5874	ioa_cfg->in_ioa_bringdown = 1;
5875	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5876	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5877}
5878
5879/**
5880 * ipr_pci_error_detected - Called when a PCI error is detected.
5881 * @pdev:	PCI device struct
5882 * @state:	PCI channel state
5883 *
5884 * Description: Called when a PCI error is detected.
5885 *
5886 * Return value:
5887 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5888 */
5889static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5890					       pci_channel_state_t state)
5891{
5892	switch (state) {
5893	case pci_channel_io_frozen:
5894		ipr_pci_frozen(pdev);
5895		return PCI_ERS_RESULT_NEED_RESET;
5896	case pci_channel_io_perm_failure:
5897		ipr_pci_perm_failure(pdev);
5898		return PCI_ERS_RESULT_DISCONNECT;
5900	default:
5901		break;
5902	}
5903	return PCI_ERS_RESULT_NEED_RESET;
5904}
5905
5906/**
5907 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5908 * @ioa_cfg:	ioa cfg struct
5909 *
5910 * Description: This is the second phase of adapter initialization.
5911 * This function takes care of initializing the adapter to the point
5912 * where it can accept new commands.
5913 *
5914 * Return value:
5915 * 	0 on success / -EIO on failure
5916 **/
5917static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5918{
5919	int rc = 0;
5920	unsigned long host_lock_flags = 0;
5921
5922	ENTER;
5923	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5924	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5925	if (ioa_cfg->needs_hard_reset) {
5926		ioa_cfg->needs_hard_reset = 0;
5927		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5928	} else
5929		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5930					IPR_SHUTDOWN_NONE);
5931
5932	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5933	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5934	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5935
5936	if (ioa_cfg->ioa_is_dead) {
5937		rc = -EIO;
5938	} else if (ipr_invalid_adapter(ioa_cfg)) {
5939		if (!ipr_testmode)
5940			rc = -EIO;
5941
5942		dev_err(&ioa_cfg->pdev->dev,
5943			"Adapter not supported in this hardware configuration.\n");
5944	}
5945
5946	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5947
5948	LEAVE;
5949	return rc;
5950}
5951
5952/**
5953 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5954 * @ioa_cfg:	ioa config struct
5955 *
5956 * Return value:
5957 * 	none
5958 **/
5959static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5960{
5961	int i;
5962
5963	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5964		if (ioa_cfg->ipr_cmnd_list[i])
5965			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5966				      ioa_cfg->ipr_cmnd_list[i],
5967				      ioa_cfg->ipr_cmnd_list_dma[i]);
5968
5969		ioa_cfg->ipr_cmnd_list[i] = NULL;
5970	}
5971
5972	if (ioa_cfg->ipr_cmd_pool)
5973		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5974
5975	ioa_cfg->ipr_cmd_pool = NULL;
5976}
5977
5978/**
5979 * ipr_free_mem - Frees memory allocated for an adapter
5980 * @ioa_cfg:	ioa cfg struct
5981 *
5982 * Return value:
5983 * 	nothing
5984 **/
5985static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5986{
5987	int i;
5988
5989	kfree(ioa_cfg->res_entries);
5990	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5991			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5992	ipr_free_cmd_blks(ioa_cfg);
5993	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5994			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5995	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5996			    ioa_cfg->cfg_table,
5997			    ioa_cfg->cfg_table_dma);
5998
5999	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6000		pci_free_consistent(ioa_cfg->pdev,
6001				    sizeof(struct ipr_hostrcb),
6002				    ioa_cfg->hostrcb[i],
6003				    ioa_cfg->hostrcb_dma[i]);
6004	}
6005
6006	ipr_free_dump(ioa_cfg);
6007	kfree(ioa_cfg->trace);
6008}
6009
6010/**
6011 * ipr_free_all_resources - Free all allocated resources for an adapter.
6012 * @ipr_cmd:	ipr command struct
6013 *
6014 * This function frees all allocated resources for the
6015 * specified adapter.
6016 *
6017 * Return value:
6018 * 	none
6019 **/
6020static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6021{
6022	struct pci_dev *pdev = ioa_cfg->pdev;
6023
6024	ENTER;
6025	free_irq(pdev->irq, ioa_cfg);
6026	iounmap(ioa_cfg->hdw_dma_regs);
6027	pci_release_regions(pdev);
6028	ipr_free_mem(ioa_cfg);
6029	scsi_host_put(ioa_cfg->host);
6030	pci_disable_device(pdev);
6031	LEAVE;
6032}
6033
6034/**
6035 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6036 * @ioa_cfg:	ioa config struct
6037 *
6038 * Return value:
6039 * 	0 on success / -ENOMEM on allocation failure
6040 **/
6041static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6042{
6043	struct ipr_cmnd *ipr_cmd;
6044	struct ipr_ioarcb *ioarcb;
6045	dma_addr_t dma_addr;
6046	int i;
6047
6048	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6049						 sizeof(struct ipr_cmnd), 8, 0);
6050
6051	if (!ioa_cfg->ipr_cmd_pool)
6052		return -ENOMEM;
6053
6054	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6055		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6056
6057		if (!ipr_cmd) {
6058			ipr_free_cmd_blks(ioa_cfg);
6059			return -ENOMEM;
6060		}
6061
6062		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6063		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6064		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6065
6066		ioarcb = &ipr_cmd->ioarcb;
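		/*
		 * Seed each IOARCB with the DMA addresses of its own IOADL
		 * and IOASA so the adapter can DMA scatter/gather lists and
		 * status directly to/from this command block.
		 */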
6067		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6068		ioarcb->host_response_handle = cpu_to_be32(i << 2);
6069		ioarcb->write_ioadl_addr =
6070			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6071		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6072		ioarcb->ioasa_host_pci_addr =
6073			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6074		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6075		ipr_cmd->cmd_index = i;
6076		ipr_cmd->ioa_cfg = ioa_cfg;
6077		ipr_cmd->sense_buffer_dma = dma_addr +
6078			offsetof(struct ipr_cmnd, sense_buffer);
6079
6080		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6081	}
6082
6083	return 0;
6084}
6085
6086/**
6087 * ipr_alloc_mem - Allocate memory for an adapter
6088 * @ioa_cfg:	ioa config struct
6089 *
6090 * Return value:
6091 * 	0 on success / non-zero for error
6092 **/
6093static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6094{
6095	struct pci_dev *pdev = ioa_cfg->pdev;
6096	int i, rc = -ENOMEM;
6097
6098	ENTER;
6099	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6100				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6101
6102	if (!ioa_cfg->res_entries)
6103		goto out;
6104
6105	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6106		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6107
6108	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6109						sizeof(struct ipr_misc_cbs),
6110						&ioa_cfg->vpd_cbs_dma);
6111
6112	if (!ioa_cfg->vpd_cbs)
6113		goto out_free_res_entries;
6114
6115	if (ipr_alloc_cmd_blks(ioa_cfg))
6116		goto out_free_vpd_cbs;
6117
6118	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6119						 sizeof(u32) * IPR_NUM_CMD_BLKS,
6120						 &ioa_cfg->host_rrq_dma);
6121
6122	if (!ioa_cfg->host_rrq)
6123		goto out_ipr_free_cmd_blocks;
6124
6125	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6126						  sizeof(struct ipr_config_table),
6127						  &ioa_cfg->cfg_table_dma);
6128
6129	if (!ioa_cfg->cfg_table)
6130		goto out_free_host_rrq;
6131
6132	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6133		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6134							   sizeof(struct ipr_hostrcb),
6135							   &ioa_cfg->hostrcb_dma[i]);
6136
6137		if (!ioa_cfg->hostrcb[i])
6138			goto out_free_hostrcb_dma;
6139
6140		ioa_cfg->hostrcb[i]->hostrcb_dma =
6141			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6142		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6143	}
6144
6145	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6146				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6147
6148	if (!ioa_cfg->trace)
6149		goto out_free_hostrcb_dma;
6150
6151	rc = 0;
6152out:
6153	LEAVE;
6154	return rc;
6155
6156out_free_hostrcb_dma:
6157	while (i-- > 0) {
6158		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6159				    ioa_cfg->hostrcb[i],
6160				    ioa_cfg->hostrcb_dma[i]);
6161	}
6162	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6163			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6164out_free_host_rrq:
6165	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6166			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6167out_ipr_free_cmd_blocks:
6168	ipr_free_cmd_blks(ioa_cfg);
6169out_free_vpd_cbs:
6170	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6171			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6172out_free_res_entries:
6173	kfree(ioa_cfg->res_entries);
6174	goto out;
6175}
6176
6177/**
6178 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6179 * @ioa_cfg:	ioa config struct
6180 *
6181 * Return value:
6182 * 	none
6183 **/
6184static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6185{
6186	int i;
6187
6188	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6189		ioa_cfg->bus_attr[i].bus = i;
6190		ioa_cfg->bus_attr[i].qas_enabled = 0;
6191		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6192		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6193			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6194		else
6195			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6196	}
6197}
6198
6199/**
6200 * ipr_init_ioa_cfg - Initialize IOA config struct
6201 * @ioa_cfg:	ioa config struct
6202 * @host:		scsi host struct
6203 * @pdev:		PCI dev struct
6204 *
6205 * Return value:
6206 * 	none
6207 **/
6208static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6209				       struct Scsi_Host *host, struct pci_dev *pdev)
6210{
6211	const struct ipr_interrupt_offsets *p;
6212	struct ipr_interrupts *t;
6213	void __iomem *base;
6214
6215	ioa_cfg->host = host;
6216	ioa_cfg->pdev = pdev;
6217	ioa_cfg->log_level = ipr_log_level;
6218	ioa_cfg->doorbell = IPR_DOORBELL;
6219	if (!ipr_auto_create)
6220		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6221	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6222	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6223	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6224	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6225	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6226	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6227	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6228	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6229
6230	INIT_LIST_HEAD(&ioa_cfg->free_q);
6231	INIT_LIST_HEAD(&ioa_cfg->pending_q);
6232	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6233	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6234	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6235	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6236	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6237	init_waitqueue_head(&ioa_cfg->reset_wait_q);
6238	ioa_cfg->sdt_state = INACTIVE;
6239	if (ipr_enable_cache)
6240		ioa_cfg->cache_state = CACHE_ENABLED;
6241	else
6242		ioa_cfg->cache_state = CACHE_DISABLED;
6243
6244	ipr_initialize_bus_attr(ioa_cfg);
6245
6246	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6247	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6248	host->max_channel = IPR_MAX_BUS_TO_SCAN;
6249	host->unique_id = host->host_no;
6250	host->max_cmd_len = IPR_MAX_CDB_LEN;
6251	pci_set_drvdata(pdev, ioa_cfg);
6252
6253	p = &ioa_cfg->chip_cfg->regs;
6254	t = &ioa_cfg->regs;
6255	base = ioa_cfg->hdw_dma_regs;
6256
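	/* Translate the chip-specific register offsets into ioremapped addresses */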
6257	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6258	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6259	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6260	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6261	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6262	t->ioarrin_reg = base + p->ioarrin_reg;
6263	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6264	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6265	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6266}
6267
6268/**
6269 * ipr_get_chip_cfg - Find adapter chip configuration
6270 * @dev_id:		PCI device id struct
6271 *
6272 * Return value:
6273 * 	ptr to chip config on success / NULL on failure
6274 **/
6275static const struct ipr_chip_cfg_t * __devinit
6276ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6277{
6278	int i;
6279
6280	if (dev_id->driver_data)
6281		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6282
6283	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6284		if (ipr_chip[i].vendor == dev_id->vendor &&
6285		    ipr_chip[i].device == dev_id->device)
6286			return ipr_chip[i].cfg;
6287	return NULL;
6288}
6289
6290/**
6291 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6292 * @pdev:		PCI device struct
6293 * @dev_id:		PCI device id struct
6294 *
6295 * Return value:
6296 * 	0 on success / non-zero on failure
6297 **/
6298static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6299				   const struct pci_device_id *dev_id)
6300{
6301	struct ipr_ioa_cfg *ioa_cfg;
6302	struct Scsi_Host *host;
6303	unsigned long ipr_regs_pci;
6304	void __iomem *ipr_regs;
6305	u32 rc = PCIBIOS_SUCCESSFUL;
6306	volatile u32 mask, uproc;
6307
6308	ENTER;
6309
6310	if ((rc = pci_enable_device(pdev))) {
6311		dev_err(&pdev->dev, "Cannot enable adapter\n");
6312		goto out;
6313	}
6314
6315	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6316
6317	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6318
6319	if (!host) {
6320		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6321		rc = -ENOMEM;
6322		goto out_disable;
6323	}
6324
6325	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6326	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6327
6328	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6329
6330	if (!ioa_cfg->chip_cfg) {
6331		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6332			dev_id->vendor, dev_id->device);
6333		rc = -ENODEV;
		goto out_scsi_host_put;
6334	}
6335
6336	ipr_regs_pci = pci_resource_start(pdev, 0);
6337
6338	rc = pci_request_regions(pdev, IPR_NAME);
6339	if (rc < 0) {
6340		dev_err(&pdev->dev,
6341			"Couldn't register memory range of registers\n");
6342		goto out_scsi_host_put;
6343	}
6344
6345	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6346
6347	if (!ipr_regs) {
6348		dev_err(&pdev->dev,
6349			"Couldn't map memory range of registers\n");
6350		rc = -ENOMEM;
6351		goto out_release_regions;
6352	}
6353
6354	ioa_cfg->hdw_dma_regs = ipr_regs;
6355	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6356	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6357
6358	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6359
6360	pci_set_master(pdev);
6361
6362	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6363	if (rc < 0) {
6364		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6365		goto cleanup_nomem;
6366	}
6367
6368	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6369				   ioa_cfg->chip_cfg->cache_line_size);
6370
6371	if (rc != PCIBIOS_SUCCESSFUL) {
6372		dev_err(&pdev->dev, "Write of cache line size failed\n");
6373		rc = -EIO;
6374		goto cleanup_nomem;
6375	}
6376
6377	/* Save away PCI config space for use following IOA reset */
6378	rc = pci_save_state(pdev);
6379
6380	if (rc != PCIBIOS_SUCCESSFUL) {
6381		dev_err(&pdev->dev, "Failed to save PCI config space\n");
6382		rc = -EIO;
6383		goto cleanup_nomem;
6384	}
6385
6386	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6387		goto cleanup_nomem;
6388
6389	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6390		goto cleanup_nomem;
6391
6392	rc = ipr_alloc_mem(ioa_cfg);
6393	if (rc < 0) {
6394		dev_err(&pdev->dev,
6395			"Couldn't allocate enough memory for device driver!\n");
6396		goto cleanup_nomem;
6397	}
6398
6399	/*
6400	 * If HRRQ updated interrupt is not masked, or reset alert is set,
6401	 * the card is in an unknown state and needs a hard reset
6402	 */
6403	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6404	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6405	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6406		ioa_cfg->needs_hard_reset = 1;
6407
6408	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6409	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6410
6411	if (rc) {
6412		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6413			pdev->irq, rc);
6414		goto cleanup_nolog;
6415	}
6416
6417	spin_lock(&ipr_driver_lock);
6418	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6419	spin_unlock(&ipr_driver_lock);
6420
6421	LEAVE;
6422out:
6423	return rc;
6424
6425cleanup_nolog:
6426	ipr_free_mem(ioa_cfg);
6427cleanup_nomem:
6428	iounmap(ipr_regs);
6429out_release_regions:
6430	pci_release_regions(pdev);
6431out_scsi_host_put:
6432	scsi_host_put(host);
6433out_disable:
6434	pci_disable_device(pdev);
6435	goto out;
6436}
6437
6438/**
6439 * ipr_scan_vsets - Scans for VSET devices
6440 * @ioa_cfg:	ioa config struct
6441 *
6442 * Description: Since the VSET resources do not follow SAM (we can have
6443 * sparse LUNs with no LUN 0), we have to scan for them ourselves.
6444 *
6445 * Return value:
6446 * 	none
6447 **/
6448static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6449{
6450	int target, lun;
6451
6452	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6453		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6454			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6455}
6456
6457/**
6458 * ipr_initiate_ioa_bringdown - Bring down an adapter
6459 * @ioa_cfg:		ioa config struct
6460 * @shutdown_type:	shutdown type
6461 *
6462 * Description: This function will initiate bringing down the adapter.
6463 * This consists of issuing an IOA shutdown to the adapter
6464 * to flush the cache, and running BIST.
6465 * If the caller needs to wait on the completion of the reset,
6466 * the caller must sleep on the reset_wait_q.
6467 *
6468 * Return value:
6469 * 	none
6470 **/
6471static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6472				       enum ipr_shutdown_type shutdown_type)
6473{
6474	ENTER;
6475	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6476		ioa_cfg->sdt_state = ABORT_DUMP;
6477	ioa_cfg->reset_retries = 0;
6478	ioa_cfg->in_ioa_bringdown = 1;
6479	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6480	LEAVE;
6481}
6482
6483/**
6484 * __ipr_remove - Remove a single adapter
6485 * @pdev:	pci device struct
6486 *
6487 * Adapter hot plug remove entry point.
6488 *
6489 * Return value:
6490 * 	none
6491 **/
6492static void __ipr_remove(struct pci_dev *pdev)
6493{
6494	unsigned long host_lock_flags = 0;
6495	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6496	ENTER;
6497
6498	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6499	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6500
6501	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6502	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6503	flush_scheduled_work();
6504	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6505
6506	spin_lock(&ipr_driver_lock);
6507	list_del(&ioa_cfg->queue);
6508	spin_unlock(&ipr_driver_lock);
6509
6510	if (ioa_cfg->sdt_state == ABORT_DUMP)
6511		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6512	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6513
6514	ipr_free_all_resources(ioa_cfg);
6515
6516	LEAVE;
6517}
6518
6519/**
6520 * ipr_remove - IOA hot plug remove entry point
6521 * @pdev:	pci device struct
6522 *
6523 * Adapter hot plug remove entry point.
6524 *
6525 * Return value:
6526 * 	none
6527 **/
6528static void ipr_remove(struct pci_dev *pdev)
6529{
6530	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6531
6532	ENTER;
6533
6534	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6535			      &ipr_trace_attr);
6536	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6537			     &ipr_dump_attr);
6538	scsi_remove_host(ioa_cfg->host);
6539
6540	__ipr_remove(pdev);
6541
6542	LEAVE;
6543}
6544
6545/**
6546 * ipr_probe - Adapter hot plug add entry point
6547 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
6548 * Return value:
6549 * 	0 on success / non-zero on failure
6550 **/
6551static int __devinit ipr_probe(struct pci_dev *pdev,
6552			       const struct pci_device_id *dev_id)
6553{
6554	struct ipr_ioa_cfg *ioa_cfg;
6555	int rc;
6556
6557	rc = ipr_probe_ioa(pdev, dev_id);
6558
6559	if (rc)
6560		return rc;
6561
6562	ioa_cfg = pci_get_drvdata(pdev);
6563	rc = ipr_probe_ioa_part2(ioa_cfg);
6564
6565	if (rc) {
6566		__ipr_remove(pdev);
6567		return rc;
6568	}
6569
6570	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6571
6572	if (rc) {
6573		__ipr_remove(pdev);
6574		return rc;
6575	}
6576
6577	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6578				   &ipr_trace_attr);
6579
6580	if (rc) {
6581		scsi_remove_host(ioa_cfg->host);
6582		__ipr_remove(pdev);
6583		return rc;
6584	}
6585
6586	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6587				   &ipr_dump_attr);
6588
6589	if (rc) {
6590		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6591				      &ipr_trace_attr);
6592		scsi_remove_host(ioa_cfg->host);
6593		__ipr_remove(pdev);
6594		return rc;
6595	}
6596
6597	scsi_scan_host(ioa_cfg->host);
6598	ipr_scan_vsets(ioa_cfg);
6599	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6600	ioa_cfg->allow_ml_add_del = 1;
6601	ioa_cfg->host->max_channel = IPR_VSET_BUS;
6602	schedule_work(&ioa_cfg->work_q);
6603	return 0;
6604}
6605
6606/**
6607 * ipr_shutdown - Shutdown handler.
6608 * @pdev:	pci device struct
6609 *
6610 * This function is invoked upon system shutdown/reboot. It will issue
6611 * a shutdown to the adapter to flush the write cache.
6612 *
6613 * Return value:
6614 * 	none
6615 **/
6616static void ipr_shutdown(struct pci_dev *pdev)
6617{
6618	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6619	unsigned long lock_flags = 0;
6620
6621	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6622	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6623	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6624	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6625}
6626
6627static struct pci_device_id ipr_pci_table[] __devinitdata = {
6628	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6629		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6630		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6631	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6632		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6633	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6634	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6635		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6636	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6637	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6638		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6639	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6640	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6641		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6642	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6643	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6644		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6645	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6646	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6647		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6648	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6649	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6650		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6651		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6652	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6653	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6654	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6655	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6656	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6657	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6658	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6659	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6660	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6661	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6662	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6663	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6664	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6665		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6666		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6667	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6668		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6669		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6670	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6671		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6672		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6673	{ }
6674};
6675MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6676
6677static struct pci_error_handlers ipr_err_handler = {
6678	.error_detected = ipr_pci_error_detected,
6679	.slot_reset = ipr_pci_slot_reset,
6680};
6681
6682static struct pci_driver ipr_driver = {
6683	.name = IPR_NAME,
6684	.id_table = ipr_pci_table,
6685	.probe = ipr_probe,
6686	.remove = ipr_remove,
6687	.shutdown = ipr_shutdown,
6688	.err_handler = &ipr_err_handler,
6689};
6690
6691/**
6692 * ipr_init - Module entry point
6693 *
6694 * Return value:
6695 * 	0 on success / negative value on failure
6696 **/
6697static int __init ipr_init(void)
6698{
6699	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6700		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6701
6702	return pci_module_init(&ipr_driver);
6703}
6704
6705/**
6706 * ipr_exit - Module unload
6707 *
6708 * Module unload entry point.
6709 *
6710 * Return value:
6711 * 	none
6712 **/
6713static void __exit ipr_exit(void)
6714{
6715	pci_unregister_driver(&ipr_driver);
6716}
6717
6718module_init(ipr_init);
6719module_exit(ipr_exit);
6720