ipr.c revision 0726ce26104671e3072d90b9c697c253974e823d
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
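	/* Read back so the mask/clear writes above are flushed to the adapter */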
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

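	/* Make sure the IOARCB is in memory before the adapter is told to fetch it */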
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

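	/* Up to 18 array members may be reported, split across the array_member
	   and array_member2 lists (hence the switch at i == 9 below) */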
	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	int i;
	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);

	if (ioa_data_len == 0)
		return;

	for (i = 0; i < ioa_data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
	}
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);
	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and that reset failed, fail this one too. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
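		/* An 'X' in compare_product_id_byte means that byte of the
		   product ID must match the table entry; other bytes are ignored */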
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:				destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state  */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:			length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
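		/* Give other tasks a chance to run between chunks of the dump copy */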
		schedule();
	}

	return bytes_copied;
}

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}

/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}

/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:		dump struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data.  sdt represents the pointer
	 to the ioa generated dump table.  Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif

/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
	LEAVE;
}

/**
 * ipr_worker_thread - Worker thread
 * @data:		ioa config struct
 *
 * Called at task level from a work thread. This function takes care
1817 * of adding and removing device from the mid-layer as configuration
1818 * of adding and removing devices from the mid-layer as configuration
1819 *
1820 * Return value:
1821 * 	nothing
1822 **/
1823static void ipr_worker_thread(void *data)
1824{
1825	unsigned long lock_flags;
1826	struct ipr_resource_entry *res;
1827	struct scsi_device *sdev;
1828	struct ipr_dump *dump;
1829	struct ipr_ioa_cfg *ioa_cfg = data;
1830	u8 bus, target, lun;
1831	int did_work;
1832
1833	ENTER;
1834	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1835
1836	if (ioa_cfg->sdt_state == GET_DUMP) {
1837		dump = ioa_cfg->dump;
1838		if (!dump) {
1839			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1840			return;
1841		}
1842		kref_get(&dump->kref);
1843		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1844		ipr_get_ioa_dump(ioa_cfg, dump);
1845		kref_put(&dump->kref, ipr_release_dump);
1846
1847		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1848		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1849			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1850		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1851		return;
1852	}
1853
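	/* First pass: take devices the adapter has flagged for removal
	 * (del_from_ml) out of the mid-layer.  The host lock must be
	 * dropped around scsi_remove_device(), so the list walk is
	 * restarted after every removal. */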
1854restart:
1855	do {
1856		did_work = 0;
1857		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1858			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1859			return;
1860		}
1861
1862		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1863			if (res->del_from_ml && res->sdev) {
1864				did_work = 1;
1865				sdev = res->sdev;
1866				if (!scsi_device_get(sdev)) {
1867					res->sdev = NULL;
1868					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1869					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1870					scsi_remove_device(sdev);
1871					scsi_device_put(sdev);
1872					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1873				}
1874				break;
1875			}
1876		}
1877	} while(did_work);
1878
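	/* Second pass: register newly reported devices (add_to_ml) with
	 * the mid-layer.  scsi_add_device() can sleep, so the lock is
	 * dropped and the scan restarts from the top after each add. */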
1879	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1880		if (res->add_to_ml) {
1881			bus = res->cfgte.res_addr.bus;
1882			target = res->cfgte.res_addr.target;
1883			lun = res->cfgte.res_addr.lun;
1884			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1885			scsi_add_device(ioa_cfg->host, bus, target, lun);
1886			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1887			goto restart;
1888		}
1889	}
1890
1891	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1892	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1893	LEAVE;
1894}
1895
1896#ifdef CONFIG_SCSI_IPR_TRACE
1897/**
1898 * ipr_read_trace - Dump the adapter trace
1899 * @kobj:		kobject struct
1900 * @buf:		buffer
1901 * @off:		offset
1902 * @count:		buffer size
1903 *
1904 * Return value:
1905 *	number of bytes printed to buffer
1906 **/
1907static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1908			      loff_t off, size_t count)
1909{
1910	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1911	struct Scsi_Host *shost = class_to_shost(cdev);
1912	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1913	unsigned long lock_flags = 0;
1914	int size = IPR_TRACE_SIZE;
1915	char *src = (char *)ioa_cfg->trace;
1916
1917	if (off > size)
1918		return 0;
1919	if (off + count > size) {
1920		size -= off;
1921		count = size;
1922	}
1923
1924	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1925	memcpy(buf, &src[off], count);
1926	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1927	return count;
1928}
1929
1930static struct bin_attribute ipr_trace_attr = {
1931	.attr =	{
1932		.name = "trace",
1933		.mode = S_IRUGO,
1934	},
1935	.size = 0,
1936	.read = ipr_read_trace,
1937};
1938#endif
1939
1940/**
1941 * ipr_show_fw_version - Show the firmware version
1942 * @class_dev:	class device struct
1943 * @buf:		buffer
1944 *
1945 * Return value:
1946 *	number of bytes printed to buffer
1947 **/
1948static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
1949{
1950	struct Scsi_Host *shost = class_to_shost(class_dev);
1951	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1952	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1953	unsigned long lock_flags = 0;
1954	int len;
1955
1956	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1957	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
1958		       ucode_vpd->major_release, ucode_vpd->card_type,
1959		       ucode_vpd->minor_release[0],
1960		       ucode_vpd->minor_release[1]);
1961	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1962	return len;
1963}
1964
1965static struct class_device_attribute ipr_fw_version_attr = {
1966	.attr = {
1967		.name =		"fw_version",
1968		.mode =		S_IRUGO,
1969	},
1970	.show = ipr_show_fw_version,
1971};
1972
1973/**
1974 * ipr_show_log_level - Show the adapter's error logging level
1975 * @class_dev:	class device struct
1976 * @buf:		buffer
1977 *
1978 * Return value:
1979 * 	number of bytes printed to buffer
1980 **/
1981static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
1982{
1983	struct Scsi_Host *shost = class_to_shost(class_dev);
1984	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1985	unsigned long lock_flags = 0;
1986	int len;
1987
1988	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1989	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
1990	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1991	return len;
1992}
1993
1994/**
1995 * ipr_store_log_level - Change the adapter's error logging level
1996 * @class_dev:	class device struct
1997 * @buf:		buffer
1998 *
1999 * Return value:
2000 * 	number of bytes consumed from the buffer
2001 **/
2002static ssize_t ipr_store_log_level(struct class_device *class_dev,
2003				   const char *buf, size_t count)
2004{
2005	struct Scsi_Host *shost = class_to_shost(class_dev);
2006	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2007	unsigned long lock_flags = 0;
2008
2009	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2010	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2011	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2012	return strlen(buf);
2013}
2014
2015static struct class_device_attribute ipr_log_level_attr = {
2016	.attr = {
2017		.name =		"log_level",
2018		.mode =		S_IRUGO | S_IWUSR,
2019	},
2020	.show = ipr_show_log_level,
2021	.store = ipr_store_log_level
2022};
2023
2024/**
2025 * ipr_store_diagnostics - IOA Diagnostics interface
2026 * @class_dev:	class_device struct
2027 * @buf:		buffer
2028 * @count:		buffer size
2029 *
2030 * This function will reset the adapter and wait a reasonable
2031 * amount of time for any errors that the adapter might log.
2032 *
2033 * Return value:
2034 * 	count on success / other on failure
2035 **/
2036static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2037				     const char *buf, size_t count)
2038{
2039	struct Scsi_Host *shost = class_to_shost(class_dev);
2040	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2041	unsigned long lock_flags = 0;
2042	int rc = count;
2043
2044	if (!capable(CAP_SYS_ADMIN))
2045		return -EACCES;
2046
2047	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2048	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2049	ioa_cfg->errors_logged = 0;
2050	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2051
2052	if (ioa_cfg->in_reset_reload) {
2053		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2054		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2055
2056		/* Wait for a second for any errors to be logged */
2057		msleep(1000);
2058	} else {
2059		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2060		return -EIO;
2061	}
2062
2063	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2064	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2065		rc = -EIO;
2066	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2067
2068	return rc;
2069}
2070
2071static struct class_device_attribute ipr_diagnostics_attr = {
2072	.attr = {
2073		.name =		"run_diagnostics",
2074		.mode =		S_IWUSR,
2075	},
2076	.store = ipr_store_diagnostics
2077};
2078
2079/**
2080 * ipr_store_reset_adapter - Reset the adapter
2081 * @class_dev:	class_device struct
2082 * @buf:		buffer
2083 * @count:		buffer size
2084 *
2085 * This function will reset the adapter.
2086 *
2087 * Return value:
2088 * 	count on success / other on failure
2089 **/
2090static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2091				       const char *buf, size_t count)
2092{
2093	struct Scsi_Host *shost = class_to_shost(class_dev);
2094	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2095	unsigned long lock_flags;
2096	int result = count;
2097
2098	if (!capable(CAP_SYS_ADMIN))
2099		return -EACCES;
2100
2101	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2102	if (!ioa_cfg->in_reset_reload)
2103		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2104	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2105	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2106
2107	return result;
2108}
2109
2110static struct class_device_attribute ipr_ioa_reset_attr = {
2111	.attr = {
2112		.name =		"reset_host",
2113		.mode =		S_IWUSR,
2114	},
2115	.store = ipr_store_reset_adapter
2116};
2117
2118/**
2119 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2120 * @buf_len:		buffer length
2121 *
2122 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2123 * list to use for microcode download
2124 *
2125 * Return value:
2126 * 	pointer to sglist / NULL on failure
2127 **/
2128static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2129{
2130	int sg_size, order, bsize_elem, num_elem, i, j;
2131	struct ipr_sglist *sglist;
2132	struct scatterlist *scatterlist;
2133	struct page *page;
2134
2135	/* Get the minimum size per scatter/gather element */
2136	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2137
2138	/* Get the actual size per element */
2139	order = get_order(sg_size);
2140
2141	/* Determine the actual number of bytes per element */
2142	bsize_elem = PAGE_SIZE * (1 << order);
2143
2144	/* Determine the actual number of sg entries needed */
2145	if (buf_len % bsize_elem)
2146		num_elem = (buf_len / bsize_elem) + 1;
2147	else
2148		num_elem = buf_len / bsize_elem;
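	/* Illustrative example (hypothetical sizes): with IPR_MAX_SGLIST
	 * of 64 and a 1 MB image, sg_size comes out just over 16 KB,
	 * get_order() rounds that up to order 3 (32 KB on 4 KB page
	 * systems), so the image is covered by 32 elements of 32 KB each. */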
2149
2150	/* Allocate a scatter/gather list for the DMA */
2151	sglist = kmalloc(sizeof(struct ipr_sglist) +
2152			 (sizeof(struct scatterlist) * (num_elem - 1)),
2153			 GFP_KERNEL);
2154
2155	if (sglist == NULL) {
2156		ipr_trace;
2157		return NULL;
2158	}
2159
2160	memset(sglist, 0, sizeof(struct ipr_sglist) +
2161	       (sizeof(struct scatterlist) * (num_elem - 1)));
2162
2163	scatterlist = sglist->scatterlist;
2164
2165	sglist->order = order;
2166	sglist->num_sg = num_elem;
2167
2168	/* Allocate a bunch of sg elements */
2169	for (i = 0; i < num_elem; i++) {
2170		page = alloc_pages(GFP_KERNEL, order);
2171		if (!page) {
2172			ipr_trace;
2173
2174			/* Free up what we already allocated */
2175			for (j = i - 1; j >= 0; j--)
2176				__free_pages(scatterlist[j].page, order);
2177			kfree(sglist);
2178			return NULL;
2179		}
2180
2181		scatterlist[i].page = page;
2182	}
2183
2184	return sglist;
2185}
2186
2187/**
2188 * ipr_free_ucode_buffer - Frees a microcode download buffer
2189 * @sglist:		scatter/gather list pointer
2190 *
2191 * Free a DMA'able ucode download buffer previously allocated with
2192 * ipr_alloc_ucode_buffer
2193 *
2194 * Return value:
2195 * 	nothing
2196 **/
2197static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2198{
2199	int i;
2200
2201	for (i = 0; i < sglist->num_sg; i++)
2202		__free_pages(sglist->scatterlist[i].page, sglist->order);
2203
2204	kfree(sglist);
2205}
2206
2207/**
2208 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2209 * @sglist:		scatter/gather list pointer
2210 * @buffer:		buffer pointer
2211 * @len:		buffer length
2212 *
2213 * Copy a microcode image from the firmware image buffer into a buffer allocated by
2214 * ipr_alloc_ucode_buffer
2215 *
2216 * Return value:
2217 * 	0 on success / other on failure
2218 **/
2219static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2220				 u8 *buffer, u32 len)
2221{
2222	int bsize_elem, i, result = 0;
2223	struct scatterlist *scatterlist;
2224	void *kaddr;
2225
2226	/* Determine the actual number of bytes per element */
2227	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2228
2229	scatterlist = sglist->scatterlist;
2230
2231	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2232		kaddr = kmap(scatterlist[i].page);
2233		memcpy(kaddr, buffer, bsize_elem);
2234		kunmap(scatterlist[i].page);
2235
2236		scatterlist[i].length = bsize_elem;
2237
2238		if (result != 0) {
2239			ipr_trace;
2240			return result;
2241		}
2242	}
2243
2244	if (len % bsize_elem) {
2245		kaddr = kmap(scatterlist[i].page);
2246		memcpy(kaddr, buffer, len % bsize_elem);
2247		kunmap(scatterlist[i].page);
2248
2249		scatterlist[i].length = len % bsize_elem;
2250	}
2251
2252	sglist->buffer_len = len;
2253	return result;
2254}
2255
2256/**
2257 * ipr_map_ucode_buffer - Map a microcode download buffer
2258 * @ipr_cmd:	ipr command struct
2259 * @sglist:		scatter/gather list
2260 * @len:		total length of download buffer
2261 *
2262 * Maps a microcode download scatter/gather list for DMA and
2263 * builds the IOADL.
2264 *
2265 * Return value:
2266 * 	0 on success / -EIO on failure
2267 **/
2268static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
2269				struct ipr_sglist *sglist, int len)
2270{
2271	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2272	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2273	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2274	struct scatterlist *scatterlist = sglist->scatterlist;
2275	int i;
2276
2277	ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
2278					 sglist->num_sg, DMA_TO_DEVICE);
2279
2280	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2281	ioarcb->write_data_transfer_length = cpu_to_be32(len);
2282	ioarcb->write_ioadl_len =
2283		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2284
2285	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2286		ioadl[i].flags_and_data_len =
2287			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2288		ioadl[i].address =
2289			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2290	}
2291
2292	if (likely(ipr_cmd->dma_use_sg)) {
2293		ioadl[i-1].flags_and_data_len |=
2294			cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2295	}
2296	else {
2297		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
2298		return -EIO;
2299	}
2300
2301	return 0;
2302}
2303
2304/**
2305 * ipr_store_update_fw - Update the firmware on the adapter
2306 * @class_dev:	class_device struct
2307 * @buf:		buffer
2308 * @count:		buffer size
2309 *
2310 * This function will update the firmware on the adapter.
2311 *
2312 * Return value:
2313 * 	count on success / other on failure
2314 **/
2315static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2316				       const char *buf, size_t count)
2317{
2318	struct Scsi_Host *shost = class_to_shost(class_dev);
2319	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2320	struct ipr_ucode_image_header *image_hdr;
2321	const struct firmware *fw_entry;
2322	struct ipr_sglist *sglist;
2323	unsigned long lock_flags;
2324	char fname[100];
2325	char *src;
2326	int len, result, dnld_size;
2327
2328	if (!capable(CAP_SYS_ADMIN))
2329		return -EACCES;
2330
2331	len = scnprintf(fname, sizeof(fname), "%s", buf);
2332	fname[len ? len - 1 : 0] = '\0';	/* assume a trailing newline from sysfs */
2333
2334	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2335		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2336		return -EIO;
2337	}
2338
2339	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2340
2341	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2342	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2343	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2344		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2345		release_firmware(fw_entry);
2346		return -EINVAL;
2347	}
2348
2349	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2350	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2351	sglist = ipr_alloc_ucode_buffer(dnld_size);
2352
2353	if (!sglist) {
2354		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2355		release_firmware(fw_entry);
2356		return -ENOMEM;
2357	}
2358
2359	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2360
2361	if (result) {
2362		dev_err(&ioa_cfg->pdev->dev,
2363			"Microcode buffer copy to DMA buffer failed\n");
2364		ipr_free_ucode_buffer(sglist);
2365		release_firmware(fw_entry);
2366		return result;
2367	}
2368
2369	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2370
2371	if (ioa_cfg->ucode_sglist) {
2372		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2373		dev_err(&ioa_cfg->pdev->dev,
2374			"Microcode download already in progress\n");
2375		ipr_free_ucode_buffer(sglist);
2376		release_firmware(fw_entry);
2377		return -EIO;
2378	}
2379
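	/* The download itself is driven by the reset job: with
	 * ucode_sglist set, the reset/reload sequence started below
	 * picks up the list and writes the image out to the adapter. */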
2380	ioa_cfg->ucode_sglist = sglist;
2381	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2382	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2383	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2384
2385	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2386	ioa_cfg->ucode_sglist = NULL;
2387	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2388
2389	ipr_free_ucode_buffer(sglist);
2390	release_firmware(fw_entry);
2391
2392	return count;
2393}
2394
2395static struct class_device_attribute ipr_update_fw_attr = {
2396	.attr = {
2397		.name =		"update_fw",
2398		.mode =		S_IWUSR,
2399	},
2400	.store = ipr_store_update_fw
2401};
2402
2403static struct class_device_attribute *ipr_ioa_attrs[] = {
2404	&ipr_fw_version_attr,
2405	&ipr_log_level_attr,
2406	&ipr_diagnostics_attr,
2407	&ipr_ioa_reset_attr,
2408	&ipr_update_fw_attr,
2409	NULL,
2410};
2411
2412#ifdef CONFIG_SCSI_IPR_DUMP
2413/**
2414 * ipr_read_dump - Dump the adapter
2415 * @kobj:		kobject struct
2416 * @buf:		buffer
2417 * @off:		offset
2418 * @count:		buffer size
2419 *
2420 * Return value:
2421 *	number of bytes printed to buffer
2422 **/
2423static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2424			      loff_t off, size_t count)
2425{
2426	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2427	struct Scsi_Host *shost = class_to_shost(cdev);
2428	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2429	struct ipr_dump *dump;
2430	unsigned long lock_flags = 0;
2431	char *src;
2432	int len;
2433	size_t rc = count;
2434
2435	if (!capable(CAP_SYS_ADMIN))
2436		return -EACCES;
2437
2438	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439	dump = ioa_cfg->dump;
2440
2441	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2442		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2443		return 0;
2444	}
2445	kref_get(&dump->kref);
2446	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2447
2448	if (off > dump->driver_dump.hdr.len) {
2449		kref_put(&dump->kref, ipr_release_dump);
2450		return 0;
2451	}
2452
2453	if (off + count > dump->driver_dump.hdr.len) {
2454		count = dump->driver_dump.hdr.len - off;
2455		rc = count;
2456	}
2457
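	/* The dump is presented to the reader as three consecutive
	 * regions: the driver dump (headers plus version/location/type/
	 * trace entries), the IOA dump entry header, and finally the
	 * page-sized chunks of IOA data collected in ioa_data[]. */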
2458	if (count && off < sizeof(dump->driver_dump)) {
2459		if (off + count > sizeof(dump->driver_dump))
2460			len = sizeof(dump->driver_dump) - off;
2461		else
2462			len = count;
2463		src = (u8 *)&dump->driver_dump + off;
2464		memcpy(buf, src, len);
2465		buf += len;
2466		off += len;
2467		count -= len;
2468	}
2469
2470	off -= sizeof(dump->driver_dump);
2471
2472	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2473		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2474			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2475		else
2476			len = count;
2477		src = (u8 *)&dump->ioa_dump + off;
2478		memcpy(buf, src, len);
2479		buf += len;
2480		off += len;
2481		count -= len;
2482	}
2483
2484	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2485
2486	while (count) {
2487		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2488			len = PAGE_ALIGN(off) - off;
2489		else
2490			len = count;
2491		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2492		src += off & ~PAGE_MASK;
2493		memcpy(buf, src, len);
2494		buf += len;
2495		off += len;
2496		count -= len;
2497	}
2498
2499	kref_put(&dump->kref, ipr_release_dump);
2500	return rc;
2501}
2502
2503/**
2504 * ipr_alloc_dump - Prepare for adapter dump
2505 * @ioa_cfg:	ioa config struct
2506 *
2507 * Return value:
2508 *	0 on success / other on failure
2509 **/
2510static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2511{
2512	struct ipr_dump *dump;
2513	unsigned long lock_flags = 0;
2514
2515	ENTER;
2516	dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2517
2518	if (!dump) {
2519		ipr_err("Dump memory allocation failed\n");
2520		return -ENOMEM;
2521	}
2522
2523	memset(dump, 0, sizeof(struct ipr_dump));
2524	kref_init(&dump->kref);
2525	dump->ioa_cfg = ioa_cfg;
2526
2527	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2528
2529	if (INACTIVE != ioa_cfg->sdt_state) {
2530		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2531		kfree(dump);
2532		return 0;
2533	}
2534
2535	ioa_cfg->dump = dump;
2536	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2537	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2538		ioa_cfg->dump_taken = 1;
2539		schedule_work(&ioa_cfg->work_q);
2540	}
2541	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2542
2543	LEAVE;
2544	return 0;
2545}
2546
2547/**
2548 * ipr_free_dump - Free adapter dump memory
2549 * @ioa_cfg:	ioa config struct
2550 *
2551 * Return value:
2552 *	0 on success / other on failure
2553 **/
2554static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2555{
2556	struct ipr_dump *dump;
2557	unsigned long lock_flags = 0;
2558
2559	ENTER;
2560
2561	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2562	dump = ioa_cfg->dump;
2563	if (!dump) {
2564		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2565		return 0;
2566	}
2567
2568	ioa_cfg->dump = NULL;
2569	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2570
2571	kref_put(&dump->kref, ipr_release_dump);
2572
2573	LEAVE;
2574	return 0;
2575}
2576
2577/**
2578 * ipr_write_dump - Setup dump state of adapter
2579 * @kobj:		kobject struct
2580 * @buf:		buffer
2581 * @off:		offset
2582 * @count:		buffer size
2583 *
2584 * Return value:
2585 *	count on success / other on failure
2586 **/
2587static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2588			      loff_t off, size_t count)
2589{
2590	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2591	struct Scsi_Host *shost = class_to_shost(cdev);
2592	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2593	int rc;
2594
2595	if (!capable(CAP_SYS_ADMIN))
2596		return -EACCES;
2597
2598	if (buf[0] == '1')
2599		rc = ipr_alloc_dump(ioa_cfg);
2600	else if (buf[0] == '0')
2601		rc = ipr_free_dump(ioa_cfg);
2602	else
2603		return -EINVAL;
2604
2605	if (rc)
2606		return rc;
2607	else
2608		return count;
2609}
2610
2611static struct bin_attribute ipr_dump_attr = {
2612	.attr =	{
2613		.name = "dump",
2614		.mode = S_IRUSR | S_IWUSR,
2615	},
2616	.size = 0,
2617	.read = ipr_read_dump,
2618	.write = ipr_write_dump
2619};
2620#else
2621static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
2622#endif
2623
2624/**
2625 * ipr_change_queue_depth - Change the device's queue depth
2626 * @sdev:	scsi device struct
2627 * @qdepth:	depth to set
2628 *
2629 * Return value:
2630 * 	actual depth set
2631 **/
2632static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2633{
2634	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2635	return sdev->queue_depth;
2636}
2637
2638/**
2639 * ipr_change_queue_type - Change the device's queue type
2640 * @sdev:		scsi device struct
2641 * @tag_type:	type of tags to use
2642 *
2643 * Return value:
2644 * 	actual queue type set
2645 **/
2646static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2647{
2648	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2649	struct ipr_resource_entry *res;
2650	unsigned long lock_flags = 0;
2651
2652	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2653	res = (struct ipr_resource_entry *)sdev->hostdata;
2654
2655	if (res) {
2656		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2657			/*
2658			 * We don't bother quiescing the device here since the
2659			 * adapter firmware does it for us.
2660			 */
2661			scsi_set_tag_type(sdev, tag_type);
2662
2663			if (tag_type)
2664				scsi_activate_tcq(sdev, sdev->queue_depth);
2665			else
2666				scsi_deactivate_tcq(sdev, sdev->queue_depth);
2667		} else
2668			tag_type = 0;
2669	} else
2670		tag_type = 0;
2671
2672	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2673	return tag_type;
2674}
2675
2676/**
2677 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2678 * @dev:	device struct
2679 * @buf:	buffer
2680 *
2681 * Return value:
2682 * 	number of bytes printed to buffer
2683 **/
2684static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2685{
2686	struct scsi_device *sdev = to_scsi_device(dev);
2687	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2688	struct ipr_resource_entry *res;
2689	unsigned long lock_flags = 0;
2690	ssize_t len = -ENXIO;
2691
2692	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2693	res = (struct ipr_resource_entry *)sdev->hostdata;
2694	if (res)
2695		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2696	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2697	return len;
2698}
2699
2700static struct device_attribute ipr_adapter_handle_attr = {
2701	.attr = {
2702		.name = 	"adapter_handle",
2703		.mode =		S_IRUSR,
2704	},
2705	.show = ipr_show_adapter_handle
2706};
2707
2708static struct device_attribute *ipr_dev_attrs[] = {
2709	&ipr_adapter_handle_attr,
2710	NULL,
2711};
2712
2713/**
2714 * ipr_biosparam - Return the HSC mapping
2715 * @sdev:			scsi device struct
2716 * @block_device:	block device pointer
2717 * @capacity:		capacity of the device
2718 * @parm:			Array containing returned HSC values.
2719 *
2720 * This function generates the HSC parms that fdisk uses.
2721 * We want to make sure we return something that places partitions
2722 * on 4k boundaries for best performance with the IOA.
2723 *
2724 * Return value:
2725 * 	0 on success
2726 **/
2727static int ipr_biosparam(struct scsi_device *sdev,
2728			 struct block_device *block_device,
2729			 sector_t capacity, int *parm)
2730{
2731	int heads, sectors;
2732	sector_t cylinders;
2733
2734	heads = 128;
2735	sectors = 32;
2736
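	/* 128 heads * 32 sectors = 4096 sectors (2 MB) per cylinder, so
	 * cylinder-aligned partitions start on 4k boundaries. */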
2737	cylinders = capacity;
2738	sector_div(cylinders, (128 * 32));
2739
2740	/* return result */
2741	parm[0] = heads;
2742	parm[1] = sectors;
2743	parm[2] = cylinders;
2744
2745	return 0;
2746}
2747
2748/**
2749 * ipr_slave_destroy - Unconfigure a SCSI device
2750 * @sdev:	scsi device struct
2751 *
2752 * Return value:
2753 * 	nothing
2754 **/
2755static void ipr_slave_destroy(struct scsi_device *sdev)
2756{
2757	struct ipr_resource_entry *res;
2758	struct ipr_ioa_cfg *ioa_cfg;
2759	unsigned long lock_flags = 0;
2760
2761	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2762
2763	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2764	res = (struct ipr_resource_entry *) sdev->hostdata;
2765	if (res) {
2766		sdev->hostdata = NULL;
2767		res->sdev = NULL;
2768	}
2769	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2770}
2771
2772/**
2773 * ipr_slave_configure - Configure a SCSI device
2774 * @sdev:	scsi device struct
2775 *
2776 * This function configures the specified scsi device.
2777 *
2778 * Return value:
2779 * 	0 on success
2780 **/
2781static int ipr_slave_configure(struct scsi_device *sdev)
2782{
2783	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2784	struct ipr_resource_entry *res;
2785	unsigned long lock_flags = 0;
2786
2787	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2788	res = sdev->hostdata;
2789	if (res) {
2790		if (ipr_is_af_dasd_device(res))
2791			sdev->type = TYPE_RAID;
2792		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
2793			sdev->scsi_level = 4;
2794			sdev->no_uld_attach = 1;
2795		}
2796		if (ipr_is_vset_device(res)) {
2797			sdev->timeout = IPR_VSET_RW_TIMEOUT;
2798			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2799		}
2800		if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2801			sdev->allow_restart = 1;
2802		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2803	}
2804	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2805	return 0;
2806}
2807
2808/**
2809 * ipr_slave_alloc - Prepare for commands to a device.
2810 * @sdev:	scsi device struct
2811 *
2812 * This function saves a pointer to the resource entry
2813 * in the scsi device struct if the device exists. We
2814 * can then use this pointer in ipr_queuecommand when
2815 * handling new commands.
2816 *
2817 * Return value:
2818 * 	0 on success
2819 **/
2820static int ipr_slave_alloc(struct scsi_device *sdev)
2821{
2822	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2823	struct ipr_resource_entry *res;
2824	unsigned long lock_flags;
2825
2826	sdev->hostdata = NULL;
2827
2828	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2829
2830	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2831		if ((res->cfgte.res_addr.bus == sdev->channel) &&
2832		    (res->cfgte.res_addr.target == sdev->id) &&
2833		    (res->cfgte.res_addr.lun == sdev->lun)) {
2834			res->sdev = sdev;
2835			res->add_to_ml = 0;
2836			res->in_erp = 0;
2837			sdev->hostdata = res;
2838			res->needs_sync_complete = 1;
2839			break;
2840		}
2841	}
2842
2843	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2844
2845	return 0;
2846}
2847
2848/**
2849 * ipr_eh_host_reset - Reset the host adapter
2850 * @scsi_cmd:	scsi command struct
2851 *
2852 * Return value:
2853 * 	SUCCESS / FAILED
2854 **/
2855static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
2856{
2857	struct ipr_ioa_cfg *ioa_cfg;
2858	int rc;
2859
2860	ENTER;
2861	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2862
2863	dev_err(&ioa_cfg->pdev->dev,
2864		"Adapter being reset as a result of error recovery.\n");
2865
2866	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2867		ioa_cfg->sdt_state = GET_DUMP;
2868
2869	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2870
2871	LEAVE;
2872	return rc;
2873}
2874
2875static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
2876{
2877	int rc;
2878
2879	spin_lock_irq(cmd->device->host->host_lock);
2880	rc = __ipr_eh_host_reset(cmd);
2881	spin_unlock_irq(cmd->device->host->host_lock);
2882
2883	return rc;
2884}
2885
2886/**
2887 * ipr_eh_dev_reset - Reset the device
2888 * @scsi_cmd:	scsi command struct
2889 *
2890 * This function issues a device reset to the affected device.
2891 * A LUN reset will be sent to the device first. If that does
2892 * not work, a target reset will be sent.
2893 *
2894 * Return value:
2895 *	SUCCESS / FAILED
2896 **/
2897static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
2898{
2899	struct ipr_cmnd *ipr_cmd;
2900	struct ipr_ioa_cfg *ioa_cfg;
2901	struct ipr_resource_entry *res;
2902	struct ipr_cmd_pkt *cmd_pkt;
2903	u32 ioasc;
2904
2905	ENTER;
2906	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2907	res = scsi_cmd->device->hostdata;
2908
2909	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
2910		return FAILED;
2911
2912	/*
2913	 * If we are currently going through reset/reload, return failed. This will force the
2914	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
2915	 * reset to complete
2916	 */
2917	if (ioa_cfg->in_reset_reload)
2918		return FAILED;
2919	if (ioa_cfg->ioa_is_dead)
2920		return FAILED;
2921
2922	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
2923		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
2924			if (ipr_cmd->scsi_cmd)
2925				ipr_cmd->done = ipr_scsi_eh_done;
2926		}
2927	}
2928
2929	res->resetting_device = 1;
2930
2931	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
2932
2933	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
2934	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
2935	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
2936	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
2937
2938	ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
2939	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
2940
2941	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2942
2943	res->resetting_device = 0;
2944
2945	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2946
2947	LEAVE;
2948	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
2949}
2950
2951static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
2952{
2953	int rc;
2954
2955	spin_lock_irq(cmd->device->host->host_lock);
2956	rc = __ipr_eh_dev_reset(cmd);
2957	spin_unlock_irq(cmd->device->host->host_lock);
2958
2959	return rc;
2960}
2961
2962/**
2963 * ipr_bus_reset_done - Op done function for bus reset.
2964 * @ipr_cmd:	ipr command struct
2965 *
2966 * This function is the op done function for a bus reset
2967 *
2968 * Return value:
2969 * 	none
2970 **/
2971static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
2972{
2973	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2974	struct ipr_resource_entry *res;
2975
2976	ENTER;
2977	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2978		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
2979			    sizeof(res->cfgte.res_handle))) {
2980			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
2981			break;
2982		}
2983	}
2984
2985	/*
2986	 * If abort has not completed, indicate the reset has, else call the
2987	 * abort's done function to wake the sleeping eh thread
2988	 */
2989	if (ipr_cmd->sibling->sibling)
2990		ipr_cmd->sibling->sibling = NULL;
2991	else
2992		ipr_cmd->sibling->done(ipr_cmd->sibling);
2993
2994	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2995	LEAVE;
2996}
2997
2998/**
2999 * ipr_abort_timeout - An abort task has timed out
3000 * @ipr_cmd:	ipr command struct
3001 *
3002 * This function handles when an abort task times out. If this
3003 * happens we issue a bus reset since we have resources tied
3004 * up that must be freed before returning to the midlayer.
3005 *
3006 * Return value:
3007 *	none
3008 **/
3009static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3010{
3011	struct ipr_cmnd *reset_cmd;
3012	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3013	struct ipr_cmd_pkt *cmd_pkt;
3014	unsigned long lock_flags = 0;
3015
3016	ENTER;
3017	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3018	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3019		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3020		return;
3021	}
3022
3023	ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3024	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3025	ipr_cmd->sibling = reset_cmd;
3026	reset_cmd->sibling = ipr_cmd;
3027	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3028	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3029	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3030	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3031	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3032
3033	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3034	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3035	LEAVE;
3036}
3037
3038/**
3039 * ipr_cancel_op - Cancel specified op
3040 * @scsi_cmd:	scsi command struct
3041 *
3042 * This function cancels specified op.
3043 *
3044 * Return value:
3045 *	SUCCESS / FAILED
3046 **/
3047static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3048{
3049	struct ipr_cmnd *ipr_cmd;
3050	struct ipr_ioa_cfg *ioa_cfg;
3051	struct ipr_resource_entry *res;
3052	struct ipr_cmd_pkt *cmd_pkt;
3053	u32 ioasc;
3054	int op_found = 0;
3055
3056	ENTER;
3057	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3058	res = scsi_cmd->device->hostdata;
3059
3060	/* If we are currently going through reset/reload, return failed.
3061	 * This will force the mid-layer to call ipr_eh_host_reset,
3062	 * which will then go to sleep and wait for the reset to complete
3063	 */
3064	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3065		return FAILED;
3066	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3067		return FAILED;
3068
3069	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3070		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3071			ipr_cmd->done = ipr_scsi_eh_done;
3072			op_found = 1;
3073			break;
3074		}
3075	}
3076
3077	if (!op_found)
3078		return SUCCESS;
3079
3080	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3081	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3082	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3083	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3084	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3085	ipr_cmd->u.sdev = scsi_cmd->device;
3086
3087	ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3088	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3089	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3090
3091	/*
3092	 * If the abort task timed out and we sent a bus reset, we will get
3093	 * one of the following responses to the abort
3094	 */
3095	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3096		ioasc = 0;
3097		ipr_trace;
3098	}
3099
3100	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3101	res->needs_sync_complete = 1;
3102
3103	LEAVE;
3104	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3105}
3106
3107/**
3108 * ipr_eh_abort - Abort a single op
3109 * @scsi_cmd:	scsi command struct
3110 *
3111 * Return value:
3112 * 	SUCCESS / FAILED
3113 **/
3114static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3115{
3116	unsigned long flags;
3117	int rc;
3118
3119	ENTER;
3120
3121	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3122	rc = ipr_cancel_op(scsi_cmd);
3123	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3124
3125	LEAVE;
3126	return rc;
3127}
3128
3129/**
3130 * ipr_handle_other_interrupt - Handle "other" interrupts
3131 * @ioa_cfg:	ioa config struct
3132 * @int_reg:	interrupt register
3133 *
3134 * Return value:
3135 * 	IRQ_NONE / IRQ_HANDLED
3136 **/
3137static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3138					      volatile u32 int_reg)
3139{
3140	irqreturn_t rc = IRQ_HANDLED;
3141
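	/* A transition-to-operational interrupt means the adapter has
	 * come back up; resume the reset job that was waiting on it. */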
3142	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3143		/* Mask the interrupt */
3144		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3145
3146		/* Clear the interrupt */
3147		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3148		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3149
3150		list_del(&ioa_cfg->reset_cmd->queue);
3151		del_timer(&ioa_cfg->reset_cmd->timer);
3152		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3153	} else {
3154		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3155			ioa_cfg->ioa_unit_checked = 1;
3156		else
3157			dev_err(&ioa_cfg->pdev->dev,
3158				"Permanent IOA failure. 0x%08X\n", int_reg);
3159
3160		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3161			ioa_cfg->sdt_state = GET_DUMP;
3162
3163		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3164		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3165	}
3166
3167	return rc;
3168}
3169
3170/**
3171 * ipr_isr - Interrupt service routine
3172 * @irq:	irq number
3173 * @devp:	pointer to ioa config struct
3174 * @regs:	pt_regs struct
3175 *
3176 * Return value:
3177 * 	IRQ_NONE / IRQ_HANDLED
3178 **/
3179static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3180{
3181	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3182	unsigned long lock_flags = 0;
3183	volatile u32 int_reg, int_mask_reg;
3184	u32 ioasc;
3185	u16 cmd_index;
3186	struct ipr_cmnd *ipr_cmd;
3187	irqreturn_t rc = IRQ_NONE;
3188
3189	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3190
3191	/* If interrupts are disabled, ignore the interrupt */
3192	if (!ioa_cfg->allow_interrupts) {
3193		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194		return IRQ_NONE;
3195	}
3196
3197	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3198	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3199
3200	/* If an interrupt on the adapter did not occur, ignore it */
3201	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3202		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3203		return IRQ_NONE;
3204	}
3205
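	/* Pull completed commands off the host request/response queue
	 * (HRRQ).  The toggle bit flips each time the queue wraps, which
	 * distinguishes new entries from ones already processed. */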
3206	while (1) {
3207		ipr_cmd = NULL;
3208
3209		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3210		       ioa_cfg->toggle_bit) {
3211
3212			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3213				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3214
3215			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3216				ioa_cfg->errors_logged++;
3217				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3218
3219				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3220					ioa_cfg->sdt_state = GET_DUMP;
3221
3222				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3223				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3224				return IRQ_HANDLED;
3225			}
3226
3227			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3228
3229			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3230
3231			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3232
3233			list_del(&ipr_cmd->queue);
3234			del_timer(&ipr_cmd->timer);
3235			ipr_cmd->done(ipr_cmd);
3236
3237			rc = IRQ_HANDLED;
3238
3239			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3240				ioa_cfg->hrrq_curr++;
3241			} else {
3242				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3243				ioa_cfg->toggle_bit ^= 1u;
3244			}
3245		}
3246
3247		if (ipr_cmd != NULL) {
3248			/* Clear the PCI interrupt */
3249			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3250			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3251		} else
3252			break;
3253	}
3254
3255	if (unlikely(rc == IRQ_NONE))
3256		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3257
3258	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259	return rc;
3260}
3261
3262/**
3263 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3264 * @ioa_cfg:	ioa config struct
3265 * @ipr_cmd:	ipr command struct
3266 *
3267 * Return value:
3268 * 	0 on success / -1 on failure
3269 **/
3270static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3271			   struct ipr_cmnd *ipr_cmd)
3272{
3273	int i;
3274	struct scatterlist *sglist;
3275	u32 length;
3276	u32 ioadl_flags = 0;
3277	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3278	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3279	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3280
3281	length = scsi_cmd->request_bufflen;
3282
3283	if (length == 0)
3284		return 0;
3285
3286	if (scsi_cmd->use_sg) {
3287		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3288						 scsi_cmd->request_buffer,
3289						 scsi_cmd->use_sg,
3290						 scsi_cmd->sc_data_direction);
3291
3292		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3293			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3294			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3295			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3296			ioarcb->write_ioadl_len =
3297				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3298		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3299			ioadl_flags = IPR_IOADL_FLAGS_READ;
3300			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3301			ioarcb->read_ioadl_len =
3302				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3303		}
3304
3305		sglist = scsi_cmd->request_buffer;
3306
3307		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3308			ioadl[i].flags_and_data_len =
3309				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3310			ioadl[i].address =
3311				cpu_to_be32(sg_dma_address(&sglist[i]));
3312		}
3313
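		/* The final IOADL descriptor must be flagged as last so the
		 * adapter knows where the list ends; a zero sg count means
		 * the mapping failed. */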
3314		if (likely(ipr_cmd->dma_use_sg)) {
3315			ioadl[i-1].flags_and_data_len |=
3316				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3317			return 0;
3318		} else
3319			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3320	} else {
3321		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3322			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3323			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3324			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3325			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3326		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3327			ioadl_flags = IPR_IOADL_FLAGS_READ;
3328			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3329			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3330		}
3331
3332		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3333						     scsi_cmd->request_buffer, length,
3334						     scsi_cmd->sc_data_direction);
3335
3336		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3337			ipr_cmd->dma_use_sg = 1;
3338			ioadl[0].flags_and_data_len =
3339				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3340			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3341			return 0;
3342		} else
3343			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3344	}
3345
3346	return -1;
3347}
3348
3349/**
3350 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3351 * @scsi_cmd:	scsi command struct
3352 *
3353 * Return value:
3354 * 	task attributes
3355 **/
3356static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3357{
3358	u8 tag[2];
3359	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3360
3361	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3362		switch (tag[0]) {
3363		case MSG_SIMPLE_TAG:
3364			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3365			break;
3366		case MSG_HEAD_TAG:
3367			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3368			break;
3369		case MSG_ORDERED_TAG:
3370			rc = IPR_FLAGS_LO_ORDERED_TASK;
3371			break;
3372		}
3373	}
3374
3375	return rc;
3376}
3377
3378/**
3379 * ipr_erp_done - Process completion of ERP for a device
3380 * @ipr_cmd:		ipr command struct
3381 *
3382 * This function copies the sense buffer into the scsi_cmd
3383 * struct and pushes the scsi_done function.
3384 * struct and calls the scsi_done function.
3385 * Return value:
3386 * 	nothing
3387 **/
3388static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3389{
3390	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3391	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3392	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3393	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3394
3395	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3396		scsi_cmd->result |= (DID_ERROR << 16);
3397		ipr_sdev_err(scsi_cmd->device,
3398			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3399	} else {
3400		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3401		       SCSI_SENSE_BUFFERSIZE);
3402	}
3403
3404	if (res) {
3405		res->needs_sync_complete = 1;
3406		res->in_erp = 0;
3407	}
3408	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3409	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3410	scsi_cmd->scsi_done(scsi_cmd);
3411}
3412
3413/**
3414 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3415 * @ipr_cmd:	ipr command struct
3416 *
3417 * Return value:
3418 * 	none
3419 **/
3420static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3421{
3422	struct ipr_ioarcb *ioarcb;
3423	struct ipr_ioasa *ioasa;
3424
3425	ioarcb = &ipr_cmd->ioarcb;
3426	ioasa = &ipr_cmd->ioasa;
3427
3428	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3429	ioarcb->write_data_transfer_length = 0;
3430	ioarcb->read_data_transfer_length = 0;
3431	ioarcb->write_ioadl_len = 0;
3432	ioarcb->read_ioadl_len = 0;
3433	ioasa->ioasc = 0;
3434	ioasa->residual_data_len = 0;
3435}
3436
3437/**
3438 * ipr_erp_request_sense - Send request sense to a device
3439 * @ipr_cmd:	ipr command struct
3440 *
3441 * This function sends a request sense to a device as a result
3442 * of a check condition.
3443 *
3444 * Return value:
3445 * 	nothing
3446 **/
3447static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3448{
3449	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3450	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3451
3452	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3453		ipr_erp_done(ipr_cmd);
3454		return;
3455	}
3456
3457	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3458
3459	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3460	cmd_pkt->cdb[0] = REQUEST_SENSE;
3461	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3462	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3463	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3464	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3465
3466	ipr_cmd->ioadl[0].flags_and_data_len =
3467		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3468	ipr_cmd->ioadl[0].address =
3469		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3470
3471	ipr_cmd->ioarcb.read_ioadl_len =
3472		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3473	ipr_cmd->ioarcb.read_data_transfer_length =
3474		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3475
3476	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3477		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3478}
3479
3480/**
3481 * ipr_erp_cancel_all - Send cancel all to a device
3482 * @ipr_cmd:	ipr command struct
3483 *
3484 * This function sends a cancel all to a device to clear the
3485 * queue. If we are running TCQ on the device, QERR is set to 1,
3486 * which means all outstanding ops have been dropped on the floor.
3487 * Cancel all will return them to us.
3488 *
3489 * Return value:
3490 * 	nothing
3491 **/
3492static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3493{
3494	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3495	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3496	struct ipr_cmd_pkt *cmd_pkt;
3497
3498	res->in_erp = 1;
3499
3500	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3501
3502	if (!scsi_get_tag_type(scsi_cmd->device)) {
3503		ipr_erp_request_sense(ipr_cmd);
3504		return;
3505	}
3506
3507	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3508	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3509	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3510
3511	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3512		   IPR_CANCEL_ALL_TIMEOUT);
3513}
3514
3515/**
3516 * ipr_dump_ioasa - Dump contents of IOASA
3517 * @ioa_cfg:	ioa config struct
3518 * @ipr_cmd:	ipr command struct
3519 *
3520 * This function is invoked by the interrupt handler when ops
3521 * fail. It will log the IOASA if appropriate. Only called
3522 * for GPDD ops.
3523 *
3524 * Return value:
3525 * 	none
3526 **/
3527static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3528			   struct ipr_cmnd *ipr_cmd)
3529{
3530	int i;
3531	u16 data_len;
3532	u32 ioasc;
3533	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3534	__be32 *ioasa_data = (__be32 *)ioasa;
3535	int error_index;
3536
3537	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3538
3539	if (0 == ioasc)
3540		return;
3541
3542	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3543		return;
3544
3545	error_index = ipr_get_error(ioasc);
3546
3547	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3548		/* Don't log an error if the IOA already logged one */
3549		if (ioasa->ilid != 0)
3550			return;
3551
3552		if (ipr_error_table[error_index].log_ioasa == 0)
3553			return;
3554	}
3555
3556	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3557		     ipr_error_table[error_index].error);
3558
3559	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3560	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3561		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3562			     "Device End state: %s Phase: %s\n",
3563			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3564			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3565	}
3566
3567	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3568		data_len = sizeof(struct ipr_ioasa);
3569	else
3570		data_len = be16_to_cpu(ioasa->ret_stat_len);
3571
3572	ipr_err("IOASA Dump:\n");
3573
3574	for (i = 0; i < data_len / 4; i += 4) {
3575		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3576			be32_to_cpu(ioasa_data[i]),
3577			be32_to_cpu(ioasa_data[i+1]),
3578			be32_to_cpu(ioasa_data[i+2]),
3579			be32_to_cpu(ioasa_data[i+3]));
3580	}
3581}
3582
3583/**
3584 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3585 * @ipr_cmd:	ipr command struct (sense data is generated from its
3586 *		IOASA and written to the associated scsi_cmd's sense_buffer)
3587 *
3588 * Return value:
3589 * 	none
3590 **/
3591static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3592{
3593	u32 failing_lba;
3594	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3595	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3596	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3597	u32 ioasc = be32_to_cpu(ioasa->ioasc);
3598
3599	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3600
3601	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3602		return;
3603
3604	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3605
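	/* A vset media error with a non-zero high LBA word needs
	 * descriptor format (0x72) sense data to carry the large failing
	 * LBA; everything else uses fixed format (0x70) below. */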
3606	if (ipr_is_vset_device(res) &&
3607	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3608	    ioasa->u.vset.failing_lba_hi != 0) {
3609		sense_buf[0] = 0x72;
3610		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3611		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3612		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3613
3614		sense_buf[7] = 12;
3615		sense_buf[8] = 0;
3616		sense_buf[9] = 0x0A;
3617		sense_buf[10] = 0x80;
3618
3619		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3620
3621		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3622		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3623		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3624		sense_buf[15] = failing_lba & 0x000000ff;
3625
3626		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3627
3628		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3629		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3630		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3631		sense_buf[19] = failing_lba & 0x000000ff;
3632	} else {
3633		sense_buf[0] = 0x70;
3634		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3635		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3636		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3637
3638		/* Illegal request */
3639		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3640		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3641			sense_buf[7] = 10;	/* additional length */
3642
3643			/* IOARCB was in error */
3644			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3645				sense_buf[15] = 0xC0;
3646			else	/* Parameter data was invalid */
3647				sense_buf[15] = 0x80;
3648
3649			sense_buf[16] =
3650			    ((IPR_FIELD_POINTER_MASK &
3651			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3652			sense_buf[17] =
3653			    (IPR_FIELD_POINTER_MASK &
3654			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3655		} else {
3656			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3657				if (ipr_is_vset_device(res))
3658					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3659				else
3660					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3661
3662				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
3663				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3664				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3665				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3666				sense_buf[6] = failing_lba & 0x000000ff;
3667			}
3668
3669			sense_buf[7] = 6;	/* additional length */
3670		}
3671	}
3672}
3673
3674/**
3675 * ipr_erp_start - Process an error response for a SCSI op
3676 * @ioa_cfg:	ioa config struct
3677 * @ipr_cmd:	ipr command struct
3678 *
3679 * This function determines whether or not to initiate ERP
3680 * on the affected device.
3681 *
3682 * Return value:
3683 * 	nothing
3684 **/
3685static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3686			      struct ipr_cmnd *ipr_cmd)
3687{
3688	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3689	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3690	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3691
3692	if (!res) {
3693		ipr_scsi_eh_done(ipr_cmd);
3694		return;
3695	}
3696
3697	if (ipr_is_gscsi(res))
3698		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3699	else
3700		ipr_gen_sense(ipr_cmd);
3701
3702	switch (ioasc & IPR_IOASC_IOASC_MASK) {
3703	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3704		scsi_cmd->result |= (DID_IMM_RETRY << 16);
3705		break;
3706	case IPR_IOASC_IR_RESOURCE_HANDLE:
3707		scsi_cmd->result |= (DID_NO_CONNECT << 16);
3708		break;
3709	case IPR_IOASC_HW_SEL_TIMEOUT:
3710		scsi_cmd->result |= (DID_NO_CONNECT << 16);
3711		res->needs_sync_complete = 1;
3712		break;
3713	case IPR_IOASC_SYNC_REQUIRED:
3714		if (!res->in_erp)
3715			res->needs_sync_complete = 1;
3716		scsi_cmd->result |= (DID_IMM_RETRY << 16);
3717		break;
3718	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3719		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3720		break;
3721	case IPR_IOASC_BUS_WAS_RESET:
3722	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3723		/*
3724		 * Report the bus reset and ask for a retry. The device
3725		 * will give CC/UA the next command.
3726		 */
3727		if (!res->resetting_device)
3728			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3729		scsi_cmd->result |= (DID_ERROR << 16);
3730		res->needs_sync_complete = 1;
3731		break;
3732	case IPR_IOASC_HW_DEV_BUS_STATUS:
3733		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3734		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3735			ipr_erp_cancel_all(ipr_cmd);
3736			return;
3737		}
3738		res->needs_sync_complete = 1;
3739		break;
3740	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3741		break;
3742	default:
3743		scsi_cmd->result |= (DID_ERROR << 16);
3744		if (!ipr_is_vset_device(res))
3745			res->needs_sync_complete = 1;
3746		break;
3747	}
3748
3749	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3750	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3751	scsi_cmd->scsi_done(scsi_cmd);
3752}
3753
3754/**
3755 * ipr_scsi_done - mid-layer done function
3756 * @ipr_cmd:	ipr command struct
3757 *
3758 * This function is invoked by the interrupt handler for
3759 * ops generated by the SCSI mid-layer
3760 *
3761 * Return value:
3762 * 	none
3763 **/
3764static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3765{
3766	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3767	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3768	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3769
3770	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
3771
3772	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3773		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3774		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3775		scsi_cmd->scsi_done(scsi_cmd);
3776	} else
3777		ipr_erp_start(ioa_cfg, ipr_cmd);
3778}
3779
3780/**
3781 * ipr_save_ioafp_mode_select - Save adapter's mode select data
3782 * @ioa_cfg:	ioa config struct
3783 * @scsi_cmd:	scsi command struct
3784 *
3785 * This function saves mode select data for the adapter to
3786 * use following an adapter reset.
3787 *
3788 * Return value:
3789 *	0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3790 **/
3791static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3792				       struct scsi_cmnd *scsi_cmd)
3793{
3794	if (!ioa_cfg->saved_mode_pages) {
3795		ioa_cfg->saved_mode_pages  = kmalloc(sizeof(struct ipr_mode_pages),
3796						     GFP_ATOMIC);
3797		if (!ioa_cfg->saved_mode_pages) {
3798			dev_err(&ioa_cfg->pdev->dev,
3799				"IOA mode select buffer allocation failed\n");
3800			return SCSI_MLQUEUE_HOST_BUSY;
3801		}
3802	}
3803
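	/*
	 * cmnd[4] is the parameter list length of the MODE SELECT(6) CDB, so
	 * save exactly that many bytes of mode page data for replay following
	 * an adapter reset.
	 */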
3804	memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3805	ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3806	return 0;
3807}
3808
3809/**
3810 * ipr_queuecommand - Queue a mid-layer request
3811 * @scsi_cmd:	scsi command struct
3812 * @done:		done function
3813 *
3814 * This function queues a request generated by the mid-layer.
3815 *
3816 * Return value:
3817 *	0 on success
3818 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3819 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
3820 **/
3821static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
3822			    void (*done) (struct scsi_cmnd *))
3823{
3824	struct ipr_ioa_cfg *ioa_cfg;
3825	struct ipr_resource_entry *res;
3826	struct ipr_ioarcb *ioarcb;
3827	struct ipr_cmnd *ipr_cmd;
3828	int rc = 0;
3829
3830	scsi_cmd->scsi_done = done;
3831	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3832	res = scsi_cmd->device->hostdata;
3833	scsi_cmd->result = (DID_OK << 16);
3834
3835	/*
3836	 * We are currently blocking all devices due to a host reset.
3837	 * We have told the SCSI mid-layer to stop giving us new requests, but
3838	 * ERP ops don't count. FIXME
3839	 */
3840	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
3841		return SCSI_MLQUEUE_HOST_BUSY;
3842
3843	/*
3844	 * FIXME - Create a scsi_set_host_offline interface
3845	 *  so that the ioa_is_dead check can be removed
3846	 */
3847	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
3848		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3849		scsi_cmd->result = (DID_NO_CONNECT << 16);
3850		scsi_cmd->scsi_done(scsi_cmd);
3851		return 0;
3852	}
3853
3854	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3855	ioarcb = &ipr_cmd->ioarcb;
3856	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
3857
3858	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3859	ipr_cmd->scsi_cmd = scsi_cmd;
3860	ioarcb->res_handle = res->cfgte.res_handle;
3861	ipr_cmd->done = ipr_scsi_done;
3862	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
3863
3864	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
3865		if (scsi_cmd->underflow == 0)
3866			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3867
3868		if (res->needs_sync_complete) {
3869			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
3870			res->needs_sync_complete = 0;
3871		}
3872
3873		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
3874		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
3875		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
3876		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
3877	}
3878
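	/*
	 * CDB opcodes of 0xC0 and above are vendor specific. Send them to the
	 * adapter as IOA commands, except on generic SCSI devices, where only
	 * IPR_QUERY_RSRC_STATE is treated that way.
	 */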
3879	if (scsi_cmd->cmnd[0] >= 0xC0 &&
3880	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
3881		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
3882
3883	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
3884		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
3885
3886	if (likely(rc == 0))
3887		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
3888
3889	if (likely(rc == 0)) {
3890		mb();
3891		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
3892		       ioa_cfg->regs.ioarrin_reg);
3893	} else {
3894		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3895		 return SCSI_MLQUEUE_HOST_BUSY;
3896	}
3897
3898	return 0;
3899}
3900
3901/**
3902 * ipr_ioa_info - Get information about the card/driver
3903 * @host:	scsi host struct
3904 *
3905 * Return value:
3906 * 	pointer to buffer with description string
3907 **/
3908static const char * ipr_ioa_info(struct Scsi_Host *host)
3909{
3910	static char buffer[512];
3911	struct ipr_ioa_cfg *ioa_cfg;
3912	unsigned long lock_flags = 0;
3913
3914	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
3915
3916	spin_lock_irqsave(host->host_lock, lock_flags);
3917	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
3918	spin_unlock_irqrestore(host->host_lock, lock_flags);
3919
3920	return buffer;
3921}
3922
3923static struct scsi_host_template driver_template = {
3924	.module = THIS_MODULE,
3925	.name = "IPR",
3926	.info = ipr_ioa_info,
3927	.queuecommand = ipr_queuecommand,
3928	.eh_abort_handler = ipr_eh_abort,
3929	.eh_device_reset_handler = ipr_eh_dev_reset,
3930	.eh_host_reset_handler = ipr_eh_host_reset,
3931	.slave_alloc = ipr_slave_alloc,
3932	.slave_configure = ipr_slave_configure,
3933	.slave_destroy = ipr_slave_destroy,
3934	.change_queue_depth = ipr_change_queue_depth,
3935	.change_queue_type = ipr_change_queue_type,
3936	.bios_param = ipr_biosparam,
3937	.can_queue = IPR_MAX_COMMANDS,
3938	.this_id = -1,
3939	.sg_tablesize = IPR_MAX_SGLIST,
3940	.max_sectors = IPR_IOA_MAX_SECTORS,
3941	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
3942	.use_clustering = ENABLE_CLUSTERING,
3943	.shost_attrs = ipr_ioa_attrs,
3944	.sdev_attrs = ipr_dev_attrs,
3945	.proc_name = IPR_NAME
3946};
3947
3948#ifdef CONFIG_PPC_PSERIES
3949static const u16 ipr_blocked_processors[] = {
3950	PV_NORTHSTAR,
3951	PV_PULSAR,
3952	PV_POWER4,
3953	PV_ICESTAR,
3954	PV_SSTAR,
3955	PV_POWER4p,
3956	PV_630,
3957	PV_630p
3958};
3959
3960/**
3961 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
3962 * @ioa_cfg:	ioa cfg struct
3963 *
3964 * Adapters that use Gemstone revision < 3.1 do not work reliably on
3965 * certain pSeries hardware. This function determines if the given
3966 * adapter is in one of these configurations or not.
3967 *
3968 * Return value:
3969 * 	1 if adapter is not supported / 0 if adapter is supported
3970 **/
3971static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
3972{
3973	u8 rev_id;
3974	int i;
3975
3976	if (ioa_cfg->type == 0x5702) {
3977		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
3978					 &rev_id) == PCIBIOS_SUCCESSFUL) {
3979			if (rev_id < 4) {
3980				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
3981					if (__is_processor(ipr_blocked_processors[i]))
3982						return 1;
3983				}
3984			}
3985		}
3986	}
3987	return 0;
3988}
3989#else
3990#define ipr_invalid_adapter(ioa_cfg) 0
3991#endif
3992
3993/**
3994 * ipr_ioa_bringdown_done - IOA bring down completion.
3995 * @ipr_cmd:	ipr command struct
3996 *
3997 * This function processes the completion of an adapter bring down.
3998 * It wakes any reset sleepers.
3999 *
4000 * Return value:
4001 * 	IPR_RC_JOB_RETURN
4002 **/
4003static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4004{
4005	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4006
4007	ENTER;
4008	ioa_cfg->in_reset_reload = 0;
4009	ioa_cfg->reset_retries = 0;
4010	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4011	wake_up_all(&ioa_cfg->reset_wait_q);
4012
4013	spin_unlock_irq(ioa_cfg->host->host_lock);
4014	scsi_unblock_requests(ioa_cfg->host);
4015	spin_lock_irq(ioa_cfg->host->host_lock);
4016	LEAVE;
4017
4018	return IPR_RC_JOB_RETURN;
4019}
4020
4021/**
4022 * ipr_ioa_reset_done - IOA reset completion.
4023 * @ipr_cmd:	ipr command struct
4024 *
4025 * This function processes the completion of an adapter reset.
4026 * It schedules any necessary mid-layer add/removes and
4027 * wakes any reset sleepers.
4028 *
4029 * Return value:
4030 * 	IPR_RC_JOB_RETURN
4031 **/
4032static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4033{
4034	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4035	struct ipr_resource_entry *res;
4036	struct ipr_hostrcb *hostrcb, *temp;
4037	int i = 0;
4038
4039	ENTER;
4040	ioa_cfg->in_reset_reload = 0;
4041	ioa_cfg->allow_cmds = 1;
4042	ioa_cfg->reset_cmd = NULL;
4043
4044	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4045		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4046			ipr_trace;
4047			break;
4048		}
4049	}
4050	schedule_work(&ioa_cfg->work_q);
4051
4052	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4053		list_del(&hostrcb->queue);
4054		if (i++ < IPR_NUM_LOG_HCAMS)
4055			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4056		else
4057			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4058	}
4059
4060	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4061
4062	ioa_cfg->reset_retries = 0;
4063	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4064	wake_up_all(&ioa_cfg->reset_wait_q);
4065
4066	spin_unlock_irq(ioa_cfg->host->host_lock);
4067	scsi_unblock_requests(ioa_cfg->host);
4068	spin_lock_irq(ioa_cfg->host->host_lock);
4069
4070	if (!ioa_cfg->allow_cmds)
4071		scsi_block_requests(ioa_cfg->host);
4072
4073	LEAVE;
4074	return IPR_RC_JOB_RETURN;
4075}
4076
4077/**
4078 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4079 * @supported_dev:	supported device struct
4080 * @vpids:			vendor product id struct
4081 *
4082 * Return value:
4083 * 	none
4084 **/
4085static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4086				 struct ipr_std_inq_vpids *vpids)
4087{
4088	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4089	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4090	supported_dev->num_records = 1;
4091	supported_dev->data_length =
4092		cpu_to_be16(sizeof(struct ipr_supported_device));
4093	supported_dev->reserved = 0;
4094}
4095
4096/**
4097 * ipr_set_supported_devs - Send Set Supported Devices for a device
4098 * @ipr_cmd:	ipr command struct
4099 *
4100 * This function sends a Set Supported Devices command to the adapter
4101 *
4102 * Return value:
4103 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4104 **/
4105static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4106{
4107	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4108	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4109	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4110	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4111	struct ipr_resource_entry *res = ipr_cmd->u.res;
4112
4113	ipr_cmd->job_step = ipr_ioa_reset_done;
4114
4115	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4116		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4117			continue;
4118
4119		ipr_cmd->u.res = res;
4120		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4121
4122		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4123		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4124		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4125
4126		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4127		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4128		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4129
4130		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4131							sizeof(struct ipr_supported_device));
4132		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4133					     offsetof(struct ipr_misc_cbs, supp_dev));
4134		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4135		ioarcb->write_data_transfer_length =
4136			cpu_to_be32(sizeof(struct ipr_supported_device));
4137
4138		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4139			   IPR_SET_SUP_DEVICE_TIMEOUT);
4140
4141		ipr_cmd->job_step = ipr_set_supported_devs;
4142		return IPR_RC_JOB_RETURN;
4143	}
4144
4145	return IPR_RC_JOB_CONTINUE;
4146}
4147
4148/**
4149 * ipr_get_mode_page - Locate specified mode page
4150 * @mode_pages:	mode page buffer
4151 * @page_code:	page code to find
4152 * @len:		minimum required length for mode page
4153 *
4154 * Return value:
4155 * 	pointer to mode page / NULL on failure
4156 **/
4157static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4158			       u32 page_code, u32 len)
4159{
4160	struct ipr_mode_page_hdr *mode_hdr;
4161	u32 page_length;
4162	u32 length;
4163
4164	if (!mode_pages || (mode_pages->hdr.length == 0))
4165		return NULL;
4166
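	/*
	 * hdr.length excludes itself, so (length + 1) is the total mode data
	 * length. Subtract the 4 byte mode parameter header and any block
	 * descriptors to get the number of mode page bytes to walk.
	 */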
4167	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4168	mode_hdr = (struct ipr_mode_page_hdr *)
4169		(mode_pages->data + mode_pages->hdr.block_desc_len);
4170
4171	while (length) {
4172		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4173			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4174				return mode_hdr;
4175			break;
4176		} else {
4177			page_length = (sizeof(struct ipr_mode_page_hdr) +
4178				       mode_hdr->page_length);
4179			length -= page_length;
4180			mode_hdr = (struct ipr_mode_page_hdr *)
4181				((unsigned long)mode_hdr + page_length);
4182		}
4183	}
4184	return NULL;
4185}
4186
4187/**
4188 * ipr_check_term_power - Check for term power errors
4189 * @ioa_cfg:	ioa config struct
4190 * @mode_pages:	IOAFP mode pages buffer
4191 *
4192 * Check the IOAFP's mode page 28 for term power errors
4193 *
4194 * Return value:
4195 * 	nothing
4196 **/
4197static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4198				 struct ipr_mode_pages *mode_pages)
4199{
4200	int i;
4201	int entry_length;
4202	struct ipr_dev_bus_entry *bus;
4203	struct ipr_mode_page28 *mode_page;
4204
4205	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4206				      sizeof(struct ipr_mode_page28));
4207
4208	entry_length = mode_page->entry_length;
4209
4210	bus = mode_page->bus;
4211
4212	for (i = 0; i < mode_page->num_entries; i++) {
4213		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4214			dev_err(&ioa_cfg->pdev->dev,
4215				"Term power is absent on scsi bus %d\n",
4216				bus->res_addr.bus);
4217		}
4218
4219		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4220	}
4221}
4222
4223/**
4224 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4225 * @ioa_cfg:	ioa config struct
4226 *
4227 * Looks through the config table checking for SES devices. If
4228 * an SES device has an entry in the SES table specifying a maximum
4229 * SCSI bus speed, the speed of that bus is limited accordingly.
4230 *
4231 * Return value:
4232 * 	none
4233 **/
4234static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4235{
4236	u32 max_xfer_rate;
4237	int i;
4238
4239	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4240		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4241						       ioa_cfg->bus_attr[i].bus_width);
4242
4243		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4244			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4245	}
4246}
4247
4248/**
4249 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4250 * @ioa_cfg:	ioa config struct
4251 * @mode_pages:	mode page 28 buffer
4252 *
4253 * Updates mode page 28 based on driver configuration
4254 *
4255 * Return value:
4256 * 	none
4257 **/
4258static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4259					  	struct ipr_mode_pages *mode_pages)
4260{
4261	int i, entry_length;
4262	struct ipr_dev_bus_entry *bus;
4263	struct ipr_bus_attributes *bus_attr;
4264	struct ipr_mode_page28 *mode_page;
4265
4266	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4267				      sizeof(struct ipr_mode_page28));
4268
4269	entry_length = mode_page->entry_length;
4270
4271	/* Loop for each device bus entry */
4272	for (i = 0, bus = mode_page->bus;
4273	     i < mode_page->num_entries;
4274	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4275		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4276			dev_err(&ioa_cfg->pdev->dev,
4277				"Invalid resource address reported: 0x%08X\n",
4278				IPR_GET_PHYS_LOC(bus->res_addr));
4279			continue;
4280		}
4281
4282		bus_attr = &ioa_cfg->bus_attr[i];
4283		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4284		bus->bus_width = bus_attr->bus_width;
4285		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4286		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4287		if (bus_attr->qas_enabled)
4288			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4289		else
4290			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4291	}
4292}
4293
4294/**
4295 * ipr_build_mode_select - Build a mode select command
4296 * @ipr_cmd:	ipr command struct
4297 * @res_handle:	resource handle to send command to
4298 * @parm:		Byte 1 of the Mode Select CDB
4299 * @dma_addr:	DMA buffer address
4300 * @xfer_len:	data transfer length
4301 *
4302 * Return value:
4303 * 	none
4304 **/
4305static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4306				  __be32 res_handle, u8 parm, u32 dma_addr,
4307				  u8 xfer_len)
4308{
4309	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4310	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4311
4312	ioarcb->res_handle = res_handle;
4313	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4314	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4315	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4316	ioarcb->cmd_pkt.cdb[1] = parm;
4317	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4318
4319	ioadl->flags_and_data_len =
4320		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4321	ioadl->address = cpu_to_be32(dma_addr);
4322	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4323	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4324}
4325
4326/**
4327 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4328 * @ipr_cmd:	ipr command struct
4329 *
4330 * This function sets up the SCSI bus attributes and sends
4331 * a Mode Select for Page 28 to activate them.
4332 *
4333 * Return value:
4334 * 	IPR_RC_JOB_RETURN
4335 **/
4336static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4337{
4338	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4339	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4340	int length;
4341
4342	ENTER;
4343	if (ioa_cfg->saved_mode_pages) {
4344		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4345		       ioa_cfg->saved_mode_page_len);
4346		length = ioa_cfg->saved_mode_page_len;
4347	} else {
4348		ipr_scsi_bus_speed_limit(ioa_cfg);
4349		ipr_check_term_power(ioa_cfg, mode_pages);
4350		ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4351		length = mode_pages->hdr.length + 1;
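		/*
		 * The mode data length field is reserved for MODE SELECT, so
		 * zero it before sending the pages back to the IOA.
		 */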
4352		mode_pages->hdr.length = 0;
4353	}
4354
4355	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4356			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4357			      length);
4358
4359	ipr_cmd->job_step = ipr_set_supported_devs;
4360	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4361				    struct ipr_resource_entry, queue);
4362
4363	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4364
4365	LEAVE;
4366	return IPR_RC_JOB_RETURN;
4367}
4368
4369/**
4370 * ipr_build_mode_sense - Builds a mode sense command
4371 * @ipr_cmd:	ipr command struct
4372 * @res_handle:	resource handle to send command to
4373 * @parm:		Byte 2 of mode sense command
4374 * @dma_addr:	DMA address of mode sense buffer
4375 * @xfer_len:	Size of DMA buffer
4376 *
4377 * Return value:
4378 * 	none
4379 **/
4380static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4381				 __be32 res_handle,
4382				 u8 parm, u32 dma_addr, u8 xfer_len)
4383{
4384	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4385	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4386
4387	ioarcb->res_handle = res_handle;
4388	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4389	ioarcb->cmd_pkt.cdb[2] = parm;
4390	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4391	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4392
4393	ioadl->flags_and_data_len =
4394		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4395	ioadl->address = cpu_to_be32(dma_addr);
4396	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4397	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4398}
4399
4400/**
4401 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4402 * @ipr_cmd:	ipr command struct
4403 *
4404 * This function sends a Page 28 mode sense to the IOA to
4405 * retrieve SCSI bus attributes.
4406 *
4407 * Return value:
4408 * 	IPR_RC_JOB_RETURN
4409 **/
4410static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4411{
4412	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4413
4414	ENTER;
4415	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4416			     0x28, ioa_cfg->vpd_cbs_dma +
4417			     offsetof(struct ipr_misc_cbs, mode_pages),
4418			     sizeof(struct ipr_mode_pages));
4419
4420	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4421
4422	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4423
4424	LEAVE;
4425	return IPR_RC_JOB_RETURN;
4426}
4427
4428/**
4429 * ipr_init_res_table - Initialize the resource table
4430 * @ipr_cmd:	ipr command struct
4431 *
4432 * This function looks through the existing resource table, comparing
4433 * it with the config table. It handles devices that have appeared or
4434 * disappeared and schedules adding/removing them from the mid-layer
4435 * as appropriate.
4436 *
4437 * Return value:
4438 * 	IPR_RC_JOB_CONTINUE
4439 **/
4440static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4441{
4442	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4443	struct ipr_resource_entry *res, *temp;
4444	struct ipr_config_table_entry *cfgte;
4445	int found, i;
4446	LIST_HEAD(old_res);
4447
4448	ENTER;
4449	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4450		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4451
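	/*
	 * Move every known resource onto old_res. Entries still present in
	 * the new config table are pulled back onto used_res_q below; whatever
	 * remains on old_res afterwards has disappeared and is either freed or
	 * scheduled for removal from the mid-layer.
	 */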
4452	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4453		list_move_tail(&res->queue, &old_res);
4454
4455	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4456		cfgte = &ioa_cfg->cfg_table->dev[i];
4457		found = 0;
4458
4459		list_for_each_entry_safe(res, temp, &old_res, queue) {
4460			if (!memcmp(&res->cfgte.res_addr,
4461				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4462				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4463				found = 1;
4464				break;
4465			}
4466		}
4467
4468		if (!found) {
4469			if (list_empty(&ioa_cfg->free_res_q)) {
4470				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4471				break;
4472			}
4473
4474			found = 1;
4475			res = list_entry(ioa_cfg->free_res_q.next,
4476					 struct ipr_resource_entry, queue);
4477			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4478			ipr_init_res_entry(res);
4479			res->add_to_ml = 1;
4480		}
4481
4482		if (found)
4483			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4484	}
4485
4486	list_for_each_entry_safe(res, temp, &old_res, queue) {
4487		if (res->sdev) {
4488			res->del_from_ml = 1;
4489			res->sdev->hostdata = NULL;
4490			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4491		} else {
4492			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4493		}
4494	}
4495
4496	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4497
4498	LEAVE;
4499	return IPR_RC_JOB_CONTINUE;
4500}
4501
4502/**
4503 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4504 * @ipr_cmd:	ipr command struct
4505 *
4506 * This function sends a Query IOA Configuration command
4507 * to the adapter to retrieve the IOA configuration table.
4508 *
4509 * Return value:
4510 * 	IPR_RC_JOB_RETURN
4511 **/
4512static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4513{
4514	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4515	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4516	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4517	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4518
4519	ENTER;
4520	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4521		 ucode_vpd->major_release, ucode_vpd->card_type,
4522		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4523	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4524	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4525
4526	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4527	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4528	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4529
4530	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4531	ioarcb->read_data_transfer_length =
4532		cpu_to_be32(sizeof(struct ipr_config_table));
4533
4534	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4535	ioadl->flags_and_data_len =
4536		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4537
4538	ipr_cmd->job_step = ipr_init_res_table;
4539
4540	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4541
4542	LEAVE;
4543	return IPR_RC_JOB_RETURN;
4544}
4545
4546/**
4547 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4548 * @ipr_cmd:	ipr command struct
4549 *
4550 * This utility function sends an inquiry to the adapter.
4551 *
4552 * Return value:
4553 * 	none
4554 **/
4555static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4556			      u32 dma_addr, u8 xfer_len)
4557{
4558	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4559	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4560
4561	ENTER;
4562	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4563	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4564
4565	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4566	ioarcb->cmd_pkt.cdb[1] = flags;
4567	ioarcb->cmd_pkt.cdb[2] = page;
4568	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4569
4570	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4571	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4572
4573	ioadl->address = cpu_to_be32(dma_addr);
4574	ioadl->flags_and_data_len =
4575		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4576
4577	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4578	LEAVE;
4579}
4580
4581/**
4582 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4583 * @ipr_cmd:	ipr command struct
4584 *
4585 * This function sends a Page 3 inquiry to the adapter
4586 * to retrieve software VPD information.
4587 *
4588 * Return value:
4589 * 	IPR_RC_JOB_RETURN
4590 **/
4591static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4592{
4593	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4594	char type[5];
4595
4596	ENTER;
4597
4598	/* Grab the type out of the VPD and store it away */
4599	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4600	type[4] = '\0';
4601	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4602
4603	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4604
4605	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4606			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4607			  sizeof(struct ipr_inquiry_page3));
4608
4609	LEAVE;
4610	return IPR_RC_JOB_RETURN;
4611}
4612
4613/**
4614 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4615 * @ipr_cmd:	ipr command struct
4616 *
4617 * This function sends a standard inquiry to the adapter.
4618 *
4619 * Return value:
4620 * 	IPR_RC_JOB_RETURN
4621 **/
4622static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4623{
4624	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4625
4626	ENTER;
4627	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4628
4629	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4630			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4631			  sizeof(struct ipr_ioa_vpd));
4632
4633	LEAVE;
4634	return IPR_RC_JOB_RETURN;
4635}
4636
4637/**
4638 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4639 * @ipr_cmd:	ipr command struct
4640 *
4641 * This function sends an Identify Host Request Response Queue
4642 * command to establish the HRRQ with the adapter.
4643 *
4644 * Return value:
4645 * 	IPR_RC_JOB_RETURN
4646 **/
4647static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4648{
4649	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4650	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4651
4652	ENTER;
4653	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4654
4655	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4656	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4657
4658	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
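	/*
	 * CDB bytes 2-5 carry the bus address of the host RRQ and bytes 7-8
	 * its size in bytes (one 32-bit entry per command block).
	 */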
4659	ioarcb->cmd_pkt.cdb[2] =
4660		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4661	ioarcb->cmd_pkt.cdb[3] =
4662		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4663	ioarcb->cmd_pkt.cdb[4] =
4664		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4665	ioarcb->cmd_pkt.cdb[5] =
4666		((u32) ioa_cfg->host_rrq_dma) & 0xff;
4667	ioarcb->cmd_pkt.cdb[7] =
4668		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4669	ioarcb->cmd_pkt.cdb[8] =
4670		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4671
4672	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4673
4674	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4675
4676	LEAVE;
4677	return IPR_RC_JOB_RETURN;
4678}
4679
4680/**
4681 * ipr_reset_timer_done - Adapter reset timer function
4682 * @ipr_cmd:	ipr command struct
4683 *
4684 * Description: This function is used in adapter reset processing
4685 * for timing events. If the reset_cmd pointer in the IOA
4686 * config struct does not point to this command, we are doing nested
4687 * resets and fail_all_ops will take care of freeing the
4688 * command block.
4689 *
4690 * Return value:
4691 * 	none
4692 **/
4693static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4694{
4695	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4696	unsigned long lock_flags = 0;
4697
4698	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4699
4700	if (ioa_cfg->reset_cmd == ipr_cmd) {
4701		list_del(&ipr_cmd->queue);
4702		ipr_cmd->done(ipr_cmd);
4703	}
4704
4705	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4706}
4707
4708/**
4709 * ipr_reset_start_timer - Start a timer for adapter reset job
4710 * @ipr_cmd:	ipr command struct
4711 * @timeout:	timeout value
4712 *
4713 * Description: This function is used in adapter reset processing
4714 * for timing events. If the reset_cmd pointer in the IOA
4715 * config struct does not point to this command, we are doing nested
4716 * resets and fail_all_ops will take care of freeing the
4717 * command block.
4718 *
4719 * Return value:
4720 * 	none
4721 **/
4722static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4723				  unsigned long timeout)
4724{
4725	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4726	ipr_cmd->done = ipr_reset_ioa_job;
4727
4728	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4729	ipr_cmd->timer.expires = jiffies + timeout;
4730	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4731	add_timer(&ipr_cmd->timer);
4732}
4733
4734/**
4735 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4736 * @ioa_cfg:	ioa cfg struct
4737 *
4738 * Return value:
4739 * 	nothing
4740 **/
4741static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
4742{
4743	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
4744
4745	/* Initialize Host RRQ pointers */
4746	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
4747	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
4748	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4749	ioa_cfg->toggle_bit = 1;
4750
4751	/* Zero out config table */
4752	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
4753}
4754
4755/**
4756 * ipr_reset_enable_ioa - Enable the IOA following a reset.
4757 * @ipr_cmd:	ipr command struct
4758 *
4759 * This function reinitializes some control blocks and
4760 * enables destructive diagnostics on the adapter.
4761 *
4762 * Return value:
4763 * 	IPR_RC_JOB_RETURN
4764 **/
4765static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
4766{
4767	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4768	volatile u32 int_reg;
4769
4770	ENTER;
4771	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
4772	ipr_init_ioa_mem(ioa_cfg);
4773
4774	ioa_cfg->allow_interrupts = 1;
4775	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4776
4777	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4778		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
4779		       ioa_cfg->regs.clr_interrupt_mask_reg);
4780		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4781		return IPR_RC_JOB_CONTINUE;
4782	}
4783
4784	/* Enable destructive diagnostics on IOA */
4785	writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
4786
4787	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
4788	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4789
4790	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
4791
4792	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4793	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
4794	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
4795	ipr_cmd->done = ipr_reset_ioa_job;
4796	add_timer(&ipr_cmd->timer);
4797	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4798
4799	LEAVE;
4800	return IPR_RC_JOB_RETURN;
4801}
4802
4803/**
4804 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
4805 * @ipr_cmd:	ipr command struct
4806 *
4807 * This function is invoked when an adapter dump has run out
4808 * of processing time.
4809 *
4810 * Return value:
4811 * 	IPR_RC_JOB_CONTINUE
4812 **/
4813static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
4814{
4815	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4816
4817	if (ioa_cfg->sdt_state == GET_DUMP)
4818		ioa_cfg->sdt_state = ABORT_DUMP;
4819
4820	ipr_cmd->job_step = ipr_reset_alert;
4821
4822	return IPR_RC_JOB_CONTINUE;
4823}
4824
4825/**
4826 * ipr_unit_check_no_data - Log a unit check/no data error
4827 * @ioa_cfg:		ioa config struct
4828 *
4829 * Logs an error indicating the adapter unit checked, but for some
4830 * reason, we were unable to fetch the unit check buffer.
4831 *
4832 * Return value:
4833 * 	nothing
4834 **/
4835static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
4836{
4837	ioa_cfg->errors_logged++;
4838	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
4839}
4840
4841/**
4842 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
4843 * @ioa_cfg:		ioa config struct
4844 *
4845 * Fetches the unit check buffer from the adapter by clocking the data
4846 * through the mailbox register.
4847 *
4848 * Return value:
4849 * 	nothing
4850 **/
4851static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
4852{
4853	unsigned long mailbox;
4854	struct ipr_hostrcb *hostrcb;
4855	struct ipr_uc_sdt sdt;
4856	int rc, length;
4857
4858	mailbox = readl(ioa_cfg->ioa_mailbox);
4859
4860	if (!ipr_sdt_is_fmt2(mailbox)) {
4861		ipr_unit_check_no_data(ioa_cfg);
4862		return;
4863	}
4864
4865	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
4866	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
4867					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
4868
4869	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
4870	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
4871		ipr_unit_check_no_data(ioa_cfg);
4872		return;
4873	}
4874
4875	/* Find length of the first sdt entry (UC buffer) */
4876	length = (be32_to_cpu(sdt.entry[0].end_offset) -
4877		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
4878
4879	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
4880			     struct ipr_hostrcb, queue);
4881	list_del(&hostrcb->queue);
4882	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
4883
4884	rc = ipr_get_ldump_data_section(ioa_cfg,
4885					be32_to_cpu(sdt.entry[0].bar_str_offset),
4886					(__be32 *)&hostrcb->hcam,
4887					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
4888
4889	if (!rc)
4890		ipr_handle_log_data(ioa_cfg, hostrcb);
4891	else
4892		ipr_unit_check_no_data(ioa_cfg);
4893
4894	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4895}
4896
4897/**
4898 * ipr_reset_restore_cfg_space - Restore PCI config space.
4899 * @ipr_cmd:	ipr command struct
4900 *
4901 * Description: This function restores the saved PCI config space of
4902 * the adapter, fails all outstanding ops back to the callers, and
4903 * fetches the dump/unit check if applicable to this reset.
4904 *
4905 * Return value:
4906 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4907 **/
4908static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
4909{
4910	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4911	int rc;
4912
4913	ENTER;
4914	pci_unblock_user_cfg_access(ioa_cfg->pdev);
4915	rc = pci_restore_state(ioa_cfg->pdev);
4916
4917	if (rc != PCIBIOS_SUCCESSFUL) {
4918		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4919		return IPR_RC_JOB_CONTINUE;
4920	}
4921
4922	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
4923		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4924		return IPR_RC_JOB_CONTINUE;
4925	}
4926
4927	ipr_fail_all_ops(ioa_cfg);
4928
4929	if (ioa_cfg->ioa_unit_checked) {
4930		ioa_cfg->ioa_unit_checked = 0;
4931		ipr_get_unit_check_buffer(ioa_cfg);
4932		ipr_cmd->job_step = ipr_reset_alert;
4933		ipr_reset_start_timer(ipr_cmd, 0);
4934		return IPR_RC_JOB_RETURN;
4935	}
4936
4937	if (ioa_cfg->in_ioa_bringdown) {
4938		ipr_cmd->job_step = ipr_ioa_bringdown_done;
4939	} else {
4940		ipr_cmd->job_step = ipr_reset_enable_ioa;
4941
4942		if (GET_DUMP == ioa_cfg->sdt_state) {
4943			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
4944			ipr_cmd->job_step = ipr_reset_wait_for_dump;
4945			schedule_work(&ioa_cfg->work_q);
4946			return IPR_RC_JOB_RETURN;
4947		}
4948	}
4949
4950	LEAVE;
4951	return IPR_RC_JOB_CONTINUE;
4952}
4953
4954/**
4955 * ipr_reset_start_bist - Run BIST on the adapter.
4956 * @ipr_cmd:	ipr command struct
4957 *
4958 * Description: This function runs BIST on the adapter, then delays 2 seconds.
4959 *
4960 * Return value:
4961 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4962 **/
4963static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
4964{
4965	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4966	int rc;
4967
4968	ENTER;
4969	pci_block_user_cfg_access(ioa_cfg->pdev);
4970	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
4971
4972	if (rc != PCIBIOS_SUCCESSFUL) {
4973		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
4974		rc = IPR_RC_JOB_CONTINUE;
4975	} else {
4976		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
4977		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
4978		rc = IPR_RC_JOB_RETURN;
4979	}
4980
4981	LEAVE;
4982	return rc;
4983}
4984
4985/**
4986 * ipr_reset_allowed - Query whether or not IOA can be reset
4987 * @ioa_cfg:	ioa config struct
4988 *
4989 * Return value:
4990 * 	0 if reset not allowed / non-zero if reset is allowed
4991 **/
4992static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
4993{
4994	volatile u32 temp_reg;
4995
4996	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4997	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
4998}
4999
5000/**
5001 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5002 * @ipr_cmd:	ipr command struct
5003 *
5004 * Description: This function waits for adapter permission to run BIST,
5005 * then runs BIST. If the adapter does not give permission after a
5006 * reasonable time, we will reset the adapter anyway. The risk of
5007 * resetting the adapter without warning it is losing the
5008 * persistent error log on the adapter. If the adapter is reset
5009 * while it is writing to its flash, the flash segment will have
5010 * bad ECC and be zeroed.
5011 *
5012 * Return value:
5013 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5014 **/
5015static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5016{
5017	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5018	int rc = IPR_RC_JOB_RETURN;
5019
5020	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5021		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5022		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5023	} else {
5024		ipr_cmd->job_step = ipr_reset_start_bist;
5025		rc = IPR_RC_JOB_CONTINUE;
5026	}
5027
5028	return rc;
5029}
5030
5031/**
5032 * ipr_reset_alert - Alert the adapter of a pending reset
5033 * @ipr_cmd:	ipr command struct
5034 *
5035 * Description: This function alerts the adapter that it will be reset.
5036 * If memory space is not currently enabled, proceed directly
5037 * to running BIST on the adapter. The timer must always be started
5038 * so we guarantee we do not run BIST from ipr_isr.
5039 *
5040 * Return value:
5041 * 	IPR_RC_JOB_RETURN
5042 **/
5043static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5044{
5045	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5046	u16 cmd_reg;
5047	int rc;
5048
5049	ENTER;
5050	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5051
5052	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5053		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5054		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5055		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5056	} else {
5057		ipr_cmd->job_step = ipr_reset_start_bist;
5058	}
5059
5060	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5061	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5062
5063	LEAVE;
5064	return IPR_RC_JOB_RETURN;
5065}
5066
5067/**
5068 * ipr_reset_ucode_download_done - Microcode download completion
5069 * @ipr_cmd:	ipr command struct
5070 *
5071 * Description: This function unmaps the microcode download buffer.
5072 *
5073 * Return value:
5074 * 	IPR_RC_JOB_CONTINUE
5075 **/
5076static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5077{
5078	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5079	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5080
5081	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5082		     sglist->num_sg, DMA_TO_DEVICE);
5083
5084	ipr_cmd->job_step = ipr_reset_alert;
5085	return IPR_RC_JOB_CONTINUE;
5086}
5087
5088/**
5089 * ipr_reset_ucode_download - Download microcode to the adapter
5090 * @ipr_cmd:	ipr command struct
5091 *
5092 * Description: This function checks to see if there is microcode
5093 * to download to the adapter. If there is, a download is performed.
5094 *
5095 * Return value:
5096 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5097 **/
5098static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5099{
5100	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5101	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5102
5103	ENTER;
5104	ipr_cmd->job_step = ipr_reset_alert;
5105
5106	if (!sglist)
5107		return IPR_RC_JOB_CONTINUE;
5108
5109	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5110	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5111	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5112	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
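	/* WRITE BUFFER carries a 24-bit parameter list length in CDB bytes 6-8 */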
5113	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5114	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5115	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5116
5117	if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
5118		dev_err(&ioa_cfg->pdev->dev,
5119			"Failed to map microcode download buffer\n");
5120		return IPR_RC_JOB_CONTINUE;
5121	}
5122
5123	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5124
5125	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5126		   IPR_WRITE_BUFFER_TIMEOUT);
5127
5128	LEAVE;
5129	return IPR_RC_JOB_RETURN;
5130}
5131
5132/**
5133 * ipr_reset_shutdown_ioa - Shutdown the adapter
5134 * @ipr_cmd:	ipr command struct
5135 *
5136 * Description: This function issues an adapter shutdown of the
5137 * specified type to the specified adapter as part of the
5138 * adapter reset job.
5139 *
5140 * Return value:
5141 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5142 **/
5143static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5144{
5145	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5146	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5147	unsigned long timeout;
5148	int rc = IPR_RC_JOB_CONTINUE;
5149
5150	ENTER;
5151	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5152		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5153		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5154		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5155		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5156
5157		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5158			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5159		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5160			timeout = IPR_INTERNAL_TIMEOUT;
5161		else
5162			timeout = IPR_SHUTDOWN_TIMEOUT;
5163
5164		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5165
5166		rc = IPR_RC_JOB_RETURN;
5167		ipr_cmd->job_step = ipr_reset_ucode_download;
5168	} else
5169		ipr_cmd->job_step = ipr_reset_alert;
5170
5171	LEAVE;
5172	return rc;
5173}
5174
5175/**
5176 * ipr_reset_ioa_job - Adapter reset job
5177 * @ipr_cmd:	ipr command struct
5178 *
5179 * Description: This function is the job router for the adapter reset job.
5180 *
5181 * Return value:
5182 * 	none
5183 **/
5184static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5185{
5186	u32 rc, ioasc;
5187	unsigned long scratch = ipr_cmd->u.scratch;
5188	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5189
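	/*
	 * Run job steps until one of them starts an asynchronous operation.
	 * A step returns IPR_RC_JOB_CONTINUE to run the next step inline, or
	 * IPR_RC_JOB_RETURN after queueing work that will re-enter this
	 * function when it completes.
	 */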
5190	do {
5191		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5192
5193		if (ioa_cfg->reset_cmd != ipr_cmd) {
5194			/*
5195			 * We are doing nested adapter resets and this is
5196			 * not the current reset job.
5197			 */
5198			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5199			return;
5200		}
5201
5202		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5203			dev_err(&ioa_cfg->pdev->dev,
5204				"0x%02X failed with IOASC: 0x%08X\n",
5205				ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5206
5207			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5208			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5209			return;
5210		}
5211
5212		ipr_reinit_ipr_cmnd(ipr_cmd);
5213		ipr_cmd->u.scratch = scratch;
5214		rc = ipr_cmd->job_step(ipr_cmd);
5215	} while (rc == IPR_RC_JOB_CONTINUE);
5216}
5217
5218/**
5219 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5220 * @ioa_cfg:		ioa config struct
5221 * @job_step:		first job step of reset job
5222 * @shutdown_type:	shutdown type
5223 *
5224 * Description: This function will initiate the reset of the given adapter
5225 * starting at the selected job step.
5226 * If the caller needs to wait on the completion of the reset,
5227 * the caller must sleep on the reset_wait_q.
5228 *
5229 * Return value:
5230 * 	none
5231 **/
5232static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5233				    int (*job_step) (struct ipr_cmnd *),
5234				    enum ipr_shutdown_type shutdown_type)
5235{
5236	struct ipr_cmnd *ipr_cmd;
5237
5238	ioa_cfg->in_reset_reload = 1;
5239	ioa_cfg->allow_cmds = 0;
5240	scsi_block_requests(ioa_cfg->host);
5241
5242	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5243	ioa_cfg->reset_cmd = ipr_cmd;
5244	ipr_cmd->job_step = job_step;
5245	ipr_cmd->u.shutdown_type = shutdown_type;
5246
5247	ipr_reset_ioa_job(ipr_cmd);
5248}
5249
5250/**
5251 * ipr_initiate_ioa_reset - Initiate an adapter reset
5252 * @ioa_cfg:		ioa config struct
5253 * @shutdown_type:	shutdown type
5254 *
5255 * Description: This function will initiate the reset of the given adapter.
5256 * If the caller needs to wait on the completion of the reset,
5257 * the caller must sleep on the reset_wait_q.
5258 *
5259 * Return value:
5260 * 	none
5261 **/
5262static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5263				   enum ipr_shutdown_type shutdown_type)
5264{
5265	if (ioa_cfg->ioa_is_dead)
5266		return;
5267
5268	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5269		ioa_cfg->sdt_state = ABORT_DUMP;
5270
5271	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5272		dev_err(&ioa_cfg->pdev->dev,
5273			"IOA taken offline - error recovery failed\n");
5274
5275		ioa_cfg->reset_retries = 0;
5276		ioa_cfg->ioa_is_dead = 1;
5277
5278		if (ioa_cfg->in_ioa_bringdown) {
5279			ioa_cfg->reset_cmd = NULL;
5280			ioa_cfg->in_reset_reload = 0;
5281			ipr_fail_all_ops(ioa_cfg);
5282			wake_up_all(&ioa_cfg->reset_wait_q);
5283
5284			spin_unlock_irq(ioa_cfg->host->host_lock);
5285			scsi_unblock_requests(ioa_cfg->host);
5286			spin_lock_irq(ioa_cfg->host->host_lock);
5287			return;
5288		} else {
5289			ioa_cfg->in_ioa_bringdown = 1;
5290			shutdown_type = IPR_SHUTDOWN_NONE;
5291		}
5292	}
5293
5294	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5295				shutdown_type);
5296}
5297
5298/**
5299 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5300 * @ioa_cfg:	ioa cfg struct
5301 *
5302 * Description: This is the second phase of adapter initialization.
5303 * This function takes care of initializing the adapter to the point
5304 * where it can accept new commands.
5305 *
5306 * Return value:
5307 * 	0 on success / -EIO on failure
5308 **/
5309static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5310{
5311	int rc = 0;
5312	unsigned long host_lock_flags = 0;
5313
5314	ENTER;
5315	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5316	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5317	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5318
5319	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5320	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5321	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5322
5323	if (ioa_cfg->ioa_is_dead) {
5324		rc = -EIO;
5325	} else if (ipr_invalid_adapter(ioa_cfg)) {
5326		if (!ipr_testmode)
5327			rc = -EIO;
5328
5329		dev_err(&ioa_cfg->pdev->dev,
5330			"Adapter not supported in this hardware configuration.\n");
5331	}
5332
5333	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5334
5335	LEAVE;
5336	return rc;
5337}
5338
5339/**
5340 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5341 * @ioa_cfg:	ioa config struct
5342 *
5343 * Return value:
5344 * 	none
5345 **/
5346static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5347{
5348	int i;
5349
5350	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5351		if (ioa_cfg->ipr_cmnd_list[i])
5352			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5353				      ioa_cfg->ipr_cmnd_list[i],
5354				      ioa_cfg->ipr_cmnd_list_dma[i]);
5355
5356		ioa_cfg->ipr_cmnd_list[i] = NULL;
5357	}
5358
5359	if (ioa_cfg->ipr_cmd_pool)
5360		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5361
5362	ioa_cfg->ipr_cmd_pool = NULL;
5363}
5364
5365/**
5366 * ipr_free_mem - Frees memory allocated for an adapter
5367 * @ioa_cfg:	ioa cfg struct
5368 *
5369 * Return value:
5370 * 	nothing
5371 **/
5372static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5373{
5374	int i;
5375
5376	kfree(ioa_cfg->res_entries);
5377	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5378			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5379	ipr_free_cmd_blks(ioa_cfg);
5380	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5381			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5382	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5383			    ioa_cfg->cfg_table,
5384			    ioa_cfg->cfg_table_dma);
5385
5386	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5387		pci_free_consistent(ioa_cfg->pdev,
5388				    sizeof(struct ipr_hostrcb),
5389				    ioa_cfg->hostrcb[i],
5390				    ioa_cfg->hostrcb_dma[i]);
5391	}
5392
5393	ipr_free_dump(ioa_cfg);
5394	kfree(ioa_cfg->saved_mode_pages);
5395	kfree(ioa_cfg->trace);
5396}
5397
5398/**
5399 * ipr_free_all_resources - Free all allocated resources for an adapter.
5400 * @ioa_cfg:	ioa config struct
5401 *
5402 * This function frees all allocated resources for the
5403 * specified adapter.
5404 *
5405 * Return value:
5406 * 	none
5407 **/
5408static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5409{
5410	struct pci_dev *pdev = ioa_cfg->pdev;
5411
5412	ENTER;
5413	free_irq(pdev->irq, ioa_cfg);
5414	iounmap(ioa_cfg->hdw_dma_regs);
5415	pci_release_regions(pdev);
5416	ipr_free_mem(ioa_cfg);
5417	scsi_host_put(ioa_cfg->host);
5418	pci_disable_device(pdev);
5419	LEAVE;
5420}
5421
5422/**
5423 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5424 * @ioa_cfg:	ioa config struct
5425 *
5426 * Return value:
5427 * 	0 on success / -ENOMEM on allocation failure
5428 **/
5429static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5430{
5431	struct ipr_cmnd *ipr_cmd;
5432	struct ipr_ioarcb *ioarcb;
5433	dma_addr_t dma_addr;
5434	int i;
5435
5436	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5437						 sizeof(struct ipr_cmnd), 8, 0);
5438
5439	if (!ioa_cfg->ipr_cmd_pool)
5440		return -ENOMEM;
5441
5442	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5443		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5444
5445		if (!ipr_cmd) {
5446			ipr_free_cmd_blks(ioa_cfg);
5447			return -ENOMEM;
5448		}
5449
5450		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5451		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5452		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5453
5454		ioarcb = &ipr_cmd->ioarcb;
5455		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
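		/*
		 * The adapter echoes this handle back in the host RRQ. The low
		 * two bits of each RRQ entry are used for the toggle/response
		 * bits, so the handle is the command block index shifted up by
		 * two.
		 */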
5456		ioarcb->host_response_handle = cpu_to_be32(i << 2);
5457		ioarcb->write_ioadl_addr =
5458			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5459		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5460		ioarcb->ioasa_host_pci_addr =
5461			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5462		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5463		ipr_cmd->cmd_index = i;
5464		ipr_cmd->ioa_cfg = ioa_cfg;
5465		ipr_cmd->sense_buffer_dma = dma_addr +
5466			offsetof(struct ipr_cmnd, sense_buffer);
5467
5468		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5469	}
5470
5471	return 0;
5472}
5473
5474/**
5475 * ipr_alloc_mem - Allocate memory for an adapter
5476 * @ioa_cfg:	ioa config struct
5477 *
5478 * Return value:
5479 * 	0 on success / non-zero for error
5480 **/
5481static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5482{
5483	struct pci_dev *pdev = ioa_cfg->pdev;
5484	int i, rc = -ENOMEM;
5485
5486	ENTER;
5487	ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
5488				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5489
5490	if (!ioa_cfg->res_entries)
5491		goto out;
5492
5493	memset(ioa_cfg->res_entries, 0,
5494	       sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
5495
5496	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5497		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5498
5499	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5500						sizeof(struct ipr_misc_cbs),
5501						&ioa_cfg->vpd_cbs_dma);
5502
5503	if (!ioa_cfg->vpd_cbs)
5504		goto out_free_res_entries;
5505
5506	if (ipr_alloc_cmd_blks(ioa_cfg))
5507		goto out_free_vpd_cbs;
5508
5509	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5510						 sizeof(u32) * IPR_NUM_CMD_BLKS,
5511						 &ioa_cfg->host_rrq_dma);
5512
5513	if (!ioa_cfg->host_rrq)
5514		goto out_ipr_free_cmd_blocks;
5515
5516	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5517						  sizeof(struct ipr_config_table),
5518						  &ioa_cfg->cfg_table_dma);
5519
5520	if (!ioa_cfg->cfg_table)
5521		goto out_free_host_rrq;
5522
5523	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5524		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5525							   sizeof(struct ipr_hostrcb),
5526							   &ioa_cfg->hostrcb_dma[i]);
5527
5528		if (!ioa_cfg->hostrcb[i])
5529			goto out_free_hostrcb_dma;
5530
5531		ioa_cfg->hostrcb[i]->hostrcb_dma =
5532			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5533		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5534	}
5535
5536	ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
5537				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5538
5539	if (!ioa_cfg->trace)
5540		goto out_free_hostrcb_dma;
5541
5542	memset(ioa_cfg->trace, 0,
5543	       sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
5544
5545	rc = 0;
5546out:
5547	LEAVE;
5548	return rc;
5549
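/*
 * Error unwind: i is the count of HCAM buffers successfully allocated above,
 * so only those are freed here; execution then falls through the labels below.
 */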
5550out_free_hostrcb_dma:
5551	while (i-- > 0) {
5552		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5553				    ioa_cfg->hostrcb[i],
5554				    ioa_cfg->hostrcb_dma[i]);
5555	}
5556	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5557			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5558out_free_host_rrq:
5559	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5560			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5561out_ipr_free_cmd_blocks:
5562	ipr_free_cmd_blks(ioa_cfg);
5563out_free_vpd_cbs:
5564	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5565			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5566out_free_res_entries:
5567	kfree(ioa_cfg->res_entries);
5568	goto out;
5569}
5570
5571/**
5572 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5573 * @ioa_cfg:	ioa config struct
5574 *
5575 * Return value:
5576 * 	none
5577 **/
5578static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5579{
5580	int i;
5581
5582	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5583		ioa_cfg->bus_attr[i].bus = i;
5584		ioa_cfg->bus_attr[i].qas_enabled = 0;
5585		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
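		/* An out-of-range ipr_max_speed module parameter falls back to the U160 rate */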
5586		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5587			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5588		else
5589			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5590	}
5591}
5592
5593/**
5594 * ipr_init_ioa_cfg - Initialize IOA config struct
5595 * @ioa_cfg:	ioa config struct
5596 * @host:		scsi host struct
5597 * @pdev:		PCI dev struct
5598 *
5599 * Return value:
5600 * 	none
5601 **/
5602static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5603				       struct Scsi_Host *host, struct pci_dev *pdev)
5604{
5605	const struct ipr_interrupt_offsets *p;
5606	struct ipr_interrupts *t;
5607	void __iomem *base;
5608
5609	ioa_cfg->host = host;
5610	ioa_cfg->pdev = pdev;
5611	ioa_cfg->log_level = ipr_log_level;
5612	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5613	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5614	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5615	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5616	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5617	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5618	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5619	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5620
5621	INIT_LIST_HEAD(&ioa_cfg->free_q);
5622	INIT_LIST_HEAD(&ioa_cfg->pending_q);
5623	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5624	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5625	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5626	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5627	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5628	init_waitqueue_head(&ioa_cfg->reset_wait_q);
5629	ioa_cfg->sdt_state = INACTIVE;
5630
5631	ipr_initialize_bus_attr(ioa_cfg);
5632
5633	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5634	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5635	host->max_channel = IPR_MAX_BUS_TO_SCAN;
5636	host->unique_id = host->host_no;
5637	host->max_cmd_len = IPR_MAX_CDB_LEN;
5638	pci_set_drvdata(pdev, ioa_cfg);
5639
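	/* Translate the chip-specific register offsets into ioremapped virtual addresses */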
5640	p = &ioa_cfg->chip_cfg->regs;
5641	t = &ioa_cfg->regs;
5642	base = ioa_cfg->hdw_dma_regs;
5643
5644	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5645	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5646	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5647	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5648	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5649	t->ioarrin_reg = base + p->ioarrin_reg;
5650	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5651	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5652	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5653}
5654
5655/**
5656 * ipr_get_chip_cfg - Find adapter chip configuration
5657 * @dev_id:		PCI device id struct
5658 *
5659 * Return value:
5660 * 	ptr to chip config on success / NULL on failure
5661 **/
5662static const struct ipr_chip_cfg_t * __devinit
5663ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5664{
5665	int i;
5666
5667	if (dev_id->driver_data)
5668		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5669
5670	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5671		if (ipr_chip[i].vendor == dev_id->vendor &&
5672		    ipr_chip[i].device == dev_id->device)
5673			return ipr_chip[i].cfg;
5674	return NULL;
5675}
5676
5677/**
5678 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5679 * @pdev:		PCI device struct
5680 * @dev_id:		PCI device id struct
5681 *
5682 * Return value:
5683 * 	0 on success / non-zero on failure
5684 **/
5685static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5686				   const struct pci_device_id *dev_id)
5687{
5688	struct ipr_ioa_cfg *ioa_cfg;
5689	struct Scsi_Host *host;
5690	unsigned long ipr_regs_pci;
5691	void __iomem *ipr_regs;
5692	int rc = PCIBIOS_SUCCESSFUL;	/* must be signed so the rc < 0 error checks below work */
5693
5694	ENTER;
5695
5696	if ((rc = pci_enable_device(pdev))) {
5697		dev_err(&pdev->dev, "Cannot enable adapter\n");
5698		goto out;
5699	}
5700
5701	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
5702
5703	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5704
5705	if (!host) {
5706		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5707		rc = -ENOMEM;
5708		goto out_disable;
5709	}
5710
5711	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5712	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
5713
5714	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
5715
5716	if (!ioa_cfg->chip_cfg) {
5717		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
5718			dev_id->vendor, dev_id->device);
		rc = -ENODEV;	/* fail the probe; rc would otherwise still be 0 here */
5719		goto out_scsi_host_put;
5720	}
5721
5722	ipr_regs_pci = pci_resource_start(pdev, 0);
5723
5724	rc = pci_request_regions(pdev, IPR_NAME);
5725	if (rc < 0) {
5726		dev_err(&pdev->dev,
5727			"Couldn't register memory range of registers\n");
5728		goto out_scsi_host_put;
5729	}
5730
5731	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5732
5733	if (!ipr_regs) {
5734		dev_err(&pdev->dev,
5735			"Couldn't map memory range of registers\n");
5736		rc = -ENOMEM;
5737		goto out_release_regions;
5738	}
5739
5740	ioa_cfg->hdw_dma_regs = ipr_regs;
5741	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
5742	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5743
5744	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5745
5746	pci_set_master(pdev);
5747
5748	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5749	if (rc < 0) {
5750		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5751		goto cleanup_nomem;
5752	}
5753
5754	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5755				   ioa_cfg->chip_cfg->cache_line_size);
5756
5757	if (rc != PCIBIOS_SUCCESSFUL) {
5758		dev_err(&pdev->dev, "Write of cache line size failed\n");
5759		rc = -EIO;
5760		goto cleanup_nomem;
5761	}
5762
5763	/* Save away PCI config space for use following IOA reset */
5764	rc = pci_save_state(pdev);
5765
5766	if (rc != PCIBIOS_SUCCESSFUL) {
5767		dev_err(&pdev->dev, "Failed to save PCI config space\n");
5768		rc = -EIO;
5769		goto cleanup_nomem;
5770	}
5771
5772	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
5773		goto cleanup_nomem;
5774
5775	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
5776		goto cleanup_nomem;
5777
5778	rc = ipr_alloc_mem(ioa_cfg);
5779	if (rc < 0) {
5780		dev_err(&pdev->dev,
5781			"Couldn't allocate enough memory for device driver!\n");
5782		goto cleanup_nomem;
5783	}
5784
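	/*
	 * Mask and clear everything except the "transition to operational"
	 * interrupt before registering the shared IRQ handler.
	 */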
5785	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
5786	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
5787
5788	if (rc) {
5789		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
5790			pdev->irq, rc);
5791		goto cleanup_nolog;
5792	}
5793
5794	spin_lock(&ipr_driver_lock);
5795	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
5796	spin_unlock(&ipr_driver_lock);
5797
5798	LEAVE;
5799out:
5800	return rc;
5801
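/* Error unwind: release everything acquired above, in reverse order */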
5802cleanup_nolog:
5803	ipr_free_mem(ioa_cfg);
5804cleanup_nomem:
5805	iounmap(ipr_regs);
5806out_release_regions:
5807	pci_release_regions(pdev);
5808out_scsi_host_put:
5809	scsi_host_put(host);
5810out_disable:
5811	pci_disable_device(pdev);
5812	goto out;
5813}
5814
5815/**
5816 * ipr_scan_vsets - Scans for VSET devices
5817 * @ioa_cfg:	ioa config struct
5818 *
5819 * Description: VSET resources do not follow SAM (there may be sparse LUNs
5820 * with no LUN 0), so the midlayer scan misses them and we scan for them here.
5821 *
5822 * Return value:
5823 * 	none
5824 **/
5825static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
5826{
5827	int target, lun;
5828
5829	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
5830		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
5831			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
5832}
5833
5834/**
5835 * ipr_initiate_ioa_bringdown - Bring down an adapter
5836 * @ioa_cfg:		ioa config struct
5837 * @shutdown_type:	shutdown type
5838 *
5839 * Description: This function will initiate bringing down the adapter.
5840 * This consists of issuing an IOA shutdown to the adapter
5841 * to flush the cache, and running BIST.
5842 * If the caller needs to wait on the completion of the reset,
5843 * the caller must sleep on the reset_wait_q.
5844 *
5845 * Return value:
5846 * 	none
5847 **/
5848static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
5849				       enum ipr_shutdown_type shutdown_type)
5850{
5851	ENTER;
5852	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5853		ioa_cfg->sdt_state = ABORT_DUMP;
5854	ioa_cfg->reset_retries = 0;
5855	ioa_cfg->in_ioa_bringdown = 1;
5856	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
5857	LEAVE;
5858}
5859
5860/**
5861 * __ipr_remove - Remove a single adapter
5862 * @pdev:	pci device struct
5863 *
5864 * Adapter hot plug remove entry point.
5865 *
5866 * Return value:
5867 * 	none
5868 **/
5869static void __ipr_remove(struct pci_dev *pdev)
5870{
5871	unsigned long host_lock_flags = 0;
5872	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5873	ENTER;
5874
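	/*
	 * Shut the adapter down cleanly, then wait for the reset/reload to
	 * finish and for queued work to drain before freeing anything.
	 */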
5875	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5876	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
5877
5878	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5879	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5880	flush_scheduled_work();
5881	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5882
5883	spin_lock(&ipr_driver_lock);
5884	list_del(&ioa_cfg->queue);
5885	spin_unlock(&ipr_driver_lock);
5886
5887	if (ioa_cfg->sdt_state == ABORT_DUMP)
5888		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
5889	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5890
5891	ipr_free_all_resources(ioa_cfg);
5892
5893	LEAVE;
5894}
5895
5896/**
5897 * ipr_remove - IOA hot plug remove entry point
5898 * @pdev:	pci device struct
5899 *
5900 * Adapter hot plug remove entry point.
5901 *
5902 * Return value:
5903 * 	none
5904 **/
5905static void ipr_remove(struct pci_dev *pdev)
5906{
5907	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5908
5909	ENTER;
5910
5911	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5912			      &ipr_trace_attr);
5913	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5914			     &ipr_dump_attr);
5915	scsi_remove_host(ioa_cfg->host);
5916
5917	__ipr_remove(pdev);
5918
5919	LEAVE;
5920}
5921
5922/**
5923 * ipr_probe - Adapter hot plug add entry point
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
5924 *
5925 * Return value:
5926 * 	0 on success / non-zero on failure
5927 **/
5928static int __devinit ipr_probe(struct pci_dev *pdev,
5929			       const struct pci_device_id *dev_id)
5930{
5931	struct ipr_ioa_cfg *ioa_cfg;
5932	int rc;
5933
5934	rc = ipr_probe_ioa(pdev, dev_id);
5935
5936	if (rc)
5937		return rc;
5938
5939	ioa_cfg = pci_get_drvdata(pdev);
5940	rc = ipr_probe_ioa_part2(ioa_cfg);
5941
5942	if (rc) {
5943		__ipr_remove(pdev);
5944		return rc;
5945	}
5946
5947	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
5948
5949	if (rc) {
5950		__ipr_remove(pdev);
5951		return rc;
5952	}
5953
5954	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5955				   &ipr_trace_attr);
5956
5957	if (rc) {
5958		scsi_remove_host(ioa_cfg->host);
5959		__ipr_remove(pdev);
5960		return rc;
5961	}
5962
5963	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
5964				   &ipr_dump_attr);
5965
5966	if (rc) {
5967		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
5968				      &ipr_trace_attr);
5969		scsi_remove_host(ioa_cfg->host);
5970		__ipr_remove(pdev);
5971		return rc;
5972	}
5973
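	/*
	 * Let the midlayer scan the physical buses, then scan the VSET bus
	 * ourselves (sparse LUNs) and expose the IOA itself as a device.
	 */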
5974	scsi_scan_host(ioa_cfg->host);
5975	ipr_scan_vsets(ioa_cfg);
5976	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
5977	ioa_cfg->allow_ml_add_del = 1;
5978	ioa_cfg->host->max_channel = IPR_VSET_BUS;
5979	schedule_work(&ioa_cfg->work_q);
5980	return 0;
5981}
5982
5983/**
5984 * ipr_shutdown - Shutdown handler.
5985 * @pdev:	pci device struct
5986 *
5987 * This function is invoked upon system shutdown/reboot. It will issue
5988 * an adapter shutdown to the adapter to flush the write cache.
5989 *
5990 * Return value:
5991 * 	none
5992 **/
5993static void ipr_shutdown(struct pci_dev *pdev)
5994{
5995	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5996	unsigned long lock_flags = 0;
5997
5998	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5999	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6000	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6001	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6002}
6003
6004static struct pci_device_id ipr_pci_table[] __devinitdata = {
6005	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6006		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6007		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6008	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6009		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6010	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6011	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6012		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6013	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6014	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6015		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6016	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6017	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6018		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6019	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6020	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6021		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6022	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6023	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6024		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6025	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6026	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6027		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6028		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6029	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6030		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6031		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6032	{ }
6033};
6034MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6035
6036static struct pci_driver ipr_driver = {
6037	.name = IPR_NAME,
6038	.id_table = ipr_pci_table,
6039	.probe = ipr_probe,
6040	.remove = ipr_remove,
6041	.shutdown = ipr_shutdown,
6042};
6043
6044/**
6045 * ipr_init - Module entry point
6046 *
6047 * Return value:
6048 * 	0 on success / negative value on failure
6049 **/
6050static int __init ipr_init(void)
6051{
6052	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6053		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6054
6055	return pci_module_init(&ipr_driver);
6056}
6057
6058/**
6059 * ipr_exit - Module unload
6060 *
6061 * Module unload entry point.
6062 *
6063 * Return value:
6064 * 	none
6065 **/
6066static void __exit ipr_exit(void)
6067{
6068	pci_unregister_driver(&ipr_driver);
6069}
6070
6071module_init(ipr_init);
6072module_exit(ipr_exit);
6073