ipr.c revision a92fa25c63a788758bd52e9123504d133210c8b7
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

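/* Rates corresponding to the max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */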
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask.
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
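	/* Read back to ensure the mask/clear writes above have posted to the adapter */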
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

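	/* Make sure data parity error recovery and relaxed ordering stay enabled
	   when the saved value is written back in ipr_set_pcix_cmd_reg() */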
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
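	/* Complete every command on the pending queue with an IOA-was-reset IOASC */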
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

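	/* Order the IOARCB setup above ahead of the IOARRIN write that hands it to the adapter */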
	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

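	/* Drop the host lock while sleeping; ipr_internal_cmd_done() posts the completion */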
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

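		/* As in ipr_do_req(), order the IOARCB setup ahead of the IOARRIN write */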
		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
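			/* Share the SCSI target id with any existing resource that has the same device id */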
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
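	/* Remaining path elements print as "-XX"; the resource path is terminated by 0xff */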
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
					sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
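		/* New device: take an entry from the free queue, or requeue the HCAM if none are free */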
1278		if (list_empty(&ioa_cfg->free_res_q)) {
1279			ipr_send_hcam(ioa_cfg,
1280				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1281				      hostrcb);
1282			return;
1283		}
1284
1285		res = list_entry(ioa_cfg->free_res_q.next,
1286				 struct ipr_resource_entry, queue);
1287
1288		list_del(&res->queue);
1289		ipr_init_res_entry(res, &cfgtew);
1290		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1291	}
1292
1293	ipr_update_res_entry(res, &cfgtew);
1294
1295	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1296		if (res->sdev) {
1297			res->del_from_ml = 1;
1298			res->res_handle = IPR_INVALID_RES_HANDLE;
1299			if (ioa_cfg->allow_ml_add_del)
1300				schedule_work(&ioa_cfg->work_q);
1301		} else {
1302			ipr_clear_res_target(res);
1303			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1304		}
1305	} else if (!res->sdev || res->del_from_ml) {
1306		res->add_to_ml = 1;
1307		if (ioa_cfg->allow_ml_add_del)
1308			schedule_work(&ioa_cfg->work_q);
1309	}
1310
1311	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1312}
1313
1314/**
1315 * ipr_process_ccn - Op done function for a CCN.
1316 * @ipr_cmd:	ipr command struct
1317 *
1318 * This function is the op done function for a configuration
1319 * change notification host controlled async from the adapter.
1320 *
1321 * Return value:
1322 * 	none
1323 **/
1324static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1325{
1326	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1327	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1328	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1329
1330	list_del(&hostrcb->queue);
1331	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1332
1333	if (ioasc) {
1334		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1335			dev_err(&ioa_cfg->pdev->dev,
1336				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1337
1338		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1339	} else {
1340		ipr_handle_config_change(ioa_cfg, hostrcb);
1341	}
1342}
1343
1344/**
1345 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1346 * @i:		index into buffer
1347 * @buf:		string to modify
1348 *
1349 * This function will strip all trailing whitespace, pad the end
1350 * of the string with a single space, and NULL terminate the string.
1351 *
1352 * Return value:
1353 * 	new length of string
1354 **/
1355static int strip_and_pad_whitespace(int i, char *buf)
1356{
1357	while (i && buf[i] == ' ')
1358		i--;
1359	buf[i+1] = ' ';
1360	buf[i+2] = '\0';
1361	return i + 2;
1362}
1363
1364/**
1365 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1366 * @prefix:		string to print at start of printk
1367 * @hostrcb:	hostrcb pointer
1368 * @vpd:		vendor/product id/sn struct
1369 *
1370 * Return value:
1371 * 	none
1372 **/
1373static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1374				struct ipr_vpd *vpd)
1375{
1376	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1377	int i = 0;
1378
1379	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1380	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1381
1382	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1383	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1384
1385	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1386	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1387
1388	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1389}
1390
1391/**
1392 * ipr_log_vpd - Log the passed VPD to the error log.
1393 * @vpd:		vendor/product id/sn struct
1394 *
1395 * Return value:
1396 * 	none
1397 **/
1398static void ipr_log_vpd(struct ipr_vpd *vpd)
1399{
1400	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1401		    + IPR_SERIAL_NUM_LEN];
1402
1403	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1404	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1405	       IPR_PROD_ID_LEN);
1406	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1407	ipr_err("Vendor/Product ID: %s\n", buffer);
1408
1409	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1410	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1411	ipr_err("    Serial Number: %s\n", buffer);
1412}
1413
1414/**
1415 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1416 * @prefix:		string to print at start of printk
1417 * @hostrcb:	hostrcb pointer
1418 * @vpd:		vendor/product id/sn/wwn struct
1419 *
1420 * Return value:
1421 * 	none
1422 **/
1423static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1424				    struct ipr_ext_vpd *vpd)
1425{
1426	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1427	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1428		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1429}
1430
1431/**
1432 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1433 * @vpd:		vendor/product id/sn/wwn struct
1434 *
1435 * Return value:
1436 * 	none
1437 **/
1438static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1439{
1440	ipr_log_vpd(&vpd->vpd);
1441	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1442		be32_to_cpu(vpd->wwid[1]));
1443}
1444
1445/**
1446 * ipr_log_enhanced_cache_error - Log a cache error.
1447 * @ioa_cfg:	ioa config struct
1448 * @hostrcb:	hostrcb struct
1449 *
1450 * Return value:
1451 * 	none
1452 **/
1453static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1454					 struct ipr_hostrcb *hostrcb)
1455{
1456	struct ipr_hostrcb_type_12_error *error;
1457
1458	if (ioa_cfg->sis64)
1459		error = &hostrcb->hcam.u.error64.u.type_12_error;
1460	else
1461		error = &hostrcb->hcam.u.error.u.type_12_error;
1462
1463	ipr_err("-----Current Configuration-----\n");
1464	ipr_err("Cache Directory Card Information:\n");
1465	ipr_log_ext_vpd(&error->ioa_vpd);
1466	ipr_err("Adapter Card Information:\n");
1467	ipr_log_ext_vpd(&error->cfc_vpd);
1468
1469	ipr_err("-----Expected Configuration-----\n");
1470	ipr_err("Cache Directory Card Information:\n");
1471	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1472	ipr_err("Adapter Card Information:\n");
1473	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1474
1475	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1476		     be32_to_cpu(error->ioa_data[0]),
1477		     be32_to_cpu(error->ioa_data[1]),
1478		     be32_to_cpu(error->ioa_data[2]));
1479}
1480
1481/**
1482 * ipr_log_cache_error - Log a cache error.
1483 * @ioa_cfg:	ioa config struct
1484 * @hostrcb:	hostrcb struct
1485 *
1486 * Return value:
1487 * 	none
1488 **/
1489static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1490				struct ipr_hostrcb *hostrcb)
1491{
1492	struct ipr_hostrcb_type_02_error *error =
1493		&hostrcb->hcam.u.error.u.type_02_error;
1494
1495	ipr_err("-----Current Configuration-----\n");
1496	ipr_err("Cache Directory Card Information:\n");
1497	ipr_log_vpd(&error->ioa_vpd);
1498	ipr_err("Adapter Card Information:\n");
1499	ipr_log_vpd(&error->cfc_vpd);
1500
1501	ipr_err("-----Expected Configuration-----\n");
1502	ipr_err("Cache Directory Card Information:\n");
1503	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1504	ipr_err("Adapter Card Information:\n");
1505	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1506
1507	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1508		     be32_to_cpu(error->ioa_data[0]),
1509		     be32_to_cpu(error->ioa_data[1]),
1510		     be32_to_cpu(error->ioa_data[2]));
1511}
1512
1513/**
1514 * ipr_log_enhanced_config_error - Log a configuration error.
1515 * @ioa_cfg:	ioa config struct
1516 * @hostrcb:	hostrcb struct
1517 *
1518 * Return value:
1519 * 	none
1520 **/
1521static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1522					  struct ipr_hostrcb *hostrcb)
1523{
1524	int errors_logged, i;
1525	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1526	struct ipr_hostrcb_type_13_error *error;
1527
1528	error = &hostrcb->hcam.u.error.u.type_13_error;
1529	errors_logged = be32_to_cpu(error->errors_logged);
1530
1531	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1532		be32_to_cpu(error->errors_detected), errors_logged);
1533
1534	dev_entry = error->dev;
1535
1536	for (i = 0; i < errors_logged; i++, dev_entry++) {
1537		ipr_err_separator;
1538
1539		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1540		ipr_log_ext_vpd(&dev_entry->vpd);
1541
1542		ipr_err("-----New Device Information-----\n");
1543		ipr_log_ext_vpd(&dev_entry->new_vpd);
1544
1545		ipr_err("Cache Directory Card Information:\n");
1546		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1547
1548		ipr_err("Adapter Card Information:\n");
1549		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1550	}
1551}
1552
1553/**
1554 * ipr_log_sis64_config_error - Log a device error.
1555 * @ioa_cfg:	ioa config struct
1556 * @hostrcb:	hostrcb struct
1557 *
1558 * Return value:
1559 * 	none
1560 **/
1561static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1562				       struct ipr_hostrcb *hostrcb)
1563{
1564	int errors_logged, i;
1565	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1566	struct ipr_hostrcb_type_23_error *error;
1567	char buffer[IPR_MAX_RES_PATH_LENGTH];
1568
1569	error = &hostrcb->hcam.u.error64.u.type_23_error;
1570	errors_logged = be32_to_cpu(error->errors_logged);
1571
1572	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1573		be32_to_cpu(error->errors_detected), errors_logged);
1574
1575	dev_entry = error->dev;
1576
1577	for (i = 0; i < errors_logged; i++, dev_entry++) {
1578		ipr_err_separator;
1579
1580		ipr_err("Device %d : %s", i + 1,
1581			 ipr_format_res_path(dev_entry->res_path, buffer,
1582					     sizeof(buffer)));
1583		ipr_log_ext_vpd(&dev_entry->vpd);
1584
1585		ipr_err("-----New Device Information-----\n");
1586		ipr_log_ext_vpd(&dev_entry->new_vpd);
1587
1588		ipr_err("Cache Directory Card Information:\n");
1589		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1590
1591		ipr_err("Adapter Card Information:\n");
1592		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1593	}
1594}
1595
1596/**
1597 * ipr_log_config_error - Log a configuration error.
1598 * @ioa_cfg:	ioa config struct
1599 * @hostrcb:	hostrcb struct
1600 *
1601 * Return value:
1602 * 	none
1603 **/
1604static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1605				 struct ipr_hostrcb *hostrcb)
1606{
1607	int errors_logged, i;
1608	struct ipr_hostrcb_device_data_entry *dev_entry;
1609	struct ipr_hostrcb_type_03_error *error;
1610
1611	error = &hostrcb->hcam.u.error.u.type_03_error;
1612	errors_logged = be32_to_cpu(error->errors_logged);
1613
1614	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1615		be32_to_cpu(error->errors_detected), errors_logged);
1616
1617	dev_entry = error->dev;
1618
1619	for (i = 0; i < errors_logged; i++, dev_entry++) {
1620		ipr_err_separator;
1621
1622		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1623		ipr_log_vpd(&dev_entry->vpd);
1624
1625		ipr_err("-----New Device Information-----\n");
1626		ipr_log_vpd(&dev_entry->new_vpd);
1627
1628		ipr_err("Cache Directory Card Information:\n");
1629		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1630
1631		ipr_err("Adapter Card Information:\n");
1632		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1633
1634		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1635			be32_to_cpu(dev_entry->ioa_data[0]),
1636			be32_to_cpu(dev_entry->ioa_data[1]),
1637			be32_to_cpu(dev_entry->ioa_data[2]),
1638			be32_to_cpu(dev_entry->ioa_data[3]),
1639			be32_to_cpu(dev_entry->ioa_data[4]));
1640	}
1641}
1642
1643/**
1644 * ipr_log_enhanced_array_error - Log an array configuration error.
1645 * @ioa_cfg:	ioa config struct
1646 * @hostrcb:	hostrcb struct
1647 *
1648 * Return value:
1649 * 	none
1650 **/
1651static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1652					 struct ipr_hostrcb *hostrcb)
1653{
1654	int i, num_entries;
1655	struct ipr_hostrcb_type_14_error *error;
1656	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1657	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1658
1659	error = &hostrcb->hcam.u.error.u.type_14_error;
1660
1661	ipr_err_separator;
1662
1663	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1664		error->protection_level,
1665		ioa_cfg->host->host_no,
1666		error->last_func_vset_res_addr.bus,
1667		error->last_func_vset_res_addr.target,
1668		error->last_func_vset_res_addr.lun);
1669
1670	ipr_err_separator;
1671
1672	array_entry = error->array_member;
1673	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1674			    ARRAY_SIZE(error->array_member));
1675
1676	for (i = 0; i < num_entries; i++, array_entry++) {
1677		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1678			continue;
1679
1680		if (be32_to_cpu(error->exposed_mode_adn) == i)
1681			ipr_err("Exposed Array Member %d:\n", i);
1682		else
1683			ipr_err("Array Member %d:\n", i);
1684
1685		ipr_log_ext_vpd(&array_entry->vpd);
1686		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1687		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1688				 "Expected Location");
1689
1690		ipr_err_separator;
1691	}
1692}
1693
1694/**
1695 * ipr_log_array_error - Log an array configuration error.
1696 * @ioa_cfg:	ioa config struct
1697 * @hostrcb:	hostrcb struct
1698 *
1699 * Return value:
1700 * 	none
1701 **/
1702static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1703				struct ipr_hostrcb *hostrcb)
1704{
1705	int i;
1706	struct ipr_hostrcb_type_04_error *error;
1707	struct ipr_hostrcb_array_data_entry *array_entry;
1708	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1709
1710	error = &hostrcb->hcam.u.error.u.type_04_error;
1711
1712	ipr_err_separator;
1713
1714	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1715		error->protection_level,
1716		ioa_cfg->host->host_no,
1717		error->last_func_vset_res_addr.bus,
1718		error->last_func_vset_res_addr.target,
1719		error->last_func_vset_res_addr.lun);
1720
1721	ipr_err_separator;
1722
1723	array_entry = error->array_member;
1724
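	/* 18 entries in total: the first ten are read from array_member
	 * and, once i reaches 9, the remaining eight from array_member2. */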
1725	for (i = 0; i < 18; i++) {
1726		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1727			continue;
1728
1729		if (be32_to_cpu(error->exposed_mode_adn) == i)
1730			ipr_err("Exposed Array Member %d:\n", i);
1731		else
1732			ipr_err("Array Member %d:\n", i);
1733
1734		ipr_log_vpd(&array_entry->vpd);
1735
1736		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1737		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1738				 "Expected Location");
1739
1740		ipr_err_separator;
1741
1742		if (i == 9)
1743			array_entry = error->array_member2;
1744		else
1745			array_entry++;
1746	}
1747}
1748
1749/**
1750 * ipr_log_hex_data - Log additional hex IOA error data.
1751 * @ioa_cfg:	ioa config struct
1752 * @data:		IOA error data
1753 * @len:		data length in bytes
1754 *
1755 * Return value:
1756 * 	none
1757 **/
1758static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1759{
1760	int i;
1761
1762	if (len == 0)
1763		return;
1764
1765	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1766		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1767
1768	for (i = 0; i < len / 4; i += 4) {
1769		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1770			be32_to_cpu(data[i]),
1771			be32_to_cpu(data[i+1]),
1772			be32_to_cpu(data[i+2]),
1773			be32_to_cpu(data[i+3]));
1774	}
1775}
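
/*
 * Illustrative layout of the hex dump emitted by ipr_log_hex_data()
 * (values are made up): each line is a byte offset followed by four
 * big-endian words, i.e. 16 bytes per line:
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */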
1776
1777/**
1778 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1779 * @ioa_cfg:	ioa config struct
1780 * @hostrcb:	hostrcb struct
1781 *
1782 * Return value:
1783 * 	none
1784 **/
1785static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1786					    struct ipr_hostrcb *hostrcb)
1787{
1788	struct ipr_hostrcb_type_17_error *error;
1789
1790	if (ioa_cfg->sis64)
1791		error = &hostrcb->hcam.u.error64.u.type_17_error;
1792	else
1793		error = &hostrcb->hcam.u.error.u.type_17_error;
1794
1795	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1796	strim(error->failure_reason);
1797
1798	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1799		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1800	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1801	ipr_log_hex_data(ioa_cfg, error->data,
1802			 be32_to_cpu(hostrcb->hcam.length) -
1803			 (offsetof(struct ipr_hostrcb_error, u) +
1804			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1805}
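
/*
 * The hex-data length passed to ipr_log_hex_data() above is the total
 * HCAM length minus everything that precedes the variable-length data[]
 * member: the common error header plus the fixed part of the type 17
 * overlay.
 */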
1806
1807/**
1808 * ipr_log_dual_ioa_error - Log a dual adapter error.
1809 * @ioa_cfg:	ioa config struct
1810 * @hostrcb:	hostrcb struct
1811 *
1812 * Return value:
1813 * 	none
1814 **/
1815static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1816				   struct ipr_hostrcb *hostrcb)
1817{
1818	struct ipr_hostrcb_type_07_error *error;
1819
1820	error = &hostrcb->hcam.u.error.u.type_07_error;
1821	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1822	strim(error->failure_reason);
1823
1824	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1825		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1826	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1827	ipr_log_hex_data(ioa_cfg, error->data,
1828			 be32_to_cpu(hostrcb->hcam.length) -
1829			 (offsetof(struct ipr_hostrcb_error, u) +
1830			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1831}
1832
1833static const struct {
1834	u8 active;
1835	char *desc;
1836} path_active_desc[] = {
1837	{ IPR_PATH_NO_INFO, "Path" },
1838	{ IPR_PATH_ACTIVE, "Active path" },
1839	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1840};
1841
1842static const struct {
1843	u8 state;
1844	char *desc;
1845} path_state_desc[] = {
1846	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1847	{ IPR_PATH_HEALTHY, "is healthy" },
1848	{ IPR_PATH_DEGRADED, "is degraded" },
1849	{ IPR_PATH_FAILED, "is failed" }
1850};
1851
1852/**
1853 * ipr_log_fabric_path - Log a fabric path error
1854 * @hostrcb:	hostrcb struct
1855 * @fabric:		fabric descriptor
1856 *
1857 * Return value:
1858 * 	none
1859 **/
1860static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1861				struct ipr_hostrcb_fabric_desc *fabric)
1862{
1863	int i, j;
1864	u8 path_state = fabric->path_state;
1865	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1866	u8 state = path_state & IPR_PATH_STATE_MASK;
1867
1868	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1869		if (path_active_desc[i].active != active)
1870			continue;
1871
1872		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1873			if (path_state_desc[j].state != state)
1874				continue;
1875
1876			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1877				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1878					     path_active_desc[i].desc, path_state_desc[j].desc,
1879					     fabric->ioa_port);
1880			} else if (fabric->cascaded_expander == 0xff) {
1881				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1882					     path_active_desc[i].desc, path_state_desc[j].desc,
1883					     fabric->ioa_port, fabric->phy);
1884			} else if (fabric->phy == 0xff) {
1885				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1886					     path_active_desc[i].desc, path_state_desc[j].desc,
1887					     fabric->ioa_port, fabric->cascaded_expander);
1888			} else {
1889				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1890					     path_active_desc[i].desc, path_state_desc[j].desc,
1891					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1892			}
1893			return;
1894		}
1895	}
1896
1897	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1898		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1899}
1900
1901/**
1902 * ipr_log64_fabric_path - Log a fabric path error
1903 * @hostrcb:	hostrcb struct
1904 * @fabric:		fabric descriptor
1905 *
1906 * Return value:
1907 * 	none
1908 **/
1909static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1910				  struct ipr_hostrcb64_fabric_desc *fabric)
1911{
1912	int i, j;
1913	u8 path_state = fabric->path_state;
1914	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1915	u8 state = path_state & IPR_PATH_STATE_MASK;
1916	char buffer[IPR_MAX_RES_PATH_LENGTH];
1917
1918	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1919		if (path_active_desc[i].active != active)
1920			continue;
1921
1922		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1923			if (path_state_desc[j].state != state)
1924				continue;
1925
1926			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1927				     path_active_desc[i].desc, path_state_desc[j].desc,
1928				     ipr_format_res_path(fabric->res_path, buffer,
1929							 sizeof(buffer)));
1930			return;
1931		}
1932	}
1933
1934	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1935		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1936}
1937
1938static const struct {
1939	u8 type;
1940	char *desc;
1941} path_type_desc[] = {
1942	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1943	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1944	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1945	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1946};
1947
1948static const struct {
1949	u8 status;
1950	char *desc;
1951} path_status_desc[] = {
1952	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1953	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1954	{ IPR_PATH_CFG_FAILED, "Failed" },
1955	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1956	{ IPR_PATH_NOT_DETECTED, "Missing" },
1957	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1958};
1959
1960static const char *link_rate[] = {
1961	"unknown",
1962	"disabled",
1963	"phy reset problem",
1964	"spinup hold",
1965	"port selector",
1966	"unknown",
1967	"unknown",
1968	"unknown",
1969	"1.5Gbps",
1970	"3.0Gbps",
1971	"unknown",
1972	"unknown",
1973	"unknown",
1974	"unknown",
1975	"unknown",
1976	"unknown"
1977};
1978
1979/**
1980 * ipr_log_path_elem - Log a fabric path element.
1981 * @hostrcb:	hostrcb struct
1982 * @cfg:		fabric path element struct
1983 *
1984 * Return value:
1985 * 	none
1986 **/
1987static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1988			      struct ipr_hostrcb_config_element *cfg)
1989{
1990	int i, j;
1991	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1992	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1993
1994	if (type == IPR_PATH_CFG_NOT_EXIST)
1995		return;
1996
1997	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1998		if (path_type_desc[i].type != type)
1999			continue;
2000
2001		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2002			if (path_status_desc[j].status != status)
2003				continue;
2004
2005			if (type == IPR_PATH_CFG_IOA_PORT) {
2006				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2007					     path_status_desc[j].desc, path_type_desc[i].desc,
2008					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2009					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2010			} else {
2011				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2012					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2013						     path_status_desc[j].desc, path_type_desc[i].desc,
2014						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2015						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2016				} else if (cfg->cascaded_expander == 0xff) {
2017					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2018						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2019						     path_type_desc[i].desc, cfg->phy,
2020						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2021						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2022				} else if (cfg->phy == 0xff) {
2023					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2024						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2025						     path_type_desc[i].desc, cfg->cascaded_expander,
2026						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2027						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2028				} else {
2029					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2030						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2031						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2032						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2033						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2034				}
2035			}
2036			return;
2037		}
2038	}
2039
2040	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2041		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2042		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2043		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2044}
2045
2046/**
2047 * ipr_log64_path_elem - Log a fabric path element.
2048 * @hostrcb:	hostrcb struct
2049 * @cfg:		fabric path element struct
2050 *
2051 * Return value:
2052 * 	none
2053 **/
2054static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2055				struct ipr_hostrcb64_config_element *cfg)
2056{
2057	int i, j;
2058	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2059	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2060	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2061	char buffer[IPR_MAX_RES_PATH_LENGTH];
2062
2063	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2064		return;
2065
2066	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2067		if (path_type_desc[i].type != type)
2068			continue;
2069
2070		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2071			if (path_status_desc[j].status != status)
2072				continue;
2073
2074			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2075				     path_status_desc[j].desc, path_type_desc[i].desc,
2076				     ipr_format_res_path(cfg->res_path, buffer,
2077							 sizeof(buffer)),
2078				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2079				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2080			return;
2081		}
2082	}
2083	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2084		     "WWN=%08X%08X\n", cfg->type_status,
2085		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2086		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2087		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2088}
2089
2090/**
2091 * ipr_log_fabric_error - Log a fabric error.
2092 * @ioa_cfg:	ioa config struct
2093 * @hostrcb:	hostrcb struct
2094 *
2095 * Return value:
2096 * 	none
2097 **/
2098static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2099				 struct ipr_hostrcb *hostrcb)
2100{
2101	struct ipr_hostrcb_type_20_error *error;
2102	struct ipr_hostrcb_fabric_desc *fabric;
2103	struct ipr_hostrcb_config_element *cfg;
2104	int i, add_len;
2105
2106	error = &hostrcb->hcam.u.error.u.type_20_error;
2107	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2108	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2109
2110	add_len = be32_to_cpu(hostrcb->hcam.length) -
2111		(offsetof(struct ipr_hostrcb_error, u) +
2112		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2113
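	/* add_len is the byte count of the variable-length fabric
	 * descriptors; each descriptor consumes fabric->length bytes and
	 * whatever remains after the loop is dumped as hex below. */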
2114	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2115		ipr_log_fabric_path(hostrcb, fabric);
2116		for_each_fabric_cfg(fabric, cfg)
2117			ipr_log_path_elem(hostrcb, cfg);
2118
2119		add_len -= be16_to_cpu(fabric->length);
2120		fabric = (struct ipr_hostrcb_fabric_desc *)
2121			((unsigned long)fabric + be16_to_cpu(fabric->length));
2122	}
2123
2124	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2125}
2126
2127/**
2128 * ipr_log_sis64_array_error - Log a sis64 array error.
2129 * @ioa_cfg:	ioa config struct
2130 * @hostrcb:	hostrcb struct
2131 *
2132 * Return value:
2133 * 	none
2134 **/
2135static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2136				      struct ipr_hostrcb *hostrcb)
2137{
2138	int i, num_entries;
2139	struct ipr_hostrcb_type_24_error *error;
2140	struct ipr_hostrcb64_array_data_entry *array_entry;
2141	char buffer[IPR_MAX_RES_PATH_LENGTH];
2142	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2143
2144	error = &hostrcb->hcam.u.error64.u.type_24_error;
2145
2146	ipr_err_separator;
2147
2148	ipr_err("RAID %s Array Configuration: %s\n",
2149		error->protection_level,
2150		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2151
2152	ipr_err_separator;
2153
2154	array_entry = error->array_member;
2155	num_entries = min_t(u32, error->num_entries,
2156			    ARRAY_SIZE(error->array_member));
2157
2158	for (i = 0; i < num_entries; i++, array_entry++) {
2159
2160		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2161			continue;
2162
2163		if (error->exposed_mode_adn == i)
2164			ipr_err("Exposed Array Member %d:\n", i);
2165		else
2166			ipr_err("Array Member %d:\n", i);
2167
2169		ipr_log_ext_vpd(&array_entry->vpd);
2170		ipr_err("Current Location: %s\n",
2171			 ipr_format_res_path(array_entry->res_path, buffer,
2172					     sizeof(buffer)));
2173		ipr_err("Expected Location: %s\n",
2174			 ipr_format_res_path(array_entry->expected_res_path,
2175					     buffer, sizeof(buffer)));
2176
2177		ipr_err_separator;
2178	}
2179}
2180
2181/**
2182 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2183 * @ioa_cfg:	ioa config struct
2184 * @hostrcb:	hostrcb struct
2185 *
2186 * Return value:
2187 * 	none
2188 **/
2189static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2190				       struct ipr_hostrcb *hostrcb)
2191{
2192	struct ipr_hostrcb_type_30_error *error;
2193	struct ipr_hostrcb64_fabric_desc *fabric;
2194	struct ipr_hostrcb64_config_element *cfg;
2195	int i, add_len;
2196
2197	error = &hostrcb->hcam.u.error64.u.type_30_error;
2198
2199	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2200	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2201
2202	add_len = be32_to_cpu(hostrcb->hcam.length) -
2203		(offsetof(struct ipr_hostrcb64_error, u) +
2204		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2205
2206	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2207		ipr_log64_fabric_path(hostrcb, fabric);
2208		for_each_fabric_cfg(fabric, cfg)
2209			ipr_log64_path_elem(hostrcb, cfg);
2210
2211		add_len -= be16_to_cpu(fabric->length);
2212		fabric = (struct ipr_hostrcb64_fabric_desc *)
2213			((unsigned long)fabric + be16_to_cpu(fabric->length));
2214	}
2215
2216	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2217}
2218
2219/**
2220 * ipr_log_generic_error - Log an adapter error.
2221 * @ioa_cfg:	ioa config struct
2222 * @hostrcb:	hostrcb struct
2223 *
2224 * Return value:
2225 * 	none
2226 **/
2227static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2228				  struct ipr_hostrcb *hostrcb)
2229{
2230	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2231			 be32_to_cpu(hostrcb->hcam.length));
2232}
2233
2234/**
2235 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2236 * @ioasc:	IOASC
2237 *
2238 * This function will return the index into the ipr_error_table
2239 * for the specified IOASC. If the IOASC is not in the table,
2240 * 0 will be returned, which points to the entry used for unknown errors.
2241 *
2242 * Return value:
2243 * 	index into the ipr_error_table
2244 **/
2245static u32 ipr_get_error(u32 ioasc)
2246{
2247	int i;
2248
2249	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2250		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2251			return i;
2252
2253	return 0;
2254}
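
/*
 * Note that the IOASC is masked with IPR_IOASC_IOASC_MASK before the
 * table lookup, so codes differing only in the masked-off bits share a
 * table entry; index 0 doubles as the catch-all for unknown errors.
 */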
2255
2256/**
2257 * ipr_handle_log_data - Log an adapter error.
2258 * @ioa_cfg:	ioa config struct
2259 * @hostrcb:	hostrcb struct
2260 *
2261 * This function logs an adapter error to the system.
2262 *
2263 * Return value:
2264 * 	none
2265 **/
2266static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2267				struct ipr_hostrcb *hostrcb)
2268{
2269	u32 ioasc;
2270	int error_index;
2271
2272	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2273		return;
2274
2275	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2276		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2277
2278	if (ioa_cfg->sis64)
2279		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2280	else
2281		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2282
2283	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2284	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2285		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2286		scsi_report_bus_reset(ioa_cfg->host,
2287				      hostrcb->hcam.u.error.fd_res_addr.bus);
2288	}
2289
2290	error_index = ipr_get_error(ioasc);
2291
2292	if (!ipr_error_table[error_index].log_hcam)
2293		return;
2294
2295	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2296
2297	/* Set indication we have logged an error */
2298	ioa_cfg->errors_logged++;
2299
2300	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2301		return;
2302	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2303		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2304
2305	switch (hostrcb->hcam.overlay_id) {
2306	case IPR_HOST_RCB_OVERLAY_ID_2:
2307		ipr_log_cache_error(ioa_cfg, hostrcb);
2308		break;
2309	case IPR_HOST_RCB_OVERLAY_ID_3:
2310		ipr_log_config_error(ioa_cfg, hostrcb);
2311		break;
2312	case IPR_HOST_RCB_OVERLAY_ID_4:
2313	case IPR_HOST_RCB_OVERLAY_ID_6:
2314		ipr_log_array_error(ioa_cfg, hostrcb);
2315		break;
2316	case IPR_HOST_RCB_OVERLAY_ID_7:
2317		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2318		break;
2319	case IPR_HOST_RCB_OVERLAY_ID_12:
2320		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2321		break;
2322	case IPR_HOST_RCB_OVERLAY_ID_13:
2323		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2324		break;
2325	case IPR_HOST_RCB_OVERLAY_ID_14:
2326	case IPR_HOST_RCB_OVERLAY_ID_16:
2327		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2328		break;
2329	case IPR_HOST_RCB_OVERLAY_ID_17:
2330		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2331		break;
2332	case IPR_HOST_RCB_OVERLAY_ID_20:
2333		ipr_log_fabric_error(ioa_cfg, hostrcb);
2334		break;
2335	case IPR_HOST_RCB_OVERLAY_ID_23:
2336		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2337		break;
2338	case IPR_HOST_RCB_OVERLAY_ID_24:
2339	case IPR_HOST_RCB_OVERLAY_ID_26:
2340		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2341		break;
2342	case IPR_HOST_RCB_OVERLAY_ID_30:
2343		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2344		break;
2345	case IPR_HOST_RCB_OVERLAY_ID_1:
2346	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2347	default:
2348		ipr_log_generic_error(ioa_cfg, hostrcb);
2349		break;
2350	}
2351}
2352
2353/**
2354 * ipr_process_error - Op done function for an adapter error log.
2355 * @ipr_cmd:	ipr command struct
2356 *
2357 * This function is the op done function for an error log host
2358 * controlled async message (HCAM) from the adapter. It will log the
2359 * error and send the HCAM back to the adapter.
2360 *
2361 * Return value:
2362 * 	none
2363 **/
2364static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2365{
2366	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2367	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2368	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2369	u32 fd_ioasc;
2370
2371	if (ioa_cfg->sis64)
2372		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2373	else
2374		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2375
2376	list_del(&hostrcb->queue);
2377	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2378
2379	if (!ioasc) {
2380		ipr_handle_log_data(ioa_cfg, hostrcb);
2381		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2382			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2383	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2384		dev_err(&ioa_cfg->pdev->dev,
2385			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2386	}
2387
2388	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2389}
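
/*
 * Note the HCAM buffer lifecycle above: the hostrcb is removed from its
 * queue, its log data is processed, and the same buffer is immediately
 * handed back to the adapter so the next async event can be reported.
 */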
2390
2391/**
2392 * ipr_timeout -  An internally generated op has timed out.
2393 * @ipr_cmd:	ipr command struct
2394 *
2395 * This function blocks host requests and initiates an
2396 * adapter reset.
2397 *
2398 * Return value:
2399 * 	none
2400 **/
2401static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2402{
2403	unsigned long lock_flags = 0;
2404	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2405
2406	ENTER;
2407	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2408
2409	ioa_cfg->errors_logged++;
2410	dev_err(&ioa_cfg->pdev->dev,
2411		"Adapter being reset due to command timeout.\n");
2412
2413	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2414		ioa_cfg->sdt_state = GET_DUMP;
2415
2416	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2417		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2418
2419	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2420	LEAVE;
2421}
2422
2423/**
2424 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2425 * @ipr_cmd:	ipr command struct
2426 *
2427 * This function blocks host requests and initiates an
2428 * adapter reset.
2429 *
2430 * Return value:
2431 * 	none
2432 **/
2433static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2434{
2435	unsigned long lock_flags = 0;
2436	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2437
2438	ENTER;
2439	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2440
2441	ioa_cfg->errors_logged++;
2442	dev_err(&ioa_cfg->pdev->dev,
2443		"Adapter timed out transitioning to operational.\n");
2444
2445	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2446		ioa_cfg->sdt_state = GET_DUMP;
2447
2448	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2449		if (ipr_fastfail)
2450			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2451		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2452	}
2453
2454	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2455	LEAVE;
2456}
2457
2458/**
2459 * ipr_reset_reload - Reset/Reload the IOA
2460 * @ioa_cfg:		ioa config struct
2461 * @shutdown_type:	shutdown type
2462 *
2463 * This function resets the adapter and re-initializes it.
2464 * This function assumes that all new host commands have been stopped.
2465 * Return value:
2466 * 	SUCCESS / FAILED
2467 **/
2468static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2469			    enum ipr_shutdown_type shutdown_type)
2470{
2471	if (!ioa_cfg->in_reset_reload)
2472		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2473
2474	spin_unlock_irq(ioa_cfg->host->host_lock);
2475	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2476	spin_lock_irq(ioa_cfg->host->host_lock);
2477
2478	/* If we got hit with a host reset while we were already resetting
2479	 * the adapter for some reason and that reset failed, fail here too. */
2480	if (ioa_cfg->ioa_is_dead) {
2481		ipr_trace;
2482		return FAILED;
2483	}
2484
2485	return SUCCESS;
2486}
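
/*
 * Note: ipr_reset_reload() must be called with the host lock held; it
 * drops the lock while sleeping on reset_wait_q and reacquires it before
 * returning.
 */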
2487
2488/**
2489 * ipr_find_ses_entry - Find matching SES in SES table
2490 * @res:	resource entry struct of SES
2491 *
2492 * Return value:
2493 * 	pointer to SES table entry / NULL on failure
2494 **/
2495static const struct ipr_ses_table_entry *
2496ipr_find_ses_entry(struct ipr_resource_entry *res)
2497{
2498	int i, j, matches;
2499	struct ipr_std_inq_vpids *vpids;
2500	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2501
2502	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2503		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2504			if (ste->compare_product_id_byte[j] == 'X') {
2505				vpids = &res->std_inq_data.vpids;
2506				if (vpids->product_id[j] == ste->product_id[j])
2507					matches++;
2508				else
2509					break;
2510			} else
2511				matches++;
2512		}
2513
2514		if (matches == IPR_PROD_ID_LEN)
2515			return ste;
2516	}
2517
2518	return NULL;
2519}
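
/*
 * Note the mask semantics used above: a compare_product_id_byte of 'X'
 * marks a byte that must match the resource's product ID, while any
 * other value makes that position a don't-care.  All IPR_PROD_ID_LEN
 * positions must match for the entry to be returned.
 */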
2520
2521/**
2522 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2523 * @ioa_cfg:	ioa config struct
2524 * @bus:		SCSI bus
2525 * @bus_width:	bus width
2526 *
2527 * Return value:
2528 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2529 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
2530 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
2531 *	max 160MHz = max 320MB/sec).
2532 **/
2533static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2534{
2535	struct ipr_resource_entry *res;
2536	const struct ipr_ses_table_entry *ste;
2537	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2538
2539	/* Loop through each config table entry in the config table buffer */
2540	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2541		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2542			continue;
2543
2544		if (bus != res->bus)
2545			continue;
2546
2547		if (!(ste = ipr_find_ses_entry(res)))
2548			continue;
2549
2550		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2551	}
2552
2553	return max_xfer_rate;
2554}
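
/*
 * Worked example (assuming ste->max_bus_speed_limit is expressed in
 * MB/sec): an SES entry limiting a 16-bit wide bus to 160 MB/sec yields
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100KHz units this
 * function returns.
 */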
2555
2556/**
2557 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2558 * @ioa_cfg:		ioa config struct
2559 * @max_delay:		max delay in micro-seconds to wait
2560 *
2561 * Busy-waits for an IODEBUG ACK from the IOA.
2562 *
2563 * Return value:
2564 * 	0 on success / other on failure
2565 **/
2566static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2567{
2568	volatile u32 pcii_reg;
2569	int delay = 1;
2570
2571	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2572	while (delay < max_delay) {
2573		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2574
2575		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2576			return 0;
2577
2578		/* udelay cannot be used if delay is more than a few milliseconds */
2579		if ((delay / 1000) > MAX_UDELAY_MS)
2580			mdelay(delay / 1000);
2581		else
2582			udelay(delay);
2583
2584		delay += delay;
2585	}
2586	return -EIO;
2587}
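
/*
 * The polling loop above backs off exponentially: delay starts at 1us
 * and doubles on every pass, so the total time spent waiting is bounded
 * by roughly twice max_delay.
 */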
2588
2589/**
2590 * ipr_get_sis64_dump_data_section - Dump IOA memory
2591 * @ioa_cfg:			ioa config struct
2592 * @start_addr:			adapter address to dump
2593 * @dest:			destination kernel buffer
2594 * @length_in_words:		length to dump in 4 byte words
2595 *
2596 * Return value:
2597 * 	0 on success
2598 **/
2599static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2600					   u32 start_addr,
2601					   __be32 *dest, u32 length_in_words)
2602{
2603	int i;
2604
2605	for (i = 0; i < length_in_words; i++) {
2606		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2607		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2608		dest++;
2609	}
2610
2611	return 0;
2612}
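
/*
 * SIS64 adapters expose an indirect dump port: each 32-bit word is
 * fetched by writing its adapter address to dump_addr_reg and then
 * reading it back through dump_data_reg, as done above.
 */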
2613
2614/**
2615 * ipr_get_ldump_data_section - Dump IOA memory
2616 * @ioa_cfg:			ioa config struct
2617 * @start_addr:			adapter address to dump
2618 * @dest:				destination kernel buffer
2619 * @length_in_words:	length to dump in 4 byte words
2620 *
2621 * Return value:
2622 * 	0 on success / -EIO on failure
2623 **/
2624static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2625				      u32 start_addr,
2626				      __be32 *dest, u32 length_in_words)
2627{
2628	volatile u32 temp_pcii_reg;
2629	int i, delay = 0;
2630
2631	if (ioa_cfg->sis64)
2632		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2633						       dest, length_in_words);
2634
2635	/* Write IOA interrupt reg starting LDUMP state  */
2636	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2637	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2638
2639	/* Wait for IO debug acknowledge */
2640	if (ipr_wait_iodbg_ack(ioa_cfg,
2641			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2642		dev_err(&ioa_cfg->pdev->dev,
2643			"IOA dump long data transfer timeout\n");
2644		return -EIO;
2645	}
2646
2647	/* Signal LDUMP interlocked - clear IO debug ack */
2648	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2649	       ioa_cfg->regs.clr_interrupt_reg);
2650
2651	/* Write Mailbox with starting address */
2652	writel(start_addr, ioa_cfg->ioa_mailbox);
2653
2654	/* Signal address valid - clear IOA Reset alert */
2655	writel(IPR_UPROCI_RESET_ALERT,
2656	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2657
2658	for (i = 0; i < length_in_words; i++) {
2659		/* Wait for IO debug acknowledge */
2660		if (ipr_wait_iodbg_ack(ioa_cfg,
2661				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2662			dev_err(&ioa_cfg->pdev->dev,
2663				"IOA dump short data transfer timeout\n");
2664			return -EIO;
2665		}
2666
2667		/* Read data from mailbox and increment destination pointer */
2668		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2669		dest++;
2670
2671		/* For all but the last word of data, signal data received */
2672		if (i < (length_in_words - 1)) {
2673			/* Signal dump data received - Clear IO debug Ack */
2674			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2675			       ioa_cfg->regs.clr_interrupt_reg);
2676		}
2677	}
2678
2679	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2680	writel(IPR_UPROCI_RESET_ALERT,
2681	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2682
2683	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2684	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2685
2686	/* Signal dump data received - Clear IO debug Ack */
2687	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2688	       ioa_cfg->regs.clr_interrupt_reg);
2689
2690	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2691	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2692		temp_pcii_reg =
2693		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2694
2695		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2696			return 0;
2697
2698		udelay(10);
2699		delay += 10;
2700	}
2701
2702	return 0;
2703}
2704
2705#ifdef CONFIG_SCSI_IPR_DUMP
2706/**
2707 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2708 * @ioa_cfg:		ioa config struct
2709 * @pci_address:	adapter address
2710 * @length:			length of data to copy
2711 *
2712 * Copy data from PCI adapter to kernel buffer.
2713 * Note: length MUST be a 4 byte multiple
2714 * Return value:
2715 * 	0 on success / other on failure
2716 **/
2717static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2718			unsigned long pci_address, u32 length)
2719{
2720	int bytes_copied = 0;
2721	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2722	__be32 *page;
2723	unsigned long lock_flags = 0;
2724	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2725
2726	if (ioa_cfg->sis64)
2727		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2728	else
2729		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2730
2731	while (bytes_copied < length &&
2732	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2733		if (ioa_dump->page_offset >= PAGE_SIZE ||
2734		    ioa_dump->page_offset == 0) {
2735			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2736
2737			if (!page) {
2738				ipr_trace;
2739				return bytes_copied;
2740			}
2741
2742			ioa_dump->page_offset = 0;
2743			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2744			ioa_dump->next_page_index++;
2745		} else
2746			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2747
2748		rem_len = length - bytes_copied;
2749		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2750		cur_len = min(rem_len, rem_page_len);
2751
2752		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2753		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2754			rc = -EIO;
2755		} else {
2756			rc = ipr_get_ldump_data_section(ioa_cfg,
2757							pci_address + bytes_copied,
2758							&page[ioa_dump->page_offset / 4],
2759							(cur_len / sizeof(u32)));
2760		}
2761		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2762
2763		if (!rc) {
2764			ioa_dump->page_offset += cur_len;
2765			bytes_copied += cur_len;
2766		} else {
2767			ipr_trace;
2768			break;
2769		}
2770		schedule();
2771	}
2772
2773	return bytes_copied;
2774}
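
/*
 * ipr_sdt_copy() buffers the dump one page at a time, holds the host
 * lock only around each chunk transfer, and calls schedule() between
 * chunks so copying a large IOA dump does not monopolize the CPU.
 */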
2775
2776/**
2777 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2778 * @hdr:	dump entry header struct
2779 *
2780 * Return value:
2781 * 	nothing
2782 **/
2783static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2784{
2785	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2786	hdr->num_elems = 1;
2787	hdr->offset = sizeof(*hdr);
2788	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2789}
2790
2791/**
2792 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2793 * @ioa_cfg:	ioa config struct
2794 * @driver_dump:	driver dump struct
2795 *
2796 * Return value:
2797 * 	nothing
2798 **/
2799static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2800				   struct ipr_driver_dump *driver_dump)
2801{
2802	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2803
2804	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2805	driver_dump->ioa_type_entry.hdr.len =
2806		sizeof(struct ipr_dump_ioa_type_entry) -
2807		sizeof(struct ipr_dump_entry_header);
2808	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2809	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2810	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2811	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2812		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2813		ucode_vpd->minor_release[1];
2814	driver_dump->hdr.num_entries++;
2815}
2816
2817/**
2818 * ipr_dump_version_data - Fill in the driver version in the dump.
2819 * @ioa_cfg:	ioa config struct
2820 * @driver_dump:	driver dump struct
2821 *
2822 * Return value:
2823 * 	nothing
2824 **/
2825static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2826				  struct ipr_driver_dump *driver_dump)
2827{
2828	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2829	driver_dump->version_entry.hdr.len =
2830		sizeof(struct ipr_dump_version_entry) -
2831		sizeof(struct ipr_dump_entry_header);
2832	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2833	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2834	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2835	driver_dump->hdr.num_entries++;
2836}
2837
2838/**
2839 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2840 * @ioa_cfg:	ioa config struct
2841 * @driver_dump:	driver dump struct
2842 *
2843 * Return value:
2844 * 	nothing
2845 **/
2846static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2847				   struct ipr_driver_dump *driver_dump)
2848{
2849	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2850	driver_dump->trace_entry.hdr.len =
2851		sizeof(struct ipr_dump_trace_entry) -
2852		sizeof(struct ipr_dump_entry_header);
2853	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2854	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2855	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2856	driver_dump->hdr.num_entries++;
2857}
2858
2859/**
2860 * ipr_dump_location_data - Fill in the IOA location in the dump.
2861 * @ioa_cfg:	ioa config struct
2862 * @driver_dump:	driver dump struct
2863 *
2864 * Return value:
2865 * 	nothing
2866 **/
2867static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2868				   struct ipr_driver_dump *driver_dump)
2869{
2870	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2871	driver_dump->location_entry.hdr.len =
2872		sizeof(struct ipr_dump_location_entry) -
2873		sizeof(struct ipr_dump_entry_header);
2874	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2875	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2876	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2877	driver_dump->hdr.num_entries++;
2878}
2879
2880/**
2881 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2882 * @ioa_cfg:	ioa config struct
2883 * @dump:		dump struct
2884 *
2885 * Return value:
2886 * 	nothing
2887 **/
2888static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2889{
2890	unsigned long start_addr, sdt_word;
2891	unsigned long lock_flags = 0;
2892	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2893	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2894	u32 num_entries, max_num_entries, start_off, end_off;
2895	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2896	struct ipr_sdt *sdt;
2897	int valid = 1;
2898	int i;
2899
2900	ENTER;
2901
2902	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2903
2904	if (ioa_cfg->sdt_state != READ_DUMP) {
2905		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2906		return;
2907	}
2908
2909	if (ioa_cfg->sis64) {
2910		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2911		ssleep(IPR_DUMP_DELAY_SECONDS);
2912		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2913	}
2914
2915	start_addr = readl(ioa_cfg->ioa_mailbox);
2916
2917	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2918		dev_err(&ioa_cfg->pdev->dev,
2919			"Invalid dump table format: %lx\n", start_addr);
2920		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2921		return;
2922	}
2923
2924	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2925
2926	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2927
2928	/* Initialize the overall dump header */
2929	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2930	driver_dump->hdr.num_entries = 1;
2931	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2932	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2933	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2934	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2935
2936	ipr_dump_version_data(ioa_cfg, driver_dump);
2937	ipr_dump_location_data(ioa_cfg, driver_dump);
2938	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2939	ipr_dump_trace_data(ioa_cfg, driver_dump);
2940
2941	/* Update dump_header */
2942	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2943
2944	/* IOA Dump entry */
2945	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2946	ioa_dump->hdr.len = 0;
2947	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2948	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2949
2950	/* First entries in sdt are actually a list of dump addresses and
2951	 * lengths to gather the real dump data.  sdt represents the pointer
2952	 * to the ioa generated dump table.  Dump data will be extracted based
2953	 * on entries in this table. */
2954	sdt = &ioa_dump->sdt;
2955
2956	if (ioa_cfg->sis64) {
2957		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
2958		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2959	} else {
2960		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
2961		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2962	}
2963
2964	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
2965			(max_num_entries * sizeof(struct ipr_sdt_entry));
2966	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2967					bytes_to_copy / sizeof(__be32));
2968
2969	/* Smart Dump table is ready to use and the first entry is valid */
2970	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2971	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2972		dev_err(&ioa_cfg->pdev->dev,
2973			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2974			rc, be32_to_cpu(sdt->hdr.state));
2975		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2976		ioa_cfg->sdt_state = DUMP_OBTAINED;
2977		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2978		return;
2979	}
2980
2981	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2982
2983	if (num_entries > max_num_entries)
2984		num_entries = max_num_entries;
2985
2986	/* Update dump length to the actual data to be copied */
2987	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
2988	if (ioa_cfg->sis64)
2989		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
2990	else
2991		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
2992
2993	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2994
2995	for (i = 0; i < num_entries; i++) {
2996		if (ioa_dump->hdr.len > max_dump_size) {
2997			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2998			break;
2999		}
3000
3001		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3002			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3003			if (ioa_cfg->sis64)
3004				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3005			else {
3006				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3007				end_off = be32_to_cpu(sdt->entry[i].end_token);
3008
3009				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3010					bytes_to_copy = end_off - start_off;
3011				else
3012					valid = 0;
3013			}
3014			if (valid) {
3015				if (bytes_to_copy > max_dump_size) {
3016					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3017					continue;
3018				}
3019
3020				/* Copy data from adapter to driver buffers */
3021				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3022							    bytes_to_copy);
3023
3024				ioa_dump->hdr.len += bytes_copied;
3025
3026				if (bytes_copied != bytes_to_copy) {
3027					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3028					break;
3029				}
3030			}
3031		}
3032	}
3033
3034	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3035
3036	/* Update dump_header */
3037	driver_dump->hdr.len += ioa_dump->hdr.len;
3038	wmb();
3039	ioa_cfg->sdt_state = DUMP_OBTAINED;
3040	LEAVE;
3041}
3042
3043#else
3044#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3045#endif
3046
3047/**
3048 * ipr_release_dump - Free adapter dump memory
3049 * @kref:	kref struct
3050 *
3051 * Return value:
3052 *	nothing
3053 **/
3054static void ipr_release_dump(struct kref *kref)
3055{
3056	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3057	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3058	unsigned long lock_flags = 0;
3059	int i;
3060
3061	ENTER;
3062	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3063	ioa_cfg->dump = NULL;
3064	ioa_cfg->sdt_state = INACTIVE;
3065	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066
3067	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3068		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3069
3070	vfree(dump->ioa_dump.ioa_data);
3071	kfree(dump);
3072	LEAVE;
3073}
3074
3075/**
3076 * ipr_worker_thread - Worker thread
3077 * @work:		work struct embedded in the ioa config struct
3078 *
3079 * Called at task level from a work thread. This function takes care
3080 * of adding and removing devices from the mid-layer as configuration
3081 * changes are detected by the adapter.
3082 *
3083 * Return value:
3084 * 	nothing
3085 **/
3086static void ipr_worker_thread(struct work_struct *work)
3087{
3088	unsigned long lock_flags;
3089	struct ipr_resource_entry *res;
3090	struct scsi_device *sdev;
3091	struct ipr_dump *dump;
3092	struct ipr_ioa_cfg *ioa_cfg =
3093		container_of(work, struct ipr_ioa_cfg, work_q);
3094	u8 bus, target, lun;
3095	int did_work;
3096
3097	ENTER;
3098	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3099
3100	if (ioa_cfg->sdt_state == READ_DUMP) {
3101		dump = ioa_cfg->dump;
3102		if (!dump) {
3103			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3104			return;
3105		}
3106		kref_get(&dump->kref);
3107		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3108		ipr_get_ioa_dump(ioa_cfg, dump);
3109		kref_put(&dump->kref, ipr_release_dump);
3110
3111		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3112		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3113			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3114		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3115		return;
3116	}
3117
3118restart:
3119	do {
3120		did_work = 0;
3121		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3122			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3123			return;
3124		}
3125
3126		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3127			if (res->del_from_ml && res->sdev) {
3128				did_work = 1;
3129				sdev = res->sdev;
3130				if (!scsi_device_get(sdev)) {
3131					if (!res->add_to_ml)
3132						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3133					else
3134						res->del_from_ml = 0;
3135					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3136					scsi_remove_device(sdev);
3137					scsi_device_put(sdev);
3138					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139				}
3140				break;
3141			}
3142		}
3143	} while (did_work);
3144
3145	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3146		if (res->add_to_ml) {
3147			bus = res->bus;
3148			target = res->target;
3149			lun = res->lun;
3150			res->add_to_ml = 0;
3151			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3152			scsi_add_device(ioa_cfg->host, bus, target, lun);
3153			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3154			goto restart;
3155		}
3156	}
3157
3158	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3159	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3160	LEAVE;
3161}
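
/*
 * The restart label above forces a rescan of the resource list from the
 * top after every scsi_add_device() call: the host lock is dropped for
 * the mid-layer call, so the list may have changed underneath us.
 */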
3162
3163#ifdef CONFIG_SCSI_IPR_TRACE
3164/**
3165 * ipr_read_trace - Dump the adapter trace
3166 * @filp:		open sysfs file
3167 * @kobj:		kobject struct
3168 * @bin_attr:		bin_attribute struct
3169 * @buf:		buffer
3170 * @off:		offset
3171 * @count:		buffer size
3172 *
3173 * Return value:
3174 *	number of bytes printed to buffer
3175 **/
3176static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3177			      struct bin_attribute *bin_attr,
3178			      char *buf, loff_t off, size_t count)
3179{
3180	struct device *dev = container_of(kobj, struct device, kobj);
3181	struct Scsi_Host *shost = class_to_shost(dev);
3182	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3183	unsigned long lock_flags = 0;
3184	ssize_t ret;
3185
3186	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3187	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3188				IPR_TRACE_SIZE);
3189	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190
3191	return ret;
3192}
3193
3194static struct bin_attribute ipr_trace_attr = {
3195	.attr =	{
3196		.name = "trace",
3197		.mode = S_IRUGO,
3198	},
3199	.size = 0,
3200	.read = ipr_read_trace,
3201};
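
/*
 * Usage sketch (illustrative; the exact sysfs path depends on where the
 * driver registers this bin_attribute, assumed here to be the scsi_host
 * class device for host0):
 *
 *	# hexdump -C /sys/class/scsi_host/host0/trace | head
 */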
3202#endif
3203
3204/**
3205 * ipr_show_fw_version - Show the firmware version
3206 * @dev:	class device struct
3207 * @buf:	buffer
3208 *
3209 * Return value:
3210 *	number of bytes printed to buffer
3211 **/
3212static ssize_t ipr_show_fw_version(struct device *dev,
3213				   struct device_attribute *attr, char *buf)
3214{
3215	struct Scsi_Host *shost = class_to_shost(dev);
3216	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3217	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3218	unsigned long lock_flags = 0;
3219	int len;
3220
3221	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3223		       ucode_vpd->major_release, ucode_vpd->card_type,
3224		       ucode_vpd->minor_release[0],
3225		       ucode_vpd->minor_release[1]);
3226	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227	return len;
3228}
3229
3230static struct device_attribute ipr_fw_version_attr = {
3231	.attr = {
3232		.name =		"fw_version",
3233		.mode =		S_IRUGO,
3234	},
3235	.show = ipr_show_fw_version,
3236};
3237
3238/**
3239 * ipr_show_log_level - Show the adapter's error logging level
3240 * @dev:	class device struct
3241 * @buf:	buffer
3242 *
3243 * Return value:
3244 * 	number of bytes printed to buffer
3245 **/
3246static ssize_t ipr_show_log_level(struct device *dev,
3247				   struct device_attribute *attr, char *buf)
3248{
3249	struct Scsi_Host *shost = class_to_shost(dev);
3250	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3251	unsigned long lock_flags = 0;
3252	int len;
3253
3254	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3255	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3256	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257	return len;
3258}
3259
3260/**
3261 * ipr_store_log_level - Change the adapter's error logging level
3262 * @dev:	class device struct
3263 * @buf:	buffer
3264 *
3265 * Return value:
3266 * 	number of bytes consumed from the buffer
3267 **/
3268static ssize_t ipr_store_log_level(struct device *dev,
3269			           struct device_attribute *attr,
3270				   const char *buf, size_t count)
3271{
3272	struct Scsi_Host *shost = class_to_shost(dev);
3273	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3274	unsigned long lock_flags = 0;
3275
3276	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3277	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3278	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279	return strlen(buf);
3280}
3281
3282static struct device_attribute ipr_log_level_attr = {
3283	.attr = {
3284		.name =		"log_level",
3285		.mode =		S_IRUGO | S_IWUSR,
3286	},
3287	.show = ipr_show_log_level,
3288	.store = ipr_store_log_level
3289};
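
/*
 * Usage sketch (illustrative path and values, assuming SCSI host 0):
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	2
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 */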
3290
3291/**
3292 * ipr_store_diagnostics - IOA Diagnostics interface
3293 * @dev:	device struct
3294 * @buf:	buffer
3295 * @count:	buffer size
3296 *
3297 * This function will reset the adapter and wait a reasonable
3298 * amount of time for any errors that the adapter might log.
3299 *
3300 * Return value:
3301 * 	count on success / other on failure
3302 **/
3303static ssize_t ipr_store_diagnostics(struct device *dev,
3304				     struct device_attribute *attr,
3305				     const char *buf, size_t count)
3306{
3307	struct Scsi_Host *shost = class_to_shost(dev);
3308	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3309	unsigned long lock_flags = 0;
3310	int rc = count;
3311
3312	if (!capable(CAP_SYS_ADMIN))
3313		return -EACCES;
3314
3315	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3316	while (ioa_cfg->in_reset_reload) {
3317		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3318		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3319		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3320	}
3321
3322	ioa_cfg->errors_logged = 0;
3323	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3324
3325	if (ioa_cfg->in_reset_reload) {
3326		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3327		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3328
3329		/* Wait for a second for any errors to be logged */
3330		msleep(1000);
3331	} else {
3332		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3333		return -EIO;
3334	}
3335
3336	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3337	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3338		rc = -EIO;
3339	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340
3341	return rc;
3342}
3343
3344static struct device_attribute ipr_diagnostics_attr = {
3345	.attr = {
3346		.name =		"run_diagnostics",
3347		.mode =		S_IWUSR,
3348	},
3349	.store = ipr_store_diagnostics
3350};
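
/*
 * Note: the store above never parses the written value, so any write
 * (e.g. "echo 1 > run_diagnostics") triggers a normal-shutdown reset; it
 * returns -EIO if the reset could not run or if the adapter logged any
 * errors while it completed.
 */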
3351
3352/**
3353 * ipr_show_adapter_state - Show the adapter's state
3354 * @dev:	device struct
3355 * @buf:	buffer
3356 *
3357 * Return value:
3358 * 	number of bytes printed to buffer
3359 **/
3360static ssize_t ipr_show_adapter_state(struct device *dev,
3361				      struct device_attribute *attr, char *buf)
3362{
3363	struct Scsi_Host *shost = class_to_shost(dev);
3364	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3365	unsigned long lock_flags = 0;
3366	int len;
3367
3368	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3369	if (ioa_cfg->ioa_is_dead)
3370		len = snprintf(buf, PAGE_SIZE, "offline\n");
3371	else
3372		len = snprintf(buf, PAGE_SIZE, "online\n");
3373	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374	return len;
3375}
3376
3377/**
3378 * ipr_store_adapter_state - Change adapter state
3379 * @dev:	device struct
3380 * @buf:	buffer
3381 * @count:	buffer size
3382 *
3383 * This function will change the adapter's state.
3384 *
3385 * Return value:
3386 * 	count on success / other on failure
3387 **/
3388static ssize_t ipr_store_adapter_state(struct device *dev,
3389				       struct device_attribute *attr,
3390				       const char *buf, size_t count)
3391{
3392	struct Scsi_Host *shost = class_to_shost(dev);
3393	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3394	unsigned long lock_flags;
3395	int result = count;
3396
3397	if (!capable(CAP_SYS_ADMIN))
3398		return -EACCES;
3399
3400	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3401	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3402		ioa_cfg->ioa_is_dead = 0;
3403		ioa_cfg->reset_retries = 0;
3404		ioa_cfg->in_ioa_bringdown = 0;
3405		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3406	}
3407	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3408	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3409
3410	return result;
3411}
3412
3413static struct device_attribute ipr_ioa_state_attr = {
3414	.attr = {
3415		.name =		"online_state",
3416		.mode =		S_IRUGO | S_IWUSR,
3417	},
3418	.show = ipr_show_adapter_state,
3419	.store = ipr_store_adapter_state
3420};
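
/*
 * Illustrative usage: bring a dead adapter back online; other writes, or
 * writes to a live adapter, complete without side effects:
 *	echo online > /sys/class/scsi_host/host0/online_state
 */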
3421
3422/**
3423 * ipr_store_reset_adapter - Reset the adapter
3424 * @dev:	device struct
3425 * @buf:	buffer
3426 * @count:	buffer size
3427 *
3428 * This function will reset the adapter.
3429 *
3430 * Return value:
3431 * 	count on success / other on failure
3432 **/
3433static ssize_t ipr_store_reset_adapter(struct device *dev,
3434				       struct device_attribute *attr,
3435				       const char *buf, size_t count)
3436{
3437	struct Scsi_Host *shost = class_to_shost(dev);
3438	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3439	unsigned long lock_flags;
3440	int result = count;
3441
3442	if (!capable(CAP_SYS_ADMIN))
3443		return -EACCES;
3444
3445	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3446	if (!ioa_cfg->in_reset_reload)
3447		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3448	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3449	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3450
3451	return result;
3452}
3453
3454static struct device_attribute ipr_ioa_reset_attr = {
3455	.attr = {
3456		.name =		"reset_host",
3457		.mode =		S_IWUSR,
3458	},
3459	.store = ipr_store_reset_adapter
3460};
3461
3462/**
3463 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3464 * @buf_len:		buffer length
3465 *
3466 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3467 * list to use for microcode download
3468 *
3469 * Return value:
3470 * 	pointer to sglist / NULL on failure
3471 **/
3472static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3473{
3474	int sg_size, order, bsize_elem, num_elem, i, j;
3475	struct ipr_sglist *sglist;
3476	struct scatterlist *scatterlist;
3477	struct page *page;
3478
3479	/* Get the minimum size per scatter/gather element */
3480	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3481
3482	/* Get the actual size per element */
3483	order = get_order(sg_size);
3484
3485	/* Determine the actual number of bytes per element */
3486	bsize_elem = PAGE_SIZE * (1 << order);
3487
3488	/* Determine the actual number of sg entries needed */
3489	if (buf_len % bsize_elem)
3490		num_elem = (buf_len / bsize_elem) + 1;
3491	else
3492		num_elem = buf_len / bsize_elem;
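	/*
	 * Worked example (illustrative, assuming 4 KB pages and
	 * IPR_MAX_SGLIST == 64): a 600 KB image yields sg_size ~= 9.5 KB,
	 * order 2 (16 KB chunks), so bsize_elem = 16 KB and num_elem = 38.
	 */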
3493
3494	/* Allocate a scatter/gather list for the DMA */
3495	sglist = kzalloc(sizeof(struct ipr_sglist) +
3496			 (sizeof(struct scatterlist) * (num_elem - 1)),
3497			 GFP_KERNEL);
3498
3499	if (sglist == NULL) {
3500		ipr_trace;
3501		return NULL;
3502	}
3503
3504	scatterlist = sglist->scatterlist;
3505	sg_init_table(scatterlist, num_elem);
3506
3507	sglist->order = order;
3508	sglist->num_sg = num_elem;
3509
3510	/* Allocate a bunch of sg elements */
3511	for (i = 0; i < num_elem; i++) {
3512		page = alloc_pages(GFP_KERNEL, order);
3513		if (!page) {
3514			ipr_trace;
3515
3516			/* Free up what we already allocated */
3517			for (j = i - 1; j >= 0; j--)
3518				__free_pages(sg_page(&scatterlist[j]), order);
3519			kfree(sglist);
3520			return NULL;
3521		}
3522
3523		sg_set_page(&scatterlist[i], page, 0, 0);
3524	}
3525
3526	return sglist;
3527}
3528
3529/**
3530 * ipr_free_ucode_buffer - Frees a microcode download buffer
3531 * @sglist:		scatter/gather list pointer
3532 *
3533 * Free a DMA'able ucode download buffer previously allocated with
3534 * ipr_alloc_ucode_buffer
3535 *
3536 * Return value:
3537 * 	nothing
3538 **/
3539static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3540{
3541	int i;
3542
3543	for (i = 0; i < sglist->num_sg; i++)
3544		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3545
3546	kfree(sglist);
3547}
3548
3549/**
3550 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3551 * @sglist:		scatter/gather list pointer
3552 * @buffer:		buffer pointer
3553 * @len:		buffer length
3554 *
3555 * Copy a microcode image from a user buffer into a buffer allocated by
3556 * ipr_alloc_ucode_buffer
3557 *
3558 * Return value:
3559 * 	0 on success / other on failure
3560 **/
3561static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3562				 u8 *buffer, u32 len)
3563{
3564	int bsize_elem, i, result = 0;
3565	struct scatterlist *scatterlist;
3566	void *kaddr;
3567
3568	/* Determine the actual number of bytes per element */
3569	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3570
3571	scatterlist = sglist->scatterlist;
3572
3573	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3574		struct page *page = sg_page(&scatterlist[i]);
3575
3576		kaddr = kmap(page);
3577		memcpy(kaddr, buffer, bsize_elem);
3578		kunmap(page);
3579
3580		scatterlist[i].length = bsize_elem;
3586	}
3587
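	/* Copy any tail that does not fill a whole element */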
3588	if (len % bsize_elem) {
3589		struct page *page = sg_page(&scatterlist[i]);
3590
3591		kaddr = kmap(page);
3592		memcpy(kaddr, buffer, len % bsize_elem);
3593		kunmap(page);
3594
3595		scatterlist[i].length = len % bsize_elem;
3596	}
3597
3598	sglist->buffer_len = len;
3599	return result;
3600}
3601
3602/**
3603 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3604 * @ipr_cmd:		ipr command struct
3605 * @sglist:		scatter/gather list
3606 *
3607 * Builds a microcode download IOA data list (IOADL).
3608 *
3609 **/
3610static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3611				    struct ipr_sglist *sglist)
3612{
3613	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3614	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3615	struct scatterlist *scatterlist = sglist->scatterlist;
3616	int i;
3617
3618	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3619	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3620	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3621
3622	ioarcb->ioadl_len =
3623		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3624	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3625		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3626		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3627		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3628	}
3629
3630	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3631}
3632
3633/**
3634 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3635 * @ipr_cmd:	ipr command struct
3636 * @sglist:		scatter/gather list
3637 *
3638 * Builds a microcode download IOA data list (IOADL).
3639 *
3640 **/
3641static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3642				  struct ipr_sglist *sglist)
3643{
3644	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3645	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3646	struct scatterlist *scatterlist = sglist->scatterlist;
3647	int i;
3648
3649	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3650	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3651	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3652
3653	ioarcb->ioadl_len =
3654		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3655
3656	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3657		ioadl[i].flags_and_data_len =
3658			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3659		ioadl[i].address =
3660			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3661	}
3662
3663	ioadl[i-1].flags_and_data_len |=
3664		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3665}
3666
3667/**
3668 * ipr_update_ioa_ucode - Update IOA's microcode
3669 * @ioa_cfg:	ioa config struct
3670 * @sglist:		scatter/gather list
3671 *
3672 * Initiate an adapter reset to update the IOA's microcode
3673 *
3674 * Return value:
3675 * 	0 on success / -EIO on failure
3676 **/
3677static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3678				struct ipr_sglist *sglist)
3679{
3680	unsigned long lock_flags;
3681
3682	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3683	while (ioa_cfg->in_reset_reload) {
3684		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3685		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3686		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3687	}
3688
3689	if (ioa_cfg->ucode_sglist) {
3690		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3691		dev_err(&ioa_cfg->pdev->dev,
3692			"Microcode download already in progress\n");
3693		return -EIO;
3694	}
3695
3696	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3697					sglist->num_sg, DMA_TO_DEVICE);
3698
3699	if (!sglist->num_dma_sg) {
3700		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3701		dev_err(&ioa_cfg->pdev->dev,
3702			"Failed to map microcode download buffer!\n");
3703		return -EIO;
3704	}
3705
3706	ioa_cfg->ucode_sglist = sglist;
3707	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3708	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3709	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3710
3711	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3712	ioa_cfg->ucode_sglist = NULL;
3713	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3714	return 0;
3715}
3716
3717/**
3718 * ipr_store_update_fw - Update the firmware on the adapter
3719 * @dev:	device struct
3720 * @buf:	buffer
3721 * @count:	buffer size
3722 *
3723 * This function will update the firmware on the adapter.
3724 *
3725 * Return value:
3726 * 	count on success / other on failure
3727 **/
3728static ssize_t ipr_store_update_fw(struct device *dev,
3729				   struct device_attribute *attr,
3730				   const char *buf, size_t count)
3731{
3732	struct Scsi_Host *shost = class_to_shost(dev);
3733	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3734	struct ipr_ucode_image_header *image_hdr;
3735	const struct firmware *fw_entry;
3736	struct ipr_sglist *sglist;
3737	char fname[100];
3738	char *src, *endline;
3739	int result, dnld_size;
3740
3741	if (!capable(CAP_SYS_ADMIN))
3742		return -EACCES;
3743
3744	snprintf(fname, sizeof(fname), "%s", buf);
3745	/* Strip the trailing newline that echo appends, if any */
	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';
3746
3747	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3748		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3749		return -EIO;
3750	}
3751
3752	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3753
3754	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3755	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3756	sglist = ipr_alloc_ucode_buffer(dnld_size);
3757
3758	if (!sglist) {
3759		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3760		release_firmware(fw_entry);
3761		return -ENOMEM;
3762	}
3763
3764	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3765
3766	if (result) {
3767		dev_err(&ioa_cfg->pdev->dev,
3768			"Microcode buffer copy to DMA buffer failed\n");
3769		goto out;
3770	}
3771
3772	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
3773
3774	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3775
3776	if (!result)
3777		result = count;
3778out:
3779	ipr_free_ucode_buffer(sglist);
3780	release_firmware(fw_entry);
3781	return result;
3782}
3783
3784static struct device_attribute ipr_update_fw_attr = {
3785	.attr = {
3786		.name =		"update_fw",
3787		.mode =		S_IWUSR,
3788	},
3789	.store = ipr_store_update_fw
3790};
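
/*
 * Illustrative usage: write the image file name (resolved by
 * request_firmware(), typically from /lib/firmware) to start the download;
 * the file name below is hypothetical:
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */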
3791
3792/**
3793 * ipr_show_fw_type - Show the adapter's firmware type.
3794 * @dev:	class device struct
3795 * @buf:	buffer
3796 *
3797 * Return value:
3798 *	number of bytes printed to buffer
3799 **/
3800static ssize_t ipr_show_fw_type(struct device *dev,
3801				struct device_attribute *attr, char *buf)
3802{
3803	struct Scsi_Host *shost = class_to_shost(dev);
3804	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3805	unsigned long lock_flags = 0;
3806	int len;
3807
3808	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3809	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3810	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3811	return len;
3812}
3813
3814static struct device_attribute ipr_ioa_fw_type_attr = {
3815	.attr = {
3816		.name =		"fw_type",
3817		.mode =		S_IRUGO,
3818	},
3819	.show = ipr_show_fw_type
3820};
3821
3822static struct device_attribute *ipr_ioa_attrs[] = {
3823	&ipr_fw_version_attr,
3824	&ipr_log_level_attr,
3825	&ipr_diagnostics_attr,
3826	&ipr_ioa_state_attr,
3827	&ipr_ioa_reset_attr,
3828	&ipr_update_fw_attr,
3829	&ipr_ioa_fw_type_attr,
3830	NULL,
3831};
3832
3833#ifdef CONFIG_SCSI_IPR_DUMP
3834/**
3835 * ipr_read_dump - Dump the adapter
3836 * @filp:		open sysfs file
3837 * @kobj:		kobject struct
3838 * @bin_attr:		bin_attribute struct
3839 * @buf:		buffer
3840 * @off:		offset
3841 * @count:		buffer size
3842 *
3843 * Return value:
3844 *	number of bytes read / other on failure
3845 **/
3846static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3847			     struct bin_attribute *bin_attr,
3848			     char *buf, loff_t off, size_t count)
3849{
3850	struct device *cdev = container_of(kobj, struct device, kobj);
3851	struct Scsi_Host *shost = class_to_shost(cdev);
3852	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3853	struct ipr_dump *dump;
3854	unsigned long lock_flags = 0;
3855	char *src;
3856	int len, sdt_end;
3857	size_t rc = count;
3858
3859	if (!capable(CAP_SYS_ADMIN))
3860		return -EACCES;
3861
3862	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3863	dump = ioa_cfg->dump;
3864
3865	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3866		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3867		return 0;
3868	}
3869	kref_get(&dump->kref);
3870	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3871
3872	if (off > dump->driver_dump.hdr.len) {
3873		kref_put(&dump->kref, ipr_release_dump);
3874		return 0;
3875	}
3876
3877	if (off + count > dump->driver_dump.hdr.len) {
3878		count = dump->driver_dump.hdr.len - off;
3879		rc = count;
3880	}
3881
3882	if (count && off < sizeof(dump->driver_dump)) {
3883		if (off + count > sizeof(dump->driver_dump))
3884			len = sizeof(dump->driver_dump) - off;
3885		else
3886			len = count;
3887		src = (u8 *)&dump->driver_dump + off;
3888		memcpy(buf, src, len);
3889		buf += len;
3890		off += len;
3891		count -= len;
3892	}
3893
3894	off -= sizeof(dump->driver_dump);
3895
3896	if (ioa_cfg->sis64)
3897		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3898			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3899			   sizeof(struct ipr_sdt_entry));
3900	else
3901		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3902			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3903
3904	if (count && off < sdt_end) {
3905		if (off + count > sdt_end)
3906			len = sdt_end - off;
3907		else
3908			len = count;
3909		src = (u8 *)&dump->ioa_dump + off;
3910		memcpy(buf, src, len);
3911		buf += len;
3912		off += len;
3913		count -= len;
3914	}
3915
3916	off -= sdt_end;
3917
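	/*
	 * The remainder of the dump lives in separately allocated pages;
	 * copy at most up to the next page boundary per iteration so each
	 * chunk stays within a single ioa_data page.
	 */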
3918	while (count) {
3919		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3920			len = PAGE_ALIGN(off) - off;
3921		else
3922			len = count;
3923		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3924		src += off & ~PAGE_MASK;
3925		memcpy(buf, src, len);
3926		buf += len;
3927		off += len;
3928		count -= len;
3929	}
3930
3931	kref_put(&dump->kref, ipr_release_dump);
3932	return rc;
3933}
3934
3935/**
3936 * ipr_alloc_dump - Prepare for adapter dump
3937 * @ioa_cfg:	ioa config struct
3938 *
3939 * Return value:
3940 *	0 on success / other on failure
3941 **/
3942static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3943{
3944	struct ipr_dump *dump;
3945	__be32 **ioa_data;
3946	unsigned long lock_flags = 0;
3947
3948	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3949
3950	if (!dump) {
3951		ipr_err("Dump memory allocation failed\n");
3952		return -ENOMEM;
3953	}
3954
3955	if (ioa_cfg->sis64)
3956		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3957	else
3958		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3959
3960	if (!ioa_data) {
3961		ipr_err("Dump memory allocation failed\n");
3962		kfree(dump);
3963		return -ENOMEM;
3964	}
3965
3966	dump->ioa_dump.ioa_data = ioa_data;
3967
3968	kref_init(&dump->kref);
3969	dump->ioa_cfg = ioa_cfg;
3970
3971	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972
3973	if (INACTIVE != ioa_cfg->sdt_state) {
3974		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3975		vfree(dump->ioa_dump.ioa_data);
3976		kfree(dump);
3977		return 0;
3978	}
3979
3980	ioa_cfg->dump = dump;
3981	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3982	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3983		ioa_cfg->dump_taken = 1;
3984		schedule_work(&ioa_cfg->work_q);
3985	}
3986	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3987
3988	return 0;
3989}
3990
3991/**
3992 * ipr_free_dump - Free adapter dump memory
3993 * @ioa_cfg:	ioa config struct
3994 *
3995 * Return value:
3996 *	0 on success / other on failure
3997 **/
3998static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3999{
4000	struct ipr_dump *dump;
4001	unsigned long lock_flags = 0;
4002
4003	ENTER;
4004
4005	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4006	dump = ioa_cfg->dump;
4007	if (!dump) {
4008		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4009		return 0;
4010	}
4011
4012	ioa_cfg->dump = NULL;
4013	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4014
4015	kref_put(&dump->kref, ipr_release_dump);
4016
4017	LEAVE;
4018	return 0;
4019}
4020
4021/**
4022 * ipr_write_dump - Setup dump state of adapter
4023 * @filp:		open sysfs file
4024 * @kobj:		kobject struct
4025 * @bin_attr:		bin_attribute struct
4026 * @buf:		buffer
4027 * @off:		offset
4028 * @count:		buffer size
4029 *
4030 * Return value:
4031 *	count on success / other on failure
4032 **/
4033static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4034			      struct bin_attribute *bin_attr,
4035			      char *buf, loff_t off, size_t count)
4036{
4037	struct device *cdev = container_of(kobj, struct device, kobj);
4038	struct Scsi_Host *shost = class_to_shost(cdev);
4039	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4040	int rc;
4041
4042	if (!capable(CAP_SYS_ADMIN))
4043		return -EACCES;
4044
4045	if (buf[0] == '1')
4046		rc = ipr_alloc_dump(ioa_cfg);
4047	else if (buf[0] == '0')
4048		rc = ipr_free_dump(ioa_cfg);
4049	else
4050		return -EINVAL;
4051
4052	if (rc)
4053		return rc;
4054	else
4055		return count;
4056}
4057
4058static struct bin_attribute ipr_dump_attr = {
4059	.attr =	{
4060		.name = "dump",
4061		.mode = S_IRUSR | S_IWUSR,
4062	},
4063	.size = 0,
4064	.read = ipr_read_dump,
4065	.write = ipr_write_dump
4066};
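
/*
 * Illustrative usage (CONFIG_SCSI_IPR_DUMP only): "echo 1 > dump" prepares
 * dump memory, the binary dump is then read back from the same attribute,
 * and "echo 0 > dump" releases it.
 */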
4067#else
4068static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4069#endif
4070
4071/**
4072 * ipr_change_queue_depth - Change the device's queue depth
4073 * @sdev:	scsi device struct
4074 * @qdepth:	depth to set
4075 * @reason:	calling context
4076 *
4077 * Return value:
4078 * 	actual depth set
4079 **/
4080static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4081				  int reason)
4082{
4083	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4084	struct ipr_resource_entry *res;
4085	unsigned long lock_flags = 0;
4086
4087	if (reason != SCSI_QDEPTH_DEFAULT)
4088		return -EOPNOTSUPP;
4089
4090	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4091	res = (struct ipr_resource_entry *)sdev->hostdata;
4092
4093	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4094		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4095	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4096
4097	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4098	return sdev->queue_depth;
4099}
4100
4101/**
4102 * ipr_change_queue_type - Change the device's queue type
4103 * @sdev:		scsi device struct
4104 * @tag_type:	type of tags to use
4105 *
4106 * Return value:
4107 * 	actual queue type set
4108 **/
4109static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4110{
4111	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4112	struct ipr_resource_entry *res;
4113	unsigned long lock_flags = 0;
4114
4115	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4116	res = (struct ipr_resource_entry *)sdev->hostdata;
4117
4118	if (res) {
4119		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4120			/*
4121			 * We don't bother quiescing the device here since the
4122			 * adapter firmware does it for us.
4123			 */
4124			scsi_set_tag_type(sdev, tag_type);
4125
4126			if (tag_type)
4127				scsi_activate_tcq(sdev, sdev->queue_depth);
4128			else
4129				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4130		} else
4131			tag_type = 0;
4132	} else
4133		tag_type = 0;
4134
4135	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4136	return tag_type;
4137}
4138
4139/**
4140 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4141 * @dev:	device struct
4142 * @attr:	device attribute structure
4143 * @buf:	buffer
4144 *
4145 * Return value:
4146 * 	number of bytes printed to buffer
4147 **/
4148static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4149{
4150	struct scsi_device *sdev = to_scsi_device(dev);
4151	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4152	struct ipr_resource_entry *res;
4153	unsigned long lock_flags = 0;
4154	ssize_t len = -ENXIO;
4155
4156	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4157	res = (struct ipr_resource_entry *)sdev->hostdata;
4158	if (res)
4159		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4160	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4161	return len;
4162}
4163
4164static struct device_attribute ipr_adapter_handle_attr = {
4165	.attr = {
4166		.name = 	"adapter_handle",
4167		.mode =		S_IRUSR,
4168	},
4169	.show = ipr_show_adapter_handle
4170};
4171
4172/**
4173 * ipr_show_resource_path - Show the resource path or the resource address for
4174 *			    this device.
4175 * @dev:	device struct
4176 * @attr:	device attribute structure
4177 * @buf:	buffer
4178 *
4179 * Return value:
4180 * 	number of bytes printed to buffer
4181 **/
4182static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4183{
4184	struct scsi_device *sdev = to_scsi_device(dev);
4185	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4186	struct ipr_resource_entry *res;
4187	unsigned long lock_flags = 0;
4188	ssize_t len = -ENXIO;
4189	char buffer[IPR_MAX_RES_PATH_LENGTH];
4190
4191	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4192	res = (struct ipr_resource_entry *)sdev->hostdata;
4193	if (res && ioa_cfg->sis64)
4194		len = snprintf(buf, PAGE_SIZE, "%s\n",
4195			       ipr_format_res_path(res->res_path, buffer,
4196						   sizeof(buffer)));
4197	else if (res)
4198		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4199			       res->bus, res->target, res->lun);
4200
4201	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202	return len;
4203}
4204
4205static struct device_attribute ipr_resource_path_attr = {
4206	.attr = {
4207		.name = 	"resource_path",
4208		.mode =		S_IRUGO,
4209	},
4210	.show = ipr_show_resource_path
4211};
4212
4213/**
4214 * ipr_show_device_id - Show the device_id for this device.
4215 * @dev:	device struct
4216 * @attr:	device attribute structure
4217 * @buf:	buffer
4218 *
4219 * Return value:
4220 *	number of bytes printed to buffer
4221 **/
4222static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4223{
4224	struct scsi_device *sdev = to_scsi_device(dev);
4225	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4226	struct ipr_resource_entry *res;
4227	unsigned long lock_flags = 0;
4228	ssize_t len = -ENXIO;
4229
4230	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4231	res = (struct ipr_resource_entry *)sdev->hostdata;
4232	if (res && ioa_cfg->sis64)
4233		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4234	else if (res)
4235		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4236
4237	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4238	return len;
4239}
4240
4241static struct device_attribute ipr_device_id_attr = {
4242	.attr = {
4243		.name =		"device_id",
4244		.mode =		S_IRUGO,
4245	},
4246	.show = ipr_show_device_id
4247};
4248
4249/**
4250 * ipr_show_resource_type - Show the resource type for this device.
4251 * @dev:	device struct
4252 * @attr:	device attribute structure
4253 * @buf:	buffer
4254 *
4255 * Return value:
4256 *	number of bytes printed to buffer
4257 **/
4258static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4259{
4260	struct scsi_device *sdev = to_scsi_device(dev);
4261	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4262	struct ipr_resource_entry *res;
4263	unsigned long lock_flags = 0;
4264	ssize_t len = -ENXIO;
4265
4266	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4267	res = (struct ipr_resource_entry *)sdev->hostdata;
4268
4269	if (res)
4270		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4271
4272	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4273	return len;
4274}
4275
4276static struct device_attribute ipr_resource_type_attr = {
4277	.attr = {
4278		.name =		"resource_type",
4279		.mode =		S_IRUGO,
4280	},
4281	.show = ipr_show_resource_type
4282};
4283
4284static struct device_attribute *ipr_dev_attrs[] = {
4285	&ipr_adapter_handle_attr,
4286	&ipr_resource_path_attr,
4287	&ipr_device_id_attr,
4288	&ipr_resource_type_attr,
4289	NULL,
4290};
4291
4292/**
4293 * ipr_biosparam - Return the HSC mapping
4294 * @sdev:			scsi device struct
4295 * @block_device:	block device pointer
4296 * @capacity:		capacity of the device
4297 * @parm:			Array containing returned HSC values.
4298 *
4299 * This function generates the HSC parms that fdisk uses.
4300 * We want to make sure we return something that places partitions
4301 * on 4k boundaries for best performance with the IOA.
4302 *
4303 * Return value:
4304 * 	0 on success
4305 **/
4306static int ipr_biosparam(struct scsi_device *sdev,
4307			 struct block_device *block_device,
4308			 sector_t capacity, int *parm)
4309{
4310	int heads, sectors;
4311	sector_t cylinders;
4312
4313	heads = 128;
4314	sectors = 32;
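	/*
	 * 128 heads * 32 sectors = 4096 sectors (2 MB) per cylinder, so
	 * cylinder-aligned partitions land on 4k boundaries.
	 */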
4315
4316	cylinders = capacity;
4317	sector_div(cylinders, (128 * 32));
4318
4319	/* return result */
4320	parm[0] = heads;
4321	parm[1] = sectors;
4322	parm[2] = cylinders;
4323
4324	return 0;
4325}
4326
4327/**
4328 * ipr_find_starget - Find target based on bus/target.
4329 * @starget:	scsi target struct
4330 *
4331 * Return value:
4332 * 	resource entry pointer if found / NULL if not found
4333 **/
4334static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4335{
4336	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4337	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4338	struct ipr_resource_entry *res;
4339
4340	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4341		if ((res->bus == starget->channel) &&
4342		    (res->target == starget->id) &&
4343		    (res->lun == 0)) {
4344			return res;
4345		}
4346	}
4347
4348	return NULL;
4349}
4350
4351static struct ata_port_info sata_port_info;
4352
4353/**
4354 * ipr_target_alloc - Prepare for commands to a SCSI target
4355 * @starget:	scsi target struct
4356 *
4357 * If the device is a SATA device, this function allocates an
4358 * ATA port with libata, else it does nothing.
4359 *
4360 * Return value:
4361 * 	0 on success / non-0 on failure
4362 **/
4363static int ipr_target_alloc(struct scsi_target *starget)
4364{
4365	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4366	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4367	struct ipr_sata_port *sata_port;
4368	struct ata_port *ap;
4369	struct ipr_resource_entry *res;
4370	unsigned long lock_flags;
4371
4372	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4373	res = ipr_find_starget(starget);
4374	starget->hostdata = NULL;
4375
4376	if (res && ipr_is_gata(res)) {
4377		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4378		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4379		if (!sata_port)
4380			return -ENOMEM;
4381
4382		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4383		if (ap) {
4384			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4385			sata_port->ioa_cfg = ioa_cfg;
4386			sata_port->ap = ap;
4387			sata_port->res = res;
4388
4389			res->sata_port = sata_port;
4390			ap->private_data = sata_port;
4391			starget->hostdata = sata_port;
4392		} else {
4393			kfree(sata_port);
4394			return -ENOMEM;
4395		}
4396	}
4397	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4398
4399	return 0;
4400}
4401
4402/**
4403 * ipr_target_destroy - Destroy a SCSI target
4404 * @starget:	scsi target struct
4405 *
4406 * If the device was a SATA device, this function frees the libata
4407 * ATA port, else it does nothing.
4408 *
4409 **/
4410static void ipr_target_destroy(struct scsi_target *starget)
4411{
4412	struct ipr_sata_port *sata_port = starget->hostdata;
4413	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4414	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4415
4416	if (ioa_cfg->sis64) {
4417		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4418			clear_bit(starget->id, ioa_cfg->array_ids);
4419		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4420			clear_bit(starget->id, ioa_cfg->vset_ids);
4421		else if (starget->channel == 0)
4422			clear_bit(starget->id, ioa_cfg->target_ids);
4423	}
4424
4425	if (sata_port) {
4426		starget->hostdata = NULL;
4427		ata_sas_port_destroy(sata_port->ap);
4428		kfree(sata_port);
4429	}
4430}
4431
4432/**
4433 * ipr_find_sdev - Find device based on bus/target/lun.
4434 * @sdev:	scsi device struct
4435 *
4436 * Return value:
4437 * 	resource entry pointer if found / NULL if not found
4438 **/
4439static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4440{
4441	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4442	struct ipr_resource_entry *res;
4443
4444	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4445		if ((res->bus == sdev->channel) &&
4446		    (res->target == sdev->id) &&
4447		    (res->lun == sdev->lun))
4448			return res;
4449	}
4450
4451	return NULL;
4452}
4453
4454/**
4455 * ipr_slave_destroy - Unconfigure a SCSI device
4456 * @sdev:	scsi device struct
4457 *
4458 * Return value:
4459 * 	nothing
4460 **/
4461static void ipr_slave_destroy(struct scsi_device *sdev)
4462{
4463	struct ipr_resource_entry *res;
4464	struct ipr_ioa_cfg *ioa_cfg;
4465	unsigned long lock_flags = 0;
4466
4467	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4468
4469	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4470	res = (struct ipr_resource_entry *) sdev->hostdata;
4471	if (res) {
4472		if (res->sata_port)
4473			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4474		sdev->hostdata = NULL;
4475		res->sdev = NULL;
4476		res->sata_port = NULL;
4477	}
4478	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4479}
4480
4481/**
4482 * ipr_slave_configure - Configure a SCSI device
4483 * @sdev:	scsi device struct
4484 *
4485 * This function configures the specified scsi device.
4486 *
4487 * Return value:
4488 * 	0 on success
4489 **/
4490static int ipr_slave_configure(struct scsi_device *sdev)
4491{
4492	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4493	struct ipr_resource_entry *res;
4494	struct ata_port *ap = NULL;
4495	unsigned long lock_flags = 0;
4496	char buffer[IPR_MAX_RES_PATH_LENGTH];
4497
4498	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4499	res = sdev->hostdata;
4500	if (res) {
4501		if (ipr_is_af_dasd_device(res))
4502			sdev->type = TYPE_RAID;
4503		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4504			sdev->scsi_level = 4;
4505			sdev->no_uld_attach = 1;
4506		}
4507		if (ipr_is_vset_device(res)) {
4508			blk_queue_rq_timeout(sdev->request_queue,
4509					     IPR_VSET_RW_TIMEOUT);
4510			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4511		}
4512		if (ipr_is_gata(res) && res->sata_port)
4513			ap = res->sata_port->ap;
4514		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515
4516		if (ap) {
4517			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4518			ata_sas_slave_configure(sdev, ap);
4519		} else
4520			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4521		if (ioa_cfg->sis64)
4522			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4523				    ipr_format_res_path(res->res_path, buffer,
4524							sizeof(buffer)));
4525		return 0;
4526	}
4527	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4528	return 0;
4529}
4530
4531/**
4532 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4533 * @sdev:	scsi device struct
4534 *
4535 * This function initializes an ATA port so that future commands
4536 * sent through queuecommand will work.
4537 *
4538 * Return value:
4539 * 	0 on success
4540 **/
4541static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4542{
4543	struct ipr_sata_port *sata_port = NULL;
4544	int rc = -ENXIO;
4545
4546	ENTER;
4547	if (sdev->sdev_target)
4548		sata_port = sdev->sdev_target->hostdata;
4549	if (sata_port)
4550		rc = ata_sas_port_init(sata_port->ap);
4551	if (rc)
4552		ipr_slave_destroy(sdev);
4553
4554	LEAVE;
4555	return rc;
4556}
4557
4558/**
4559 * ipr_slave_alloc - Prepare for commands to a device.
4560 * @sdev:	scsi device struct
4561 *
4562 * This function saves a pointer to the resource entry
4563 * in the scsi device struct if the device exists. We
4564 * can then use this pointer in ipr_queuecommand when
4565 * handling new commands.
4566 *
4567 * Return value:
4568 * 	0 on success / -ENXIO if device does not exist
4569 **/
4570static int ipr_slave_alloc(struct scsi_device *sdev)
4571{
4572	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4573	struct ipr_resource_entry *res;
4574	unsigned long lock_flags;
4575	int rc = -ENXIO;
4576
4577	sdev->hostdata = NULL;
4578
4579	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4580
4581	res = ipr_find_sdev(sdev);
4582	if (res) {
4583		res->sdev = sdev;
4584		res->add_to_ml = 0;
4585		res->in_erp = 0;
4586		sdev->hostdata = res;
4587		if (!ipr_is_naca_model(res))
4588			res->needs_sync_complete = 1;
4589		rc = 0;
4590		if (ipr_is_gata(res)) {
4591			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4592			return ipr_ata_slave_alloc(sdev);
4593		}
4594	}
4595
4596	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4597
4598	return rc;
4599}
4600
4601/**
4602 * __ipr_eh_host_reset - Reset the host adapter
4603 * @scsi_cmd:	scsi command struct
4604 *
4605 * Return value:
4606 * 	SUCCESS / FAILED
4607 **/
4608static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
4609{
4610	struct ipr_ioa_cfg *ioa_cfg;
4611	int rc;
4612
4613	ENTER;
4614	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4615
4616	if (!ioa_cfg->in_reset_reload) {
4617		dev_err(&ioa_cfg->pdev->dev,
4618			"Adapter being reset as a result of error recovery.\n");
4619
4620		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4621			ioa_cfg->sdt_state = GET_DUMP;
4622	}
4623
4624	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4625
4626	LEAVE;
4627	return rc;
4628}
4629
4630static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4631{
4632	int rc;
4633
4634	spin_lock_irq(cmd->device->host->host_lock);
4635	rc = __ipr_eh_host_reset(cmd);
4636	spin_unlock_irq(cmd->device->host->host_lock);
4637
4638	return rc;
4639}
4640
4641/**
4642 * ipr_device_reset - Reset the device
4643 * @ioa_cfg:	ioa config struct
4644 * @res:		resource entry struct
4645 *
4646 * This function issues a device reset to the affected device.
4647 * If the device is a SCSI device, a LUN reset will be sent
4648 * to the device first. If that does not work, a target reset
4649 * will be sent. If the device is a SATA device, a PHY reset will
4650 * be sent.
4651 *
4652 * Return value:
4653 *	0 on success / non-zero on failure
4654 **/
4655static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4656			    struct ipr_resource_entry *res)
4657{
4658	struct ipr_cmnd *ipr_cmd;
4659	struct ipr_ioarcb *ioarcb;
4660	struct ipr_cmd_pkt *cmd_pkt;
4661	struct ipr_ioarcb_ata_regs *regs;
4662	u32 ioasc;
4663
4664	ENTER;
4665	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4666	ioarcb = &ipr_cmd->ioarcb;
4667	cmd_pkt = &ioarcb->cmd_pkt;
4668
4669	if (ipr_cmd->ioa_cfg->sis64) {
4670		regs = &ipr_cmd->i.ata_ioadl.regs;
4671		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4672	} else
4673		regs = &ioarcb->u.add_data.u.regs;
4674
4675	ioarcb->res_handle = res->res_handle;
4676	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4677	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4678	if (ipr_is_gata(res)) {
4679		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4680		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4681		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4682	}
4683
4684	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4685	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4686	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4687	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4688		if (ipr_cmd->ioa_cfg->sis64)
4689			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4690			       sizeof(struct ipr_ioasa_gata));
4691		else
4692			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4693			       sizeof(struct ipr_ioasa_gata));
4694	}
4695
4696	LEAVE;
4697	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4698}
4699
4700/**
4701 * ipr_sata_reset - Reset the SATA port
4702 * @link:	SATA link to reset
4703 * @classes:	class of the attached device
4704 *
4705 * This function issues a SATA phy reset to the affected ATA link.
4706 *
4707 * Return value:
4708 *	0 on success / non-zero on failure
4709 **/
4710static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4711				unsigned long deadline)
4712{
4713	struct ipr_sata_port *sata_port = link->ap->private_data;
4714	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4715	struct ipr_resource_entry *res;
4716	unsigned long lock_flags = 0;
4717	int rc = -ENXIO;
4718
4719	ENTER;
4720	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4721	while (ioa_cfg->in_reset_reload) {
4722		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4723		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4724		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4725	}
4726
4727	res = sata_port->res;
4728	if (res) {
4729		rc = ipr_device_reset(ioa_cfg, res);
4730		*classes = res->ata_class;
4731	}
4732
4733	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4734	LEAVE;
4735	return rc;
4736}
4737
4738/**
4739 * __ipr_eh_dev_reset - Reset the device
4740 * @scsi_cmd:	scsi command struct
4741 *
4742 * This function issues a device reset to the affected device.
4743 * A LUN reset will be sent to the device first. If that does
4744 * not work, a target reset will be sent.
4745 *
4746 * Return value:
4747 *	SUCCESS / FAILED
4748 **/
4749static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4750{
4751	struct ipr_cmnd *ipr_cmd;
4752	struct ipr_ioa_cfg *ioa_cfg;
4753	struct ipr_resource_entry *res;
4754	struct ata_port *ap;
4755	int rc = 0;
4756
4757	ENTER;
4758	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4759	res = scsi_cmd->device->hostdata;
4760
4761	if (!res)
4762		return FAILED;
4763
4764	/*
4765	 * If we are currently going through reset/reload, return failed. This will force the
4766	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4767	 * reset to complete
4768	 */
4769	if (ioa_cfg->in_reset_reload)
4770		return FAILED;
4771	if (ioa_cfg->ioa_is_dead)
4772		return FAILED;
4773
4774	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4775		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4776			if (ipr_cmd->scsi_cmd)
4777				ipr_cmd->done = ipr_scsi_eh_done;
4778			if (ipr_cmd->qc)
4779				ipr_cmd->done = ipr_sata_eh_done;
4780			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4781				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4782				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4783			}
4784		}
4785	}
4786
4787	res->resetting_device = 1;
4788	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4789
4790	if (ipr_is_gata(res) && res->sata_port) {
4791		ap = res->sata_port->ap;
4792		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4793		ata_std_error_handler(ap);
4794		spin_lock_irq(scsi_cmd->device->host->host_lock);
4795
4796		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4797			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4798				rc = -EIO;
4799				break;
4800			}
4801		}
4802	} else
4803		rc = ipr_device_reset(ioa_cfg, res);
4804	res->resetting_device = 0;
4805
4806	LEAVE;
4807	return (rc ? FAILED : SUCCESS);
4808}
4809
4810static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
4811{
4812	int rc;
4813
4814	spin_lock_irq(cmd->device->host->host_lock);
4815	rc = __ipr_eh_dev_reset(cmd);
4816	spin_unlock_irq(cmd->device->host->host_lock);
4817
4818	return rc;
4819}
4820
4821/**
4822 * ipr_bus_reset_done - Op done function for bus reset.
4823 * @ipr_cmd:	ipr command struct
4824 *
4825 * This function is the op done function for a bus reset
4826 *
4827 * Return value:
4828 * 	none
4829 **/
4830static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4831{
4832	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4833	struct ipr_resource_entry *res;
4834
4835	ENTER;
4836	if (!ioa_cfg->sis64)
4837		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4838			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4839				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4840				break;
4841			}
4842		}
4843
4844	/*
4845	 * If abort has not completed, indicate the reset has, else call the
4846	 * abort's done function to wake the sleeping eh thread
4847	 */
4848	if (ipr_cmd->sibling->sibling)
4849		ipr_cmd->sibling->sibling = NULL;
4850	else
4851		ipr_cmd->sibling->done(ipr_cmd->sibling);
4852
4853	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4854	LEAVE;
4855}
4856
4857/**
4858 * ipr_abort_timeout - An abort task has timed out
4859 * @ipr_cmd:	ipr command struct
4860 *
4861 * This function handles when an abort task times out. If this
4862 * happens we issue a bus reset since we have resources tied
4863 * up that must be freed before returning to the midlayer.
4864 *
4865 * Return value:
4866 *	none
4867 **/
4868static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4869{
4870	struct ipr_cmnd *reset_cmd;
4871	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4872	struct ipr_cmd_pkt *cmd_pkt;
4873	unsigned long lock_flags = 0;
4874
4875	ENTER;
4876	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4877	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4878		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4879		return;
4880	}
4881
4882	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4883	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
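	/*
	 * Cross-link the abort and the bus reset so ipr_bus_reset_done() can
	 * tell whether the abort completed while the reset was outstanding.
	 */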
4884	ipr_cmd->sibling = reset_cmd;
4885	reset_cmd->sibling = ipr_cmd;
4886	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4887	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4888	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4889	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4890	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4891
4892	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4893	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4894	LEAVE;
4895}
4896
4897/**
4898 * ipr_cancel_op - Cancel specified op
4899 * @scsi_cmd:	scsi command struct
4900 *
4901 * This function cancels specified op.
4902 *
4903 * Return value:
4904 *	SUCCESS / FAILED
4905 **/
4906static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
4907{
4908	struct ipr_cmnd *ipr_cmd;
4909	struct ipr_ioa_cfg *ioa_cfg;
4910	struct ipr_resource_entry *res;
4911	struct ipr_cmd_pkt *cmd_pkt;
4912	u32 ioasc, int_reg;
4913	int op_found = 0;
4914
4915	ENTER;
4916	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4917	res = scsi_cmd->device->hostdata;
4918
4919	/* If we are currently going through reset/reload, return failed.
4920	 * This will force the mid-layer to call ipr_eh_host_reset,
4921	 * which will then go to sleep and wait for the reset to complete
4922	 */
4923	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4924		return FAILED;
4925	if (!res)
4926		return FAILED;
4927
4928	/*
4929	 * If we are aborting a timed out op, chances are the timeout was caused
4930	 * by an EEH error not yet detected. In such cases, reading a register will
4931	 * trigger the EEH recovery infrastructure.
4932	 */
4933	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4934
4935	if (!ipr_is_gscsi(res))
4936		return FAILED;
4937
4938	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4939		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4940			ipr_cmd->done = ipr_scsi_eh_done;
4941			op_found = 1;
4942			break;
4943		}
4944	}
4945
4946	if (!op_found)
4947		return SUCCESS;
4948
4949	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4950	ipr_cmd->ioarcb.res_handle = res->res_handle;
4951	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4952	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4953	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4954	ipr_cmd->u.sdev = scsi_cmd->device;
4955
4956	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4957		    scsi_cmd->cmnd[0]);
4958	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4959	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4960
4961	/*
4962	 * If the abort task timed out and we sent a bus reset, we will get
4963	 * one of the following responses to the abort
4964	 */
4965	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4966		ioasc = 0;
4967		ipr_trace;
4968	}
4969
4970	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4971	if (!ipr_is_naca_model(res))
4972		res->needs_sync_complete = 1;
4973
4974	LEAVE;
4975	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4976}
4977
4978/**
4979 * ipr_eh_abort - Abort a single op
4980 * @scsi_cmd:	scsi command struct
4981 *
4982 * Return value:
4983 * 	SUCCESS / FAILED
4984 **/
4985static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
4986{
4987	unsigned long flags;
4988	int rc;
4989
4990	ENTER;
4991
4992	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4993	rc = ipr_cancel_op(scsi_cmd);
4994	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4995
4996	LEAVE;
4997	return rc;
4998}
4999
5000/**
5001 * ipr_handle_other_interrupt - Handle "other" interrupts
5002 * @ioa_cfg:	ioa config struct
5003 * @int_reg:	interrupt register
5004 *
5005 * Return value:
5006 * 	IRQ_NONE / IRQ_HANDLED
5007 **/
5008static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5009					      u32 int_reg)
5010{
5011	irqreturn_t rc = IRQ_HANDLED;
5012	u32 int_mask_reg;
5013
5014	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5015	int_reg &= ~int_mask_reg;
5016
5017	/* If no operational interrupt is pending on the adapter, ignore it,
5018	 * unless this is SIS 64, in which case check for a stage change interrupt.
5019	 */
5020	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5021		if (ioa_cfg->sis64) {
5022			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5023			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5024			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5025
5026				/* clear stage change */
5027				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5028				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5029				list_del(&ioa_cfg->reset_cmd->queue);
5030				del_timer(&ioa_cfg->reset_cmd->timer);
5031				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5032				return IRQ_HANDLED;
5033			}
5034		}
5035
5036		return IRQ_NONE;
5037	}
5038
5039	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5040		/* Mask the interrupt */
5041		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5042
5043		/* Clear the interrupt */
5044		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5045		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5046
5047		list_del(&ioa_cfg->reset_cmd->queue);
5048		del_timer(&ioa_cfg->reset_cmd->timer);
5049		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5050	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5051		if (ipr_debug && printk_ratelimit())
5052			dev_err(&ioa_cfg->pdev->dev,
5053				"Spurious interrupt detected. 0x%08X\n", int_reg);
5054		writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5055		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5056		return IRQ_NONE;
5057	} else {
5058		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5059			ioa_cfg->ioa_unit_checked = 1;
5060		else
5061			dev_err(&ioa_cfg->pdev->dev,
5062				"Permanent IOA failure. 0x%08X\n", int_reg);
5063
5064		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5065			ioa_cfg->sdt_state = GET_DUMP;
5066
5067		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5068		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5069	}
5070
5071	return rc;
5072}
5073
5074/**
5075 * ipr_isr_eh - Interrupt service routine error handler
5076 * @ioa_cfg:	ioa config struct
5077 * @msg:	message to log
5078 *
5079 * Return value:
5080 * 	none
5081 **/
5082static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5083{
5084	ioa_cfg->errors_logged++;
5085	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5086
5087	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5088		ioa_cfg->sdt_state = GET_DUMP;
5089
5090	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5091}
5092
5093/**
5094 * ipr_isr - Interrupt service routine
5095 * @irq:	irq number
5096 * @devp:	pointer to ioa config struct
5097 *
5098 * Return value:
5099 * 	IRQ_NONE / IRQ_HANDLED
5100 **/
5101static irqreturn_t ipr_isr(int irq, void *devp)
5102{
5103	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5104	unsigned long lock_flags = 0;
5105	u32 int_reg = 0;
5106	u32 ioasc;
5107	u16 cmd_index;
5108	int num_hrrq = 0;
5109	int irq_none = 0;
5110	struct ipr_cmnd *ipr_cmd;
5111	irqreturn_t rc = IRQ_NONE;
5112
5113	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5114
5115	/* If interrupts are disabled, ignore the interrupt */
5116	if (!ioa_cfg->allow_interrupts) {
5117		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5118		return IRQ_NONE;
5119	}
5120
5121	while (1) {
5122		ipr_cmd = NULL;
5123
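		/*
		 * The IOA flips a toggle bit in the response handles on each
		 * pass through the circular host RRQ; entries are valid until
		 * the bit stops matching the value we expect.
		 */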
5124		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5125		       ioa_cfg->toggle_bit) {
5126
5127			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5128				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5129
5130			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5131				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5132				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5133				return IRQ_HANDLED;
5134			}
5135
5136			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5137
5138			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5139
5140			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5141
5142			list_del(&ipr_cmd->queue);
5143			del_timer(&ipr_cmd->timer);
5144			ipr_cmd->done(ipr_cmd);
5145
5146			rc = IRQ_HANDLED;
5147
5148			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5149				ioa_cfg->hrrq_curr++;
5150			} else {
5151				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5152				ioa_cfg->toggle_bit ^= 1u;
5153			}
5154		}
5155
5156		if (ipr_cmd != NULL) {
5157			/* Clear the PCI interrupt */
5158			num_hrrq = 0;
5159			do {
5160				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5161				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5162			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5163					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5164
5165		} else if (rc == IRQ_NONE && irq_none == 0) {
5166			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5167			irq_none++;
5168		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5169			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5170			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5171			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5172			return IRQ_HANDLED;
5173		} else
5174			break;
5175	}
5176
5177	if (unlikely(rc == IRQ_NONE))
5178		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5179
5180	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5181	return rc;
5182}
5183
5184/**
5185 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5186 * @ioa_cfg:	ioa config struct
5187 * @ipr_cmd:	ipr command struct
5188 *
5189 * Return value:
5190 * 	0 on success / -1 on failure
5191 **/
5192static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5193			     struct ipr_cmnd *ipr_cmd)
5194{
5195	int i, nseg;
5196	struct scatterlist *sg;
5197	u32 length;
5198	u32 ioadl_flags = 0;
5199	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5200	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5201	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5202
5203	length = scsi_bufflen(scsi_cmd);
5204	if (!length)
5205		return 0;
5206
5207	nseg = scsi_dma_map(scsi_cmd);
5208	if (nseg < 0) {
5209		if (printk_ratelimit())
5210			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5211		return -1;
5212	}
5213
5214	ipr_cmd->dma_use_sg = nseg;
5215
5216	ioarcb->data_transfer_length = cpu_to_be32(length);
5217	ioarcb->ioadl_len =
5218		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5219
5220	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5221		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5222		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5223	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5224		ioadl_flags = IPR_IOADL_FLAGS_READ;
5225
5226	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5227		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5228		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5229		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5230	}
5231
5232	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5233	return 0;
5234}
5235
5236/**
5237 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5238 * @ioa_cfg:	ioa config struct
5239 * @ipr_cmd:	ipr command struct
5240 *
5241 * Return value:
5242 * 	0 on success / -1 on failure
5243 **/
5244static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5245			   struct ipr_cmnd *ipr_cmd)
5246{
5247	int i, nseg;
5248	struct scatterlist *sg;
5249	u32 length;
5250	u32 ioadl_flags = 0;
5251	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5252	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5253	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5254
5255	length = scsi_bufflen(scsi_cmd);
5256	if (!length)
5257		return 0;
5258
5259	nseg = scsi_dma_map(scsi_cmd);
5260	if (nseg < 0) {
5261		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5262		return -1;
5263	}
5264
5265	ipr_cmd->dma_use_sg = nseg;
5266
5267	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5268		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5269		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5270		ioarcb->data_transfer_length = cpu_to_be32(length);
5271		ioarcb->ioadl_len =
5272			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5273	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5274		ioadl_flags = IPR_IOADL_FLAGS_READ;
5275		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5276		ioarcb->read_ioadl_len =
5277			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5278	}
5279
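	/*
	 * A short S/G list fits inline in the IOARCB itself, sparing the
	 * adapter a separate DMA fetch of the descriptor list.
	 */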
5280	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5281		ioadl = ioarcb->u.add_data.u.ioadl;
5282		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5283				    offsetof(struct ipr_ioarcb, u.add_data));
5284		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5285	}
5286
5287	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5288		ioadl[i].flags_and_data_len =
5289			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5290		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5291	}
5292
5293	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5294	return 0;
5295}
5296
5297/**
5298 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5299 * @scsi_cmd:	scsi command struct
5300 *
5301 * Return value:
5302 * 	task attributes
5303 **/
5304static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5305{
5306	u8 tag[2];
5307	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5308
5309	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5310		switch (tag[0]) {
5311		case MSG_SIMPLE_TAG:
5312			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5313			break;
5314		case MSG_HEAD_TAG:
5315			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5316			break;
5317		case MSG_ORDERED_TAG:
5318			rc = IPR_FLAGS_LO_ORDERED_TASK;
5319			break;
5320		}
5321	}
5322
5323	return rc;
5324}
5325
5326/**
5327 * ipr_erp_done - Process completion of ERP for a device
5328 * @ipr_cmd:		ipr command struct
5329 *
5330 * This function copies the sense buffer into the scsi_cmd
5331 * struct and calls the scsi_done function.
5332 *
5333 * Return value:
5334 * 	nothing
5335 **/
5336static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5337{
5338	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5339	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5340	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5341	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5342
5343	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5344		scsi_cmd->result |= (DID_ERROR << 16);
5345		scmd_printk(KERN_ERR, scsi_cmd,
5346			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5347	} else {
5348		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5349		       SCSI_SENSE_BUFFERSIZE);
5350	}
5351
5352	if (res) {
5353		if (!ipr_is_naca_model(res))
5354			res->needs_sync_complete = 1;
5355		res->in_erp = 0;
5356	}
5357	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5358	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5359	scsi_cmd->scsi_done(scsi_cmd);
5360}
5361
5362/**
5363 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5364 * @ipr_cmd:	ipr command struct
5365 *
5366 * Return value:
5367 * 	none
5368 **/
5369static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5370{
5371	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5372	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5373	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5374
5375	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5376	ioarcb->data_transfer_length = 0;
5377	ioarcb->read_data_transfer_length = 0;
5378	ioarcb->ioadl_len = 0;
5379	ioarcb->read_ioadl_len = 0;
5380	ioasa->hdr.ioasc = 0;
5381	ioasa->hdr.residual_data_len = 0;
5382
5383	if (ipr_cmd->ioa_cfg->sis64)
5384		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5385			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5386	else {
5387		ioarcb->write_ioadl_addr =
5388			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5389		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5390	}
5391}
5392
5393/**
5394 * ipr_erp_request_sense - Send request sense to a device
5395 * @ipr_cmd:	ipr command struct
5396 *
5397 * This function sends a request sense to a device as a result
5398 * of a check condition.
5399 *
5400 * Return value:
5401 * 	nothing
5402 **/
5403static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5404{
5405	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5406	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5407
5408	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5409		ipr_erp_done(ipr_cmd);
5410		return;
5411	}
5412
5413	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5414
5415	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5416	cmd_pkt->cdb[0] = REQUEST_SENSE;
5417	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5418	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5419	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5420	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5421
5422	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5423		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5424
5425	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5426		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5427}
5428
5429/**
5430 * ipr_erp_cancel_all - Send cancel all to a device
5431 * @ipr_cmd:	ipr command struct
5432 *
5433 * This function sends a cancel all to a device to clear the
5434 * queue. If we are running TCQ on the device, QERR is set to 1,
5435 * which means all outstanding ops have been dropped on the floor.
5436 * Cancel all will return them to us.
5437 *
5438 * Return value:
5439 * 	nothing
5440 **/
5441static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5442{
5443	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5444	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5445	struct ipr_cmd_pkt *cmd_pkt;
5446
5447	res->in_erp = 1;
5448
5449	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5450
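	/* An untagged device has no queue to cancel; go straight to sense */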
5451	if (!scsi_get_tag_type(scsi_cmd->device)) {
5452		ipr_erp_request_sense(ipr_cmd);
5453		return;
5454	}
5455
5456	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5457	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5458	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5459
5460	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5461		   IPR_CANCEL_ALL_TIMEOUT);
5462}
5463
5464/**
5465 * ipr_dump_ioasa - Dump contents of IOASA
5466 * @ioa_cfg:	ioa config struct
5467 * @ipr_cmd:	ipr command struct
5468 * @res:		resource entry struct
5469 *
5470 * This function is invoked by the interrupt handler when ops
5471 * fail. It will log the IOASA if appropriate. Only called
5472 * for GPDD ops.
5473 *
5474 * Return value:
5475 * 	none
5476 **/
5477static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5478			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5479{
5480	int i;
5481	u16 data_len;
5482	u32 ioasc, fd_ioasc;
5483	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5484	__be32 *ioasa_data = (__be32 *)ioasa;
5485	int error_index;
5486
5487	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5488	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5489
5490	if (!ioasc)
5491		return;
5492
5493	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5494		return;
5495
5496	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5497		error_index = ipr_get_error(fd_ioasc);
5498	else
5499		error_index = ipr_get_error(ioasc);
5500
5501	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5502		/* Don't log an error if the IOA already logged one */
5503		if (ioasa->hdr.ilid != 0)
5504			return;
5505
5506		if (!ipr_is_gscsi(res))
5507			return;
5508
5509		if (ipr_error_table[error_index].log_ioasa == 0)
5510			return;
5511	}
5512
5513	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5514
5515	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5516	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5517		data_len = sizeof(struct ipr_ioasa64);
5518	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5519		data_len = sizeof(struct ipr_ioasa);
5520
5521	ipr_err("IOASA Dump:\n");
5522
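	/* Dump four 32-bit words per line; the first column is the byte offset */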
5523	for (i = 0; i < data_len / 4; i += 4) {
5524		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5525			be32_to_cpu(ioasa_data[i]),
5526			be32_to_cpu(ioasa_data[i+1]),
5527			be32_to_cpu(ioasa_data[i+2]),
5528			be32_to_cpu(ioasa_data[i+3]));
5529	}
5530}
5531
5532/**
5533 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5534 * @ipr_cmd:	ipr command struct
5536 *
5537 * Return value:
5538 * 	none
5539 **/
5540static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5541{
5542	u32 failing_lba;
5543	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5544	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5545	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5546	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5547
5548	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5549
5550	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5551		return;
5552
5553	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5554
5555	if (ipr_is_vset_device(res) &&
5556	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5557	    ioasa->u.vset.failing_lba_hi != 0) {
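		/*
		 * The failing LBA does not fit in 32 bits, so build
		 * descriptor-format (0x72) sense with an information
		 * descriptor carrying the full 64-bit LBA.
		 */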
5558		sense_buf[0] = 0x72;
5559		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5560		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5561		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5562
5563		sense_buf[7] = 12;
5564		sense_buf[8] = 0;
5565		sense_buf[9] = 0x0A;
5566		sense_buf[10] = 0x80;
5567
5568		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5569
5570		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5571		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5572		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5573		sense_buf[15] = failing_lba & 0x000000ff;
5574
5575		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5576
5577		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5578		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5579		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5580		sense_buf[19] = failing_lba & 0x000000ff;
5581	} else {
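		/* Fixed-format (0x70) sense covers the remaining cases */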
5582		sense_buf[0] = 0x70;
5583		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5584		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5585		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5586
5587		/* Illegal request */
5588		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5589		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5590			sense_buf[7] = 10;	/* additional length */
5591
5592			/* IOARCB was in error */
5593			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5594				sense_buf[15] = 0xC0;
5595			else	/* Parameter data was invalid */
5596				sense_buf[15] = 0x80;
5597
5598			sense_buf[16] =
5599			    ((IPR_FIELD_POINTER_MASK &
5600			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5601			sense_buf[17] =
5602			    (IPR_FIELD_POINTER_MASK &
5603			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5604		} else {
5605			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5606				if (ipr_is_vset_device(res))
5607					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5608				else
5609					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5610
5611				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5612				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5613				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5614				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5615				sense_buf[6] = failing_lba & 0x000000ff;
5616			}
5617
5618			sense_buf[7] = 6;	/* additional length */
5619		}
5620	}
5621}
5622
5623/**
5624 * ipr_get_autosense - Copy autosense data to sense buffer
5625 * @ipr_cmd:	ipr command struct
5626 *
5627 * This function copies the autosense buffer to the buffer
5628 * in the scsi_cmd, if there is autosense available.
5629 *
5630 * Return value:
5631 *	1 if autosense was available / 0 if not
5632 **/
5633static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5634{
5635	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5636	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5637
5638	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5639		return 0;
5640
5641	if (ipr_cmd->ioa_cfg->sis64)
5642		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5643		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5644			   SCSI_SENSE_BUFFERSIZE));
5645	else
5646		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5647		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5648			   SCSI_SENSE_BUFFERSIZE));
5649	return 1;
5650}
5651
5652/**
5653 * ipr_erp_start - Process an error response for a SCSI op
5654 * @ioa_cfg:	ioa config struct
5655 * @ipr_cmd:	ipr command struct
5656 *
5657 * This function determines whether or not to initiate ERP
5658 * on the affected device.
5659 *
5660 * Return value:
5661 * 	nothing
5662 **/
5663static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5664			      struct ipr_cmnd *ipr_cmd)
5665{
5666	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5667	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5668	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5669	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5670
5671	if (!res) {
5672		ipr_scsi_eh_done(ipr_cmd);
5673		return;
5674	}
5675
5676	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5677		ipr_gen_sense(ipr_cmd);
5678
5679	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5680
5681	switch (masked_ioasc) {
5682	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5683		if (ipr_is_naca_model(res))
5684			scsi_cmd->result |= (DID_ABORT << 16);
5685		else
5686			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5687		break;
5688	case IPR_IOASC_IR_RESOURCE_HANDLE:
5689	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5690		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5691		break;
5692	case IPR_IOASC_HW_SEL_TIMEOUT:
5693		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5694		if (!ipr_is_naca_model(res))
5695			res->needs_sync_complete = 1;
5696		break;
5697	case IPR_IOASC_SYNC_REQUIRED:
5698		if (!res->in_erp)
5699			res->needs_sync_complete = 1;
5700		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5701		break;
5702	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5703	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5704		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5705		break;
5706	case IPR_IOASC_BUS_WAS_RESET:
5707	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5708		/*
5709		 * Report the bus reset and ask for a retry. The device
5710		 * will give CC/UA the next command.
5711		 */
5712		if (!res->resetting_device)
5713			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5714		scsi_cmd->result |= (DID_ERROR << 16);
5715		if (!ipr_is_naca_model(res))
5716			res->needs_sync_complete = 1;
5717		break;
5718	case IPR_IOASC_HW_DEV_BUS_STATUS:
5719		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5720		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5721			if (!ipr_get_autosense(ipr_cmd)) {
5722				if (!ipr_is_naca_model(res)) {
5723					ipr_erp_cancel_all(ipr_cmd);
5724					return;
5725				}
5726			}
5727		}
5728		if (!ipr_is_naca_model(res))
5729			res->needs_sync_complete = 1;
5730		break;
5731	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5732		break;
5733	default:
5734		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5735			scsi_cmd->result |= (DID_ERROR << 16);
5736		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5737			res->needs_sync_complete = 1;
5738		break;
5739	}
5740
5741	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5742	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5743	scsi_cmd->scsi_done(scsi_cmd);
5744}
5745
5746/**
5747 * ipr_scsi_done - mid-layer done function
5748 * @ipr_cmd:	ipr command struct
5749 *
5750 * This function is invoked by the interrupt handler for
5751 * ops generated by the SCSI mid-layer
5752 *
5753 * Return value:
5754 * 	none
5755 **/
5756static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5757{
5758	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5759	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5760	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5761
5762	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5763
5764	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5765		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5766		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5767		scsi_cmd->scsi_done(scsi_cmd);
5768	} else
5769		ipr_erp_start(ioa_cfg, ipr_cmd);
5770}
5771
5772/**
5773 * ipr_queuecommand - Queue a mid-layer request
5774 * @scsi_cmd:	scsi command struct
5775 * @done:		done function
5776 *
5777 * This function queues a request generated by the mid-layer.
5778 *
5779 * Return value:
5780 *	0 on success
5781 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5782 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5783 **/
5784static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5785			    void (*done) (struct scsi_cmnd *))
5786{
5787	struct ipr_ioa_cfg *ioa_cfg;
5788	struct ipr_resource_entry *res;
5789	struct ipr_ioarcb *ioarcb;
5790	struct ipr_cmnd *ipr_cmd;
5791	int rc = 0;
5792
5793	scsi_cmd->scsi_done = done;
5794	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5795	res = scsi_cmd->device->hostdata;
5796	scsi_cmd->result = (DID_OK << 16);
5797
5798	/*
5799	 * We are currently blocking all devices due to a host reset
5800	 * We have told the host to stop giving us new requests, but
5801	 * ERP ops don't count. FIXME
5802	 */
5803	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5804		return SCSI_MLQUEUE_HOST_BUSY;
5805
5806	/*
5807	 * FIXME - Create scsi_set_host_offline interface
5808	 *  and the ioa_is_dead check can be removed
5809	 */
5810	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5811		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5812		scsi_cmd->result = (DID_NO_CONNECT << 16);
5813		scsi_cmd->scsi_done(scsi_cmd);
5814		return 0;
5815	}
5816
5817	if (ipr_is_gata(res) && res->sata_port)
5818		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5819
5820	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5821	ioarcb = &ipr_cmd->ioarcb;
5822	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5823
5824	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5825	ipr_cmd->scsi_cmd = scsi_cmd;
5826	ioarcb->res_handle = res->res_handle;
5827	ipr_cmd->done = ipr_scsi_done;
5828	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5829
5830	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5831		if (scsi_cmd->underflow == 0)
5832			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5833
5834		if (res->needs_sync_complete) {
5835			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5836			res->needs_sync_complete = 0;
5837		}
5838
5839		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5840		if (ipr_is_gscsi(res))
5841			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5842		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5843		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5844	}
5845
5846	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5847	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5848		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5849
5850	if (likely(rc == 0)) {
5851		if (ioa_cfg->sis64)
5852			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5853		else
5854			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5855	}
5856
5857	if (likely(rc == 0)) {
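		/* Order IOARCB memory updates ahead of the MMIO write that starts the op */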
5858		mb();
5859		ipr_send_command(ipr_cmd);
5860	} else {
5861		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5862		return SCSI_MLQUEUE_HOST_BUSY;
5863	}
5864
5865	return 0;
5866}
5867
5868static DEF_SCSI_QCMD(ipr_queuecommand)
5869
5870/**
5871 * ipr_ioctl - IOCTL handler
5872 * @sdev:	scsi device struct
5873 * @cmd:	IOCTL cmd
5874 * @arg:	IOCTL arg
5875 *
5876 * Return value:
5877 * 	0 on success / other on failure
5878 **/
5879static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5880{
5881	struct ipr_resource_entry *res;
5882
5883	res = (struct ipr_resource_entry *)sdev->hostdata;
5884	if (res && ipr_is_gata(res)) {
5885		if (cmd == HDIO_GET_IDENTITY)
5886			return -ENOTTY;
5887		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5888	}
5889
5890	return -EINVAL;
5891}
5892
5893/**
5894 * ipr_ioa_info - Get information about the card/driver
5895 * @host:	scsi host struct
5896 *
5897 * Return value:
5898 * 	pointer to buffer with description string
5899 **/
5900static const char *ipr_ioa_info(struct Scsi_Host *host)
5901{
5902	static char buffer[512];
5903	struct ipr_ioa_cfg *ioa_cfg;
5904	unsigned long lock_flags = 0;
5905
5906	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5907
5908	spin_lock_irqsave(host->host_lock, lock_flags);
5909	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5910	spin_unlock_irqrestore(host->host_lock, lock_flags);
5911
5912	return buffer;
5913}
5914
5915static struct scsi_host_template driver_template = {
5916	.module = THIS_MODULE,
5917	.name = "IPR",
5918	.info = ipr_ioa_info,
5919	.ioctl = ipr_ioctl,
5920	.queuecommand = ipr_queuecommand,
5921	.eh_abort_handler = ipr_eh_abort,
5922	.eh_device_reset_handler = ipr_eh_dev_reset,
5923	.eh_host_reset_handler = ipr_eh_host_reset,
5924	.slave_alloc = ipr_slave_alloc,
5925	.slave_configure = ipr_slave_configure,
5926	.slave_destroy = ipr_slave_destroy,
5927	.target_alloc = ipr_target_alloc,
5928	.target_destroy = ipr_target_destroy,
5929	.change_queue_depth = ipr_change_queue_depth,
5930	.change_queue_type = ipr_change_queue_type,
5931	.bios_param = ipr_biosparam,
5932	.can_queue = IPR_MAX_COMMANDS,
5933	.this_id = -1,
5934	.sg_tablesize = IPR_MAX_SGLIST,
5935	.max_sectors = IPR_IOA_MAX_SECTORS,
5936	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5937	.use_clustering = ENABLE_CLUSTERING,
5938	.shost_attrs = ipr_ioa_attrs,
5939	.sdev_attrs = ipr_dev_attrs,
5940	.proc_name = IPR_NAME
5941};
5942
5943/**
5944 * ipr_ata_phy_reset - libata phy_reset handler
5945 * @ap:		ata port to reset
5946 *
5947 **/
5948static void ipr_ata_phy_reset(struct ata_port *ap)
5949{
5950	unsigned long flags;
5951	struct ipr_sata_port *sata_port = ap->private_data;
5952	struct ipr_resource_entry *res = sata_port->res;
5953	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5954	int rc;
5955
5956	ENTER;
5957	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5958	while (ioa_cfg->in_reset_reload) {
5959		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5960		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5961		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5962	}
5963
5964	if (!ioa_cfg->allow_cmds)
5965		goto out_unlock;
5966
5967	rc = ipr_device_reset(ioa_cfg, res);
5968
5969	if (rc) {
5970		ap->link.device[0].class = ATA_DEV_NONE;
5971		goto out_unlock;
5972	}
5973
5974	ap->link.device[0].class = res->ata_class;
5975	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5976		ap->link.device[0].class = ATA_DEV_NONE;
5977
5978out_unlock:
5979	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5980	LEAVE;
5981}
5982
5983/**
5984 * ipr_ata_post_internal - Cleanup after an internal command
5985 * @qc:	ATA queued command
5986 *
5987 * Return value:
5988 * 	none
5989 **/
5990static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5991{
5992	struct ipr_sata_port *sata_port = qc->ap->private_data;
5993	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5994	struct ipr_cmnd *ipr_cmd;
5995	unsigned long flags;
5996
5997	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5998	while (ioa_cfg->in_reset_reload) {
5999		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6000		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6001		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6002	}
6003
6004	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
6005		if (ipr_cmd->qc == qc) {
6006			ipr_device_reset(ioa_cfg, sata_port->res);
6007			break;
6008		}
6009	}
6010	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6011}
6012
6013/**
6014 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6015 * @regs:	destination
6016 * @tf:	source ATA taskfile
6017 *
6018 * Return value:
6019 * 	none
6020 **/
6021static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6022			     struct ata_taskfile *tf)
6023{
6024	regs->feature = tf->feature;
6025	regs->nsect = tf->nsect;
6026	regs->lbal = tf->lbal;
6027	regs->lbam = tf->lbam;
6028	regs->lbah = tf->lbah;
6029	regs->device = tf->device;
6030	regs->command = tf->command;
6031	regs->hob_feature = tf->hob_feature;
6032	regs->hob_nsect = tf->hob_nsect;
6033	regs->hob_lbal = tf->hob_lbal;
6034	regs->hob_lbam = tf->hob_lbam;
6035	regs->hob_lbah = tf->hob_lbah;
6036	regs->ctl = tf->ctl;
6037}
6038
6039/**
6040 * ipr_sata_done - done function for SATA commands
6041 * @ipr_cmd:	ipr command struct
6042 *
6043 * This function is invoked by the interrupt handler for
6044 * ops generated by the SCSI mid-layer to SATA devices
6045 *
6046 * Return value:
6047 * 	none
6048 **/
6049static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6050{
6051	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6052	struct ata_queued_cmd *qc = ipr_cmd->qc;
6053	struct ipr_sata_port *sata_port = qc->ap->private_data;
6054	struct ipr_resource_entry *res = sata_port->res;
6055	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6056
6057	if (ipr_cmd->ioa_cfg->sis64)
6058		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6059		       sizeof(struct ipr_ioasa_gata));
6060	else
6061		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6062		       sizeof(struct ipr_ioasa_gata));
6063	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6064
6065	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6066		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6067
6068	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6069		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6070	else
6071		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6072	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6073	ata_qc_complete(qc);
6074}
6075
6076/**
6077 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6078 * @ipr_cmd:	ipr command struct
6079 * @qc:		ATA queued command
6080 *
6081 **/
6082static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6083				  struct ata_queued_cmd *qc)
6084{
6085	u32 ioadl_flags = 0;
6086	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6087	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6088	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6089	int len = qc->nbytes;
6090	struct scatterlist *sg;
6091	unsigned int si;
6092	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6093
6094	if (len == 0)
6095		return;
6096
6097	if (qc->dma_dir == DMA_TO_DEVICE) {
6098		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6099		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6100	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6101		ioadl_flags = IPR_IOADL_FLAGS_READ;
6102
6103	ioarcb->data_transfer_length = cpu_to_be32(len);
6104	ioarcb->ioadl_len =
6105		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6106	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6107		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6108
6109	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6110		ioadl64->flags = cpu_to_be32(ioadl_flags);
6111		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6112		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6113
6114		last_ioadl64 = ioadl64;
6115		ioadl64++;
6116	}
6117
6118	if (likely(last_ioadl64))
6119		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6120}
6121
6122/**
6123 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6124 * @ipr_cmd:	ipr command struct
6125 * @qc:		ATA queued command
6126 *
6127 **/
6128static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6129				struct ata_queued_cmd *qc)
6130{
6131	u32 ioadl_flags = 0;
6132	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6133	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6134	struct ipr_ioadl_desc *last_ioadl = NULL;
6135	int len = qc->nbytes;
6136	struct scatterlist *sg;
6137	unsigned int si;
6138
6139	if (len == 0)
6140		return;
6141
6142	if (qc->dma_dir == DMA_TO_DEVICE) {
6143		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6144		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6145		ioarcb->data_transfer_length = cpu_to_be32(len);
6146		ioarcb->ioadl_len =
6147			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6148	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6149		ioadl_flags = IPR_IOADL_FLAGS_READ;
6150		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6151		ioarcb->read_ioadl_len =
6152			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6153	}
6154
6155	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6156		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6157		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6158
6159		last_ioadl = ioadl;
6160		ioadl++;
6161	}
6162
6163	if (likely(last_ioadl))
6164		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6165}
6166
6167/**
6168 * ipr_qc_issue - Issue a SATA qc to a device
6169 * @qc:	queued command
6170 *
6171 * Return value:
6172 * 	0 on success / AC_ERR_* on failure
6173 **/
6174static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6175{
6176	struct ata_port *ap = qc->ap;
6177	struct ipr_sata_port *sata_port = ap->private_data;
6178	struct ipr_resource_entry *res = sata_port->res;
6179	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6180	struct ipr_cmnd *ipr_cmd;
6181	struct ipr_ioarcb *ioarcb;
6182	struct ipr_ioarcb_ata_regs *regs;
6183
6184	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6185		return AC_ERR_SYSTEM;
6186
6187	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6188	ioarcb = &ipr_cmd->ioarcb;
6189
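	/* SIS64 passes the ATA taskfile as additional command parms after the IOARCB */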
6190	if (ioa_cfg->sis64) {
6191		regs = &ipr_cmd->i.ata_ioadl.regs;
6192		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6193	} else
6194		regs = &ioarcb->u.add_data.u.regs;
6195
6196	memset(regs, 0, sizeof(*regs));
6197	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6198
6199	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6200	ipr_cmd->qc = qc;
6201	ipr_cmd->done = ipr_sata_done;
6202	ipr_cmd->ioarcb.res_handle = res->res_handle;
6203	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6204	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6205	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6206	ipr_cmd->dma_use_sg = qc->n_elem;
6207
6208	if (ioa_cfg->sis64)
6209		ipr_build_ata_ioadl64(ipr_cmd, qc);
6210	else
6211		ipr_build_ata_ioadl(ipr_cmd, qc);
6212
6213	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6214	ipr_copy_sata_tf(regs, &qc->tf);
6215	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6216	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6217
6218	switch (qc->tf.protocol) {
6219	case ATA_PROT_NODATA:
6220	case ATA_PROT_PIO:
6221		break;
6222
6223	case ATA_PROT_DMA:
6224		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6225		break;
6226
6227	case ATAPI_PROT_PIO:
6228	case ATAPI_PROT_NODATA:
6229		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6230		break;
6231
6232	case ATAPI_PROT_DMA:
6233		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6234		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6235		break;
6236
6237	default:
6238		WARN_ON(1);
6239		return AC_ERR_INVALID;
6240	}
6241
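	/* Ensure the IOARCB is visible in memory before handing it to the adapter */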
6242	mb();
6243
6244	ipr_send_command(ipr_cmd);
6245
6246	return 0;
6247}
6248
6249/**
6250 * ipr_qc_fill_rtf - Read result TF
6251 * @qc: ATA queued command
6252 *
6253 * Return value:
6254 * 	true
6255 **/
6256static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6257{
6258	struct ipr_sata_port *sata_port = qc->ap->private_data;
6259	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6260	struct ata_taskfile *tf = &qc->result_tf;
6261
6262	tf->feature = g->error;
6263	tf->nsect = g->nsect;
6264	tf->lbal = g->lbal;
6265	tf->lbam = g->lbam;
6266	tf->lbah = g->lbah;
6267	tf->device = g->device;
6268	tf->command = g->status;
6269	tf->hob_nsect = g->hob_nsect;
6270	tf->hob_lbal = g->hob_lbal;
6271	tf->hob_lbam = g->hob_lbam;
6272	tf->hob_lbah = g->hob_lbah;
6273	tf->ctl = g->alt_status;
6274
6275	return true;
6276}
6277
6278static struct ata_port_operations ipr_sata_ops = {
6279	.phy_reset = ipr_ata_phy_reset,
6280	.hardreset = ipr_sata_reset,
6281	.post_internal_cmd = ipr_ata_post_internal,
6282	.qc_prep = ata_noop_qc_prep,
6283	.qc_issue = ipr_qc_issue,
6284	.qc_fill_rtf = ipr_qc_fill_rtf,
6285	.port_start = ata_sas_port_start,
6286	.port_stop = ata_sas_port_stop
6287};
6288
6289static struct ata_port_info sata_port_info = {
6290	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6291	.pio_mask	= ATA_PIO4_ONLY,
6292	.mwdma_mask	= ATA_MWDMA2,
6293	.udma_mask	= ATA_UDMA6,
6294	.port_ops	= &ipr_sata_ops
6295};
6296
6297#ifdef CONFIG_PPC_PSERIES
6298static const u16 ipr_blocked_processors[] = {
6299	PV_NORTHSTAR,
6300	PV_PULSAR,
6301	PV_POWER4,
6302	PV_ICESTAR,
6303	PV_SSTAR,
6304	PV_POWER4p,
6305	PV_630,
6306	PV_630p
6307};
6308
6309/**
6310 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6311 * @ioa_cfg:	ioa cfg struct
6312 *
6313 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6314 * certain pSeries hardware. This function determines if the given
6315 * adapter is in one of these configurations or not.
6316 *
6317 * Return value:
6318 * 	1 if adapter is not supported / 0 if adapter is supported
6319 **/
6320static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6321{
6322	int i;
6323
6324	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6325		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6326			if (__is_processor(ipr_blocked_processors[i]))
6327				return 1;
6328		}
6329	}
6330	return 0;
6331}
6332#else
6333#define ipr_invalid_adapter(ioa_cfg) 0
6334#endif
6335
6336/**
6337 * ipr_ioa_bringdown_done - IOA bring down completion.
6338 * @ipr_cmd:	ipr command struct
6339 *
6340 * This function processes the completion of an adapter bring down.
6341 * It wakes any reset sleepers.
6342 *
6343 * Return value:
6344 * 	IPR_RC_JOB_RETURN
6345 **/
6346static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6347{
6348	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6349
6350	ENTER;
6351	ioa_cfg->in_reset_reload = 0;
6352	ioa_cfg->reset_retries = 0;
6353	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6354	wake_up_all(&ioa_cfg->reset_wait_q);
6355
6356	spin_unlock_irq(ioa_cfg->host->host_lock);
6357	scsi_unblock_requests(ioa_cfg->host);
6358	spin_lock_irq(ioa_cfg->host->host_lock);
6359	LEAVE;
6360
6361	return IPR_RC_JOB_RETURN;
6362}
6363
6364/**
6365 * ipr_ioa_reset_done - IOA reset completion.
6366 * @ipr_cmd:	ipr command struct
6367 *
6368 * This function processes the completion of an adapter reset.
6369 * It schedules any necessary mid-layer add/removes and
6370 * wakes any reset sleepers.
6371 *
6372 * Return value:
6373 * 	IPR_RC_JOB_RETURN
6374 **/
6375static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6376{
6377	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6378	struct ipr_resource_entry *res;
6379	struct ipr_hostrcb *hostrcb, *temp;
6380	int i = 0;
6381
6382	ENTER;
6383	ioa_cfg->in_reset_reload = 0;
6384	ioa_cfg->allow_cmds = 1;
6385	ioa_cfg->reset_cmd = NULL;
6386	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6387
6388	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6389		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6390			ipr_trace;
6391			break;
6392		}
6393	}
6394	schedule_work(&ioa_cfg->work_q);
6395
6396	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6397		list_del(&hostrcb->queue);
6398		if (i++ < IPR_NUM_LOG_HCAMS)
6399			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6400		else
6401			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6402	}
6403
6404	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6405	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6406
6407	ioa_cfg->reset_retries = 0;
6408	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6409	wake_up_all(&ioa_cfg->reset_wait_q);
6410
6411	spin_unlock(ioa_cfg->host->host_lock);
6412	scsi_unblock_requests(ioa_cfg->host);
6413	spin_lock(ioa_cfg->host->host_lock);
6414
6415	if (!ioa_cfg->allow_cmds)
6416		scsi_block_requests(ioa_cfg->host);
6417
6418	LEAVE;
6419	return IPR_RC_JOB_RETURN;
6420}
6421
6422/**
6423 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6424 * @supported_dev:	supported device struct
6425 * @vpids:			vendor product id struct
6426 *
6427 * Return value:
6428 * 	none
6429 **/
6430static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6431				 struct ipr_std_inq_vpids *vpids)
6432{
6433	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6434	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6435	supported_dev->num_records = 1;
6436	supported_dev->data_length =
6437		cpu_to_be16(sizeof(struct ipr_supported_device));
6438	supported_dev->reserved = 0;
6439}
6440
6441/**
6442 * ipr_set_supported_devs - Send Set Supported Devices for a device
6443 * @ipr_cmd:	ipr command struct
6444 *
6445 * This function sends a Set Supported Devices to the adapter
6446 *
6447 * Return value:
6448 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6449 **/
6450static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6451{
6452	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6453	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6454	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6455	struct ipr_resource_entry *res = ipr_cmd->u.res;
6456
6457	ipr_cmd->job_step = ipr_ioa_reset_done;
6458
6459	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6460		if (!ipr_is_scsi_disk(res))
6461			continue;
6462
6463		ipr_cmd->u.res = res;
6464		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6465
6466		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6467		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6468		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6469
6470		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6471		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
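		/* Parameter list length, big-endian, split across CDB bytes 7-8 */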
6472		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6473		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6474
6475		ipr_init_ioadl(ipr_cmd,
6476			       ioa_cfg->vpd_cbs_dma +
6477				 offsetof(struct ipr_misc_cbs, supp_dev),
6478			       sizeof(struct ipr_supported_device),
6479			       IPR_IOADL_FLAGS_WRITE_LAST);
6480
6481		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6482			   IPR_SET_SUP_DEVICE_TIMEOUT);
6483
6484		if (!ioa_cfg->sis64)
6485			ipr_cmd->job_step = ipr_set_supported_devs;
6486		return IPR_RC_JOB_RETURN;
6487	}
6488
6489	return IPR_RC_JOB_CONTINUE;
6490}
6491
6492/**
6493 * ipr_get_mode_page - Locate specified mode page
6494 * @mode_pages:	mode page buffer
6495 * @page_code:	page code to find
6496 * @len:		minimum required length for mode page
6497 *
6498 * Return value:
6499 * 	pointer to mode page / NULL on failure
6500 **/
6501static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6502			       u32 page_code, u32 len)
6503{
6504	struct ipr_mode_page_hdr *mode_hdr;
6505	u32 page_length;
6506	u32 length;
6507
6508	if (!mode_pages || (mode_pages->hdr.length == 0))
6509		return NULL;
6510
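	/*
	 * hdr.length excludes the length byte itself, so total mode data is
	 * length + 1; subtracting the 4-byte header and any block descriptors
	 * leaves the space occupied by the mode pages themselves.
	 */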
6511	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6512	mode_hdr = (struct ipr_mode_page_hdr *)
6513		(mode_pages->data + mode_pages->hdr.block_desc_len);
6514
6515	while (length) {
6516		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6517			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6518				return mode_hdr;
6519			break;
6520		} else {
6521			page_length = (sizeof(struct ipr_mode_page_hdr) +
6522				       mode_hdr->page_length);
6523			length -= page_length;
6524			mode_hdr = (struct ipr_mode_page_hdr *)
6525				((unsigned long)mode_hdr + page_length);
6526		}
6527	}
6528	return NULL;
6529}
6530
6531/**
6532 * ipr_check_term_power - Check for term power errors
6533 * @ioa_cfg:	ioa config struct
6534 * @mode_pages:	IOAFP mode pages buffer
6535 *
6536 * Check the IOAFP's mode page 28 for term power errors
6537 *
6538 * Return value:
6539 * 	nothing
6540 **/
6541static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6542				 struct ipr_mode_pages *mode_pages)
6543{
6544	int i;
6545	int entry_length;
6546	struct ipr_dev_bus_entry *bus;
6547	struct ipr_mode_page28 *mode_page;
6548
6549	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6550				      sizeof(struct ipr_mode_page28));
6551
6552	entry_length = mode_page->entry_length;
6553
6554	bus = mode_page->bus;
6555
6556	for (i = 0; i < mode_page->num_entries; i++) {
6557		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6558			dev_err(&ioa_cfg->pdev->dev,
6559				"Term power is absent on scsi bus %d\n",
6560				bus->res_addr.bus);
6561		}
6562
6563		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6564	}
6565}
6566
6567/**
6568 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6569 * @ioa_cfg:	ioa config struct
6570 *
6571 * Looks through the config table checking for SES devices. If
6572 * the SES device is in the SES table indicating a maximum SCSI
6573 * bus speed, the speed is limited for the bus.
6574 *
6575 * Return value:
6576 * 	none
6577 **/
6578static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6579{
6580	u32 max_xfer_rate;
6581	int i;
6582
6583	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6584		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6585						       ioa_cfg->bus_attr[i].bus_width);
6586
6587		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6588			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6589	}
6590}
6591
6592/**
6593 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6594 * @ioa_cfg:	ioa config struct
6595 * @mode_pages:	mode page 28 buffer
6596 *
6597 * Updates mode page 28 based on driver configuration
6598 *
6599 * Return value:
6600 * 	none
6601 **/
6602static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6603					  struct ipr_mode_pages *mode_pages)
6604{
6605	int i, entry_length;
6606	struct ipr_dev_bus_entry *bus;
6607	struct ipr_bus_attributes *bus_attr;
6608	struct ipr_mode_page28 *mode_page;
6609
6610	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6611				      sizeof(struct ipr_mode_page28));
6612
6613	entry_length = mode_page->entry_length;
6614
6615	/* Loop for each device bus entry */
6616	for (i = 0, bus = mode_page->bus;
6617	     i < mode_page->num_entries;
6618	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6619		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6620			dev_err(&ioa_cfg->pdev->dev,
6621				"Invalid resource address reported: 0x%08X\n",
6622				IPR_GET_PHYS_LOC(bus->res_addr));
6623			continue;
6624		}
6625
6626		bus_attr = &ioa_cfg->bus_attr[i];
6627		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6628		bus->bus_width = bus_attr->bus_width;
6629		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6630		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6631		if (bus_attr->qas_enabled)
6632			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6633		else
6634			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6635	}
6636}
6637
6638/**
6639 * ipr_build_mode_select - Build a mode select command
6640 * @ipr_cmd:	ipr command struct
6641 * @res_handle:	resource handle to send command to
6642 * @parm:		Byte 2 of Mode Select command
6643 * @dma_addr:	DMA buffer address
6644 * @xfer_len:	data transfer length
6645 *
6646 * Return value:
6647 * 	none
6648 **/
6649static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6650				  __be32 res_handle, u8 parm,
6651				  dma_addr_t dma_addr, u8 xfer_len)
6652{
6653	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6654
6655	ioarcb->res_handle = res_handle;
6656	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6657	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6658	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6659	ioarcb->cmd_pkt.cdb[1] = parm;
6660	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6661
6662	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6663}
6664
6665/**
6666 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6667 * @ipr_cmd:	ipr command struct
6668 *
6669 * This function sets up the SCSI bus attributes and sends
6670 * a Mode Select for Page 28 to activate them.
6671 *
6672 * Return value:
6673 * 	IPR_RC_JOB_RETURN
6674 **/
6675static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6676{
6677	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6678	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6679	int length;
6680
6681	ENTER;
6682	ipr_scsi_bus_speed_limit(ioa_cfg);
6683	ipr_check_term_power(ioa_cfg, mode_pages);
6684	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6685	length = mode_pages->hdr.length + 1;
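	/* The mode data length field is reserved for Mode Select; zero it */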
6686	mode_pages->hdr.length = 0;
6687
6688	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6689			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6690			      length);
6691
6692	ipr_cmd->job_step = ipr_set_supported_devs;
6693	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6694				    struct ipr_resource_entry, queue);
6695	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6696
6697	LEAVE;
6698	return IPR_RC_JOB_RETURN;
6699}
6700
6701/**
6702 * ipr_build_mode_sense - Builds a mode sense command
6703 * @ipr_cmd:	ipr command struct
6704 * @res_handle:	resource handle to send command to
6705 * @parm:		page code (byte 2 of the Mode Sense CDB)
6706 * @dma_addr:	DMA address of mode sense buffer
6707 * @xfer_len:	Size of DMA buffer
6708 *
6709 * Return value:
6710 * 	none
6711 **/
6712static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6713				 __be32 res_handle,
6714				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6715{
6716	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6717
6718	ioarcb->res_handle = res_handle;
6719	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6720	ioarcb->cmd_pkt.cdb[2] = parm;
6721	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6722	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6723
6724	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6725}
6726
6727/**
6728 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6729 * @ipr_cmd:	ipr command struct
6730 *
6731 * This function handles the failure of an IOA bringup command.
6732 *
6733 * Return value:
6734 * 	IPR_RC_JOB_RETURN
6735 **/
6736static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6737{
6738	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6739	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6740
6741	dev_err(&ioa_cfg->pdev->dev,
6742		"0x%02X failed with IOASC: 0x%08X\n",
6743		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6744
6745	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6746	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6747	return IPR_RC_JOB_RETURN;
6748}
6749
6750/**
6751 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6752 * @ipr_cmd:	ipr command struct
6753 *
6754 * This function handles the failure of a Mode Sense to the IOAFP.
6755 * Some adapters do not handle all mode pages.
6756 *
6757 * Return value:
6758 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6759 **/
6760static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6761{
6762	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6763	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6764
6765	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6766		ipr_cmd->job_step = ipr_set_supported_devs;
6767		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6768					    struct ipr_resource_entry, queue);
6769		return IPR_RC_JOB_CONTINUE;
6770	}
6771
6772	return ipr_reset_cmd_failed(ipr_cmd);
6773}
6774
6775/**
6776 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6777 * @ipr_cmd:	ipr command struct
6778 *
6779 * This function sends a Page 28 mode sense to the IOA to
6780 * retrieve SCSI bus attributes.
6781 *
6782 * Return value:
6783 * 	IPR_RC_JOB_RETURN
6784 **/
6785static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6786{
6787	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6788
6789	ENTER;
6790	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6791			     0x28, ioa_cfg->vpd_cbs_dma +
6792			     offsetof(struct ipr_misc_cbs, mode_pages),
6793			     sizeof(struct ipr_mode_pages));
6794
6795	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6796	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6797
6798	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6799
6800	LEAVE;
6801	return IPR_RC_JOB_RETURN;
6802}
6803
6804/**
6805 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6806 * @ipr_cmd:	ipr command struct
6807 *
6808 * This function enables dual IOA RAID support if possible.
6809 *
6810 * Return value:
6811 * 	IPR_RC_JOB_RETURN
6812 **/
6813static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6814{
6815	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6816	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6817	struct ipr_mode_page24 *mode_page;
6818	int length;
6819
6820	ENTER;
6821	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6822				      sizeof(struct ipr_mode_page24));
6823
6824	if (mode_page)
6825		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6826
6827	length = mode_pages->hdr.length + 1;
6828	mode_pages->hdr.length = 0;
6829
6830	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6831			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6832			      length);
6833
6834	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6835	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6836
6837	LEAVE;
6838	return IPR_RC_JOB_RETURN;
6839}
6840
6841/**
6842 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6843 * @ipr_cmd:	ipr command struct
6844 *
6845 * This function handles the failure of a Mode Sense to the IOAFP.
6846 * Some adapters do not handle all mode pages.
6847 *
6848 * Return value:
6849 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6850 **/
6851static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6852{
6853	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6854
6855	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6856		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6857		return IPR_RC_JOB_CONTINUE;
6858	}
6859
6860	return ipr_reset_cmd_failed(ipr_cmd);
6861}
6862
6863/**
6864 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6865 * @ipr_cmd:	ipr command struct
6866 *
6867 * This function sends a mode sense to the IOA to retrieve
6868 * the IOA Advanced Function Control mode page.
6869 *
6870 * Return value:
6871 * 	IPR_RC_JOB_RETURN
6872 **/
6873static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6874{
6875	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6876
6877	ENTER;
6878	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6879			     0x24, ioa_cfg->vpd_cbs_dma +
6880			     offsetof(struct ipr_misc_cbs, mode_pages),
6881			     sizeof(struct ipr_mode_pages));
6882
6883	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6884	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6885
6886	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6887
6888	LEAVE;
6889	return IPR_RC_JOB_RETURN;
6890}
6891
6892/**
6893 * ipr_init_res_table - Initialize the resource table
6894 * @ipr_cmd:	ipr command struct
6895 *
6896 * This function looks through the existing resource table, comparing
6897 * it with the config table. This function will take care of old/new
6898 * devices and schedule adding/removing them from the mid-layer
6899 * as appropriate.
6900 *
6901 * Return value:
6902 * 	IPR_RC_JOB_CONTINUE
6903 **/
6904static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6905{
6906	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6907	struct ipr_resource_entry *res, *temp;
6908	struct ipr_config_table_entry_wrapper cfgtew;
6909	int entries, found, flag, i;
6910	LIST_HEAD(old_res);
6911
6912	ENTER;
6913	if (ioa_cfg->sis64)
6914		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6915	else
6916		flag = ioa_cfg->u.cfg_table->hdr.flags;
6917
6918	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6919		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6920
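	/*
	 * Park all known resources on old_res; entries matched against the new
	 * config table move back below, and anything left over has gone away.
	 */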
6921	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6922		list_move_tail(&res->queue, &old_res);
6923
6924	if (ioa_cfg->sis64)
6925		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6926	else
6927		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6928
6929	for (i = 0; i < entries; i++) {
6930		if (ioa_cfg->sis64)
6931			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6932		else
6933			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6934		found = 0;
6935
6936		list_for_each_entry_safe(res, temp, &old_res, queue) {
6937			if (ipr_is_same_device(res, &cfgtew)) {
6938				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6939				found = 1;
6940				break;
6941			}
6942		}
6943
6944		if (!found) {
6945			if (list_empty(&ioa_cfg->free_res_q)) {
6946				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6947				break;
6948			}
6949
6950			found = 1;
6951			res = list_entry(ioa_cfg->free_res_q.next,
6952					 struct ipr_resource_entry, queue);
6953			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6954			ipr_init_res_entry(res, &cfgtew);
6955			res->add_to_ml = 1;
6956		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6957			res->sdev->allow_restart = 1;
6958
6959		if (found)
6960			ipr_update_res_entry(res, &cfgtew);
6961	}
6962
6963	list_for_each_entry_safe(res, temp, &old_res, queue) {
6964		if (res->sdev) {
6965			res->del_from_ml = 1;
6966			res->res_handle = IPR_INVALID_RES_HANDLE;
6967			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6968		}
6969	}
6970
6971	list_for_each_entry_safe(res, temp, &old_res, queue) {
6972		ipr_clear_res_target(res);
6973		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6974	}
6975
6976	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6977		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6978	else
6979		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6980
6981	LEAVE;
6982	return IPR_RC_JOB_CONTINUE;
6983}
6984
6985/**
6986 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6987 * @ipr_cmd:	ipr command struct
6988 *
6989 * This function sends a Query IOA Configuration command
6990 * to the adapter to retrieve the IOA configuration table.
6991 *
6992 * Return value:
6993 * 	IPR_RC_JOB_RETURN
6994 **/
6995static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6996{
6997	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6998	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6999	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7000	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7001
7002	ENTER;
7003	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7004		ioa_cfg->dual_raid = 1;
7005	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7006		 ucode_vpd->major_release, ucode_vpd->card_type,
7007		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7008	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7009	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7010
7011	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
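	/* 24-bit allocation length, big-endian, in CDB bytes 6-8 */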
7012	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7013	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7014	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7015
7016	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7017		       IPR_IOADL_FLAGS_READ_LAST);
7018
7019	ipr_cmd->job_step = ipr_init_res_table;
7020
7021	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7022
7023	LEAVE;
7024	return IPR_RC_JOB_RETURN;
7025}
7026
7027/**
7028 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7029 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the response buffer
 * @xfer_len:	transfer length
7030 *
7031 * This utility function sends an inquiry to the adapter.
7032 *
7033 * Return value:
7034 * 	none
7035 **/
7036static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7037			      dma_addr_t dma_addr, u8 xfer_len)
7038{
7039	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7040
7041	ENTER;
7042	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7043	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7044
7045	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7046	ioarcb->cmd_pkt.cdb[1] = flags;
7047	ioarcb->cmd_pkt.cdb[2] = page;
7048	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7049
7050	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7051
7052	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7053	LEAVE;
7054}
7055
7056/**
7057 * ipr_inquiry_page_supported - Is the given inquiry page supported
7058 * @page0:		inquiry page 0 buffer
7059 * @page:		page code.
7060 *
7061 * This function determines if the specified inquiry page is supported.
7062 *
7063 * Return value:
7064 *	1 if page is supported / 0 if not
7065 **/
7066static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7067{
7068	int i;
7069
7070	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7071		if (page0->page[i] == page)
7072			return 1;
7073
7074	return 0;
7075}
7076
7077/**
7078 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7079 * @ipr_cmd:	ipr command struct
7080 *
7081 * This function sends a Page 0xD0 inquiry to the adapter
7082 * to retrieve adapter capabilities.
7083 *
7084 * Return value:
7085 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7086 **/
7087static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7088{
7089	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7090	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7091	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7092
7093	ENTER;
7094	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7095	memset(cap, 0, sizeof(*cap));
7096
7097	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7098		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7099				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7100				  sizeof(struct ipr_inquiry_cap));
7101		return IPR_RC_JOB_RETURN;
7102	}
7103
7104	LEAVE;
7105	return IPR_RC_JOB_CONTINUE;
7106}
7107
7108/**
7109 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7110 * @ipr_cmd:	ipr command struct
7111 *
7112 * This function sends a Page 3 inquiry to the adapter
7113 * to retrieve software VPD information.
7114 *
7115 * Return value:
7116 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7117 **/
7118static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7119{
7120	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7121
7122	ENTER;
7123
7124	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7125
7126	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7127			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7128			  sizeof(struct ipr_inquiry_page3));
7129
7130	LEAVE;
7131	return IPR_RC_JOB_RETURN;
7132}
7133
7134/**
7135 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7136 * @ipr_cmd:	ipr command struct
7137 *
7138 * This function sends a Page 0 inquiry to the adapter
7139 * to retrieve supported inquiry pages.
7140 *
7141 * Return value:
7142 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7143 **/
7144static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7145{
7146	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7147	char type[5];
7148
7149	ENTER;
7150
7151	/* Grab the type out of the VPD and store it away */
7152	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7153	type[4] = '\0';
7154	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7155
7156	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7157
7158	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7159			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7160			  sizeof(struct ipr_inquiry_page0));
7161
7162	LEAVE;
7163	return IPR_RC_JOB_RETURN;
7164}
7165
7166/**
7167 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7168 * @ipr_cmd:	ipr command struct
7169 *
7170 * This function sends a standard inquiry to the adapter.
7171 *
7172 * Return value:
7173 * 	IPR_RC_JOB_RETURN
7174 **/
7175static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7176{
7177	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7178
7179	ENTER;
7180	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7181
7182	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7183			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7184			  sizeof(struct ipr_ioa_vpd));
7185
7186	LEAVE;
7187	return IPR_RC_JOB_RETURN;
7188}
7189
7190/**
7191 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7192 * @ipr_cmd:	ipr command struct
7193 *
7194 * This function sends an Identify Host Request Response Queue
7195 * command to establish the HRRQ with the adapter.
7196 *
7197 * Return value:
7198 * 	IPR_RC_JOB_RETURN
7199 **/
7200static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7201{
7202	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7203	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7204
7205	ENTER;
7206	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7207
7208	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7209	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7210
7211	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7212	if (ioa_cfg->sis64)
7213		ioarcb->cmd_pkt.cdb[1] = 0x1;
7214	ioarcb->cmd_pkt.cdb[2] =
7215		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7216	ioarcb->cmd_pkt.cdb[3] =
7217		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7218	ioarcb->cmd_pkt.cdb[4] =
7219		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7220	ioarcb->cmd_pkt.cdb[5] =
7221		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7222	ioarcb->cmd_pkt.cdb[7] =
7223		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7224	ioarcb->cmd_pkt.cdb[8] =
7225		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7226
7227	if (ioa_cfg->sis64) {
7228		ioarcb->cmd_pkt.cdb[10] =
7229			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7230		ioarcb->cmd_pkt.cdb[11] =
7231			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7232		ioarcb->cmd_pkt.cdb[12] =
7233			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7234		ioarcb->cmd_pkt.cdb[13] =
7235			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7236	}
7237
7238	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7239
7240	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7241
7242	LEAVE;
7243	return IPR_RC_JOB_RETURN;
7244}
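
/*
 * Illustrative sketch, not part of the driver: the CDB setup above
 * scatters the 64-bit host_rrq_dma address big-endian across
 * non-contiguous CDB offsets (bytes 2-5 carry bits 31-0, and bytes
 * 10-13 carry bits 63-32 on SIS-64 adapters). A hypothetical helper
 * packing the low 32 bits the same way:
 */
static inline void ipr_example_pack_be32(u8 *cdb, u32 addr)
{
	cdb[2] = (addr >> 24) & 0xff;	/* most significant byte first */
	cdb[3] = (addr >> 16) & 0xff;
	cdb[4] = (addr >> 8) & 0xff;
	cdb[5] = addr & 0xff;
}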
7245
7246/**
7247 * ipr_reset_timer_done - Adapter reset timer function
7248 * @ipr_cmd:	ipr command struct
7249 *
7250 * Description: This function is used in adapter reset processing
7251 * for timing events. If the reset_cmd pointer in the IOA
7252 * config struct is not this adapter's we are doing nested
7253 * resets and fail_all_ops will take care of freeing the
7254 * command block.
7255 *
7256 * Return value:
7257 * 	none
7258 **/
7259static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7260{
7261	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7262	unsigned long lock_flags = 0;
7263
7264	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7265
7266	if (ioa_cfg->reset_cmd == ipr_cmd) {
7267		list_del(&ipr_cmd->queue);
7268		ipr_cmd->done(ipr_cmd);
7269	}
7270
7271	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7272}
7273
7274/**
7275 * ipr_reset_start_timer - Start a timer for adapter reset job
7276 * @ipr_cmd:	ipr command struct
7277 * @timeout:	timeout value
7278 *
7279 * Description: This function is used in adapter reset processing
7280 * for timing events. If the reset_cmd pointer in the IOA
7281 * config struct is not this adapter's we are doing nested
7282 * resets and fail_all_ops will take care of freeing the
7283 * command block.
7284 *
7285 * Return value:
7286 * 	none
7287 **/
7288static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7289				  unsigned long timeout)
7290{
7291	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7292	ipr_cmd->done = ipr_reset_ioa_job;
7293
7294	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7295	ipr_cmd->timer.expires = jiffies + timeout;
7296	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7297	add_timer(&ipr_cmd->timer);
7298}
7299
7300/**
7301 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7302 * @ioa_cfg:	ioa cfg struct
7303 *
7304 * Return value:
7305 * 	nothing
7306 **/
7307static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7308{
7309	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7310
7311	/* Initialize Host RRQ pointers */
7312	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7313	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7314	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7315	ioa_cfg->toggle_bit = 1;
7316
7317	/* Zero out config table */
7318	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7319}
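
/*
 * Sketch of how the toggle bit initialized above is typically
 * consumed (illustration only; assumes the IPR_HRRQ_TOGGLE_BIT
 * definition in ipr.h): the adapter posts response handles into
 * host_rrq, and the low-order toggle bit of each entry flips on every
 * wrap of the circular queue, so an entry is new only while its
 * toggle bit matches ioa_cfg->toggle_bit.
 */
static inline int ipr_example_rrq_entry_new(struct ipr_ioa_cfg *ioa_cfg)
{
	return (be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       ioa_cfg->toggle_bit;
}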
7320
7321/**
7322 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7323 * @ipr_cmd:	ipr command struct
7324 *
7325 * Return value:
7326 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7327 **/
7328static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7329{
7330	unsigned long stage, stage_time;
7331	u32 feedback;
7332	volatile u32 int_reg;
7333	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7334	u64 maskval = 0;
7335
7336	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7337	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7338	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7339
7340	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7341
7342	/* sanity check the stage_time value */
7343	if (stage_time == 0)
7344		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7345	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7346		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7347	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7348		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7349
7350	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7351		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7352		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7353		stage_time = ioa_cfg->transop_timeout;
7354		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7355	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7356		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7357		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7358			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7359			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7360			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7361			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7362			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7363			return IPR_RC_JOB_CONTINUE;
7364		}
7365	}
7366
7367	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7368	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7369	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7370	ipr_cmd->done = ipr_reset_ioa_job;
7371	add_timer(&ipr_cmd->timer);
7372	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7373
7374	return IPR_RC_JOB_RETURN;
7375}
7376
7377/**
7378 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7379 * @ipr_cmd:	ipr command struct
7380 *
7381 * This function reinitializes some control blocks and
7382 * enables destructive diagnostics on the adapter.
7383 *
7384 * Return value:
7385 * 	IPR_RC_JOB_RETURN
7386 **/
7387static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7388{
7389	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7390	volatile u32 int_reg;
7391	volatile u64 maskval;
7392
7393	ENTER;
7394	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7395	ipr_init_ioa_mem(ioa_cfg);
7396
7397	ioa_cfg->allow_interrupts = 1;
7398	if (ioa_cfg->sis64) {
7399		/* Set the adapter to the correct endian mode. */
7400		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7401		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7402	}
7403
7404	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7405
7406	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7407		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7408		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7409		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7410		return IPR_RC_JOB_CONTINUE;
7411	}
7412
7413	/* Enable destructive diagnostics on IOA */
7414	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7415
7416	if (ioa_cfg->sis64) {
7417		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7418		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7419		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7420	} else
7421		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7422
7423	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7424
7425	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7426
7427	if (ioa_cfg->sis64) {
7428		ipr_cmd->job_step = ipr_reset_next_stage;
7429		return IPR_RC_JOB_CONTINUE;
7430	}
7431
7432	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7433	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7434	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7435	ipr_cmd->done = ipr_reset_ioa_job;
7436	add_timer(&ipr_cmd->timer);
7437	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7438
7439	LEAVE;
7440	return IPR_RC_JOB_RETURN;
7441}
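
/*
 * The sis64 paths above compose a 64-bit interrupt mask from two
 * 32-bit halves so a single writeq() updates both. A generic sketch
 * of that composition (illustration only):
 */
static inline u64 ipr_example_mask64(u32 upper32, u32 lower32)
{
	return ((u64) upper32 << 32) | lower32;
}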
7442
7443/**
7444 * ipr_reset_wait_for_dump - Wait for a dump to time out.
7445 * @ipr_cmd:	ipr command struct
7446 *
7447 * This function is invoked when an adapter dump has run out
7448 * of processing time.
7449 *
7450 * Return value:
7451 * 	IPR_RC_JOB_CONTINUE
7452 **/
7453static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7454{
7455	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7456
7457	if (ioa_cfg->sdt_state == GET_DUMP)
7458		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7459	else if (ioa_cfg->sdt_state == READ_DUMP)
7460		ioa_cfg->sdt_state = ABORT_DUMP;
7461
7462	ioa_cfg->dump_timeout = 1;
7463	ipr_cmd->job_step = ipr_reset_alert;
7464
7465	return IPR_RC_JOB_CONTINUE;
7466}
7467
7468/**
7469 * ipr_unit_check_no_data - Log a unit check/no data error log
7470 * @ioa_cfg:		ioa config struct
7471 *
7472 * Logs an error indicating the adapter unit checked, but for some
7473 * reason, we were unable to fetch the unit check buffer.
7474 *
7475 * Return value:
7476 * 	nothing
7477 **/
7478static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7479{
7480	ioa_cfg->errors_logged++;
7481	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7482}
7483
7484/**
7485 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7486 * @ioa_cfg:		ioa config struct
7487 *
7488 * Fetches the unit check buffer from the adapter by clocking the data
7489 * through the mailbox register.
7490 *
7491 * Return value:
7492 * 	nothing
7493 **/
7494static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7495{
7496	unsigned long mailbox;
7497	struct ipr_hostrcb *hostrcb;
7498	struct ipr_uc_sdt sdt;
7499	int rc, length;
7500	u32 ioasc;
7501
7502	mailbox = readl(ioa_cfg->ioa_mailbox);
7503
7504	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7505		ipr_unit_check_no_data(ioa_cfg);
7506		return;
7507	}
7508
7509	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7510	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7511					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7512
7513	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7514	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7515	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7516		ipr_unit_check_no_data(ioa_cfg);
7517		return;
7518	}
7519
7520	/* Find length of the first sdt entry (UC buffer) */
7521	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7522		length = be32_to_cpu(sdt.entry[0].end_token);
7523	else
7524		length = (be32_to_cpu(sdt.entry[0].end_token) -
7525			  be32_to_cpu(sdt.entry[0].start_token)) &
7526			  IPR_FMT2_MBX_ADDR_MASK;
7527
7528	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7529			     struct ipr_hostrcb, queue);
7530	list_del(&hostrcb->queue);
7531	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7532
7533	rc = ipr_get_ldump_data_section(ioa_cfg,
7534					be32_to_cpu(sdt.entry[0].start_token),
7535					(__be32 *)&hostrcb->hcam,
7536					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7537
7538	if (!rc) {
7539		ipr_handle_log_data(ioa_cfg, hostrcb);
7540		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7541		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7542		    ioa_cfg->sdt_state == GET_DUMP)
7543			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7544	} else
7545		ipr_unit_check_no_data(ioa_cfg);
7546
7547	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7548}
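
/*
 * Sketch of the FMT2 length computation used above (illustration
 * only): the start and end tokens are adapter-side addresses, so the
 * byte count of the unit check buffer is their difference with the
 * mailbox address bits masked off.
 */
static inline u32 ipr_example_fmt2_len(u32 start_token, u32 end_token)
{
	return (end_token - start_token) & IPR_FMT2_MBX_ADDR_MASK;
}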
7549
7550/**
7551 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7552 * @ipr_cmd:	ipr command struct
7553 *
7554 * Description: This function retrieves the unit check buffer from the adapter.
7555 *
7556 * Return value:
7557 *	IPR_RC_JOB_RETURN
7558 **/
7559static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7560{
7561	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7562
7563	ENTER;
7564	ioa_cfg->ioa_unit_checked = 0;
7565	ipr_get_unit_check_buffer(ioa_cfg);
7566	ipr_cmd->job_step = ipr_reset_alert;
7567	ipr_reset_start_timer(ipr_cmd, 0);
7568
7569	LEAVE;
7570	return IPR_RC_JOB_RETURN;
7571}
7572
7573/**
7574 * ipr_reset_restore_cfg_space - Restore PCI config space.
7575 * @ipr_cmd:	ipr command struct
7576 *
7577 * Description: This function restores the saved PCI config space of
7578 * the adapter, fails all outstanding ops back to the callers, and
7579 * fetches the dump/unit check if applicable to this reset.
7580 *
7581 * Return value:
7582 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7583 **/
7584static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7585{
7586	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7587	u32 int_reg;
7588
7589	ENTER;
7590	ioa_cfg->pdev->state_saved = true;
7591	pci_restore_state(ioa_cfg->pdev);
7592
7593	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7594		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7595		return IPR_RC_JOB_CONTINUE;
7596	}
7597
7598	ipr_fail_all_ops(ioa_cfg);
7599
7600	if (ioa_cfg->sis64) {
7601		/* Set the adapter to the correct endian mode. */
7602		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7603		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7604	}
7605
7606	if (ioa_cfg->ioa_unit_checked) {
7607		if (ioa_cfg->sis64) {
7608			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7609			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7610			return IPR_RC_JOB_RETURN;
7611		} else {
7612			ioa_cfg->ioa_unit_checked = 0;
7613			ipr_get_unit_check_buffer(ioa_cfg);
7614			ipr_cmd->job_step = ipr_reset_alert;
7615			ipr_reset_start_timer(ipr_cmd, 0);
7616			return IPR_RC_JOB_RETURN;
7617		}
7618	}
7619
7620	if (ioa_cfg->in_ioa_bringdown) {
7621		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7622	} else {
7623		ipr_cmd->job_step = ipr_reset_enable_ioa;
7624
7625		if (GET_DUMP == ioa_cfg->sdt_state) {
7626			ioa_cfg->sdt_state = READ_DUMP;
7627			ioa_cfg->dump_timeout = 0;
7628			if (ioa_cfg->sis64)
7629				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7630			else
7631				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7632			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7633			schedule_work(&ioa_cfg->work_q);
7634			return IPR_RC_JOB_RETURN;
7635		}
7636	}
7637
7638	LEAVE;
7639	return IPR_RC_JOB_CONTINUE;
7640}
7641
7642/**
7643 * ipr_reset_bist_done - BIST has completed on the adapter.
7644 * @ipr_cmd:	ipr command struct
7645 *
7646 * Description: Unblock config space and resume the reset process.
7647 *
7648 * Return value:
7649 * 	IPR_RC_JOB_CONTINUE
7650 **/
7651static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7652{
7653	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7654
7655	ENTER;
7656	if (ioa_cfg->cfg_locked)
7657		pci_cfg_access_unlock(ioa_cfg->pdev);
7658	ioa_cfg->cfg_locked = 0;
7659	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7660	LEAVE;
7661	return IPR_RC_JOB_CONTINUE;
7662}
7663
7664/**
7665 * ipr_reset_start_bist - Run BIST on the adapter.
7666 * @ipr_cmd:	ipr command struct
7667 *
7668 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7669 *
7670 * Return value:
7671 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7672 **/
7673static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7674{
7675	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7676	int rc = PCIBIOS_SUCCESSFUL;
7677
7678	ENTER;
7679	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7680		writel(IPR_UPROCI_SIS64_START_BIST,
7681		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7682	else
7683		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7684
7685	if (rc == PCIBIOS_SUCCESSFUL) {
7686		ipr_cmd->job_step = ipr_reset_bist_done;
7687		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7688		rc = IPR_RC_JOB_RETURN;
7689	} else {
7690		if (ioa_cfg->cfg_locked)
7691			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7692		ioa_cfg->cfg_locked = 0;
7693		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7694		rc = IPR_RC_JOB_CONTINUE;
7695	}
7696
7697	LEAVE;
7698	return rc;
7699}
7700
7701/**
7702 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7703 * @ipr_cmd:	ipr command struct
7704 *
7705 * Description: This clears PCI reset to the adapter and delays two seconds.
7706 *
7707 * Return value:
7708 * 	IPR_RC_JOB_RETURN
7709 **/
7710static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7711{
7712	ENTER;
7713	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7714	ipr_cmd->job_step = ipr_reset_bist_done;
7715	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7716	LEAVE;
7717	return IPR_RC_JOB_RETURN;
7718}
7719
7720/**
7721 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7722 * @ipr_cmd:	ipr command struct
7723 *
7724 * Description: This asserts PCI reset to the adapter.
7725 *
7726 * Return value:
7727 * 	IPR_RC_JOB_RETURN
7728 **/
7729static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7730{
7731	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7732	struct pci_dev *pdev = ioa_cfg->pdev;
7733
7734	ENTER;
7735	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7736	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7737	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7738	LEAVE;
7739	return IPR_RC_JOB_RETURN;
7740}
7741
7742/**
7743 * ipr_reset_block_config_access_wait - Wait for permission to block config access
7744 * @ipr_cmd:	ipr command struct
7745 *
7746 * Description: This attempts to block config access to the IOA.
7747 *
7748 * Return value:
7749 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7750 **/
7751static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
7752{
7753	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7754	int rc = IPR_RC_JOB_CONTINUE;
7755
7756	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7757		ioa_cfg->cfg_locked = 1;
7758		ipr_cmd->job_step = ioa_cfg->reset;
7759	} else {
7760		if (ipr_cmd->u.time_left) {
7761			rc = IPR_RC_JOB_RETURN;
7762			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7763			ipr_reset_start_timer(ipr_cmd,
7764					      IPR_CHECK_FOR_RESET_TIMEOUT);
7765		} else {
7766			ipr_cmd->job_step = ioa_cfg->reset;
7767			dev_err(&ioa_cfg->pdev->dev,
7768				"Timed out waiting to lock config access. Resetting anyway.\n");
7769		}
7770	}
7771
7772	return rc;
7773}
7774
7775/**
7776 * ipr_reset_block_config_access - Block config access to the IOA
7777 * @ipr_cmd:	ipr command struct
7778 *
7779 * Description: This attempts to block config access to the IOA
7780 *
7781 * Return value:
7782 * 	IPR_RC_JOB_CONTINUE
7783 **/
7784static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
7785{
7786	ipr_cmd->ioa_cfg->cfg_locked = 0;
7787	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
7788	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7789	return IPR_RC_JOB_CONTINUE;
7790}
7791
7792/**
7793 * ipr_reset_allowed - Query whether or not IOA can be reset
7794 * @ioa_cfg:	ioa config struct
7795 *
7796 * Return value:
7797 * 	0 if reset not allowed / non-zero if reset is allowed
7798 **/
7799static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7800{
7801	volatile u32 temp_reg;
7802
7803	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7804	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7805}
7806
7807/**
7808 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7809 * @ipr_cmd:	ipr command struct
7810 *
7811 * Description: This function waits for adapter permission to run BIST,
7812 * then runs BIST. If the adapter does not give permission after a
7813 * reasonable time, we will reset the adapter anyway. The impact of
7814 * resetting the adapter without warning the adapter is the risk of
7815 * losing the persistent error log on the adapter. If the adapter is
7816 * reset while it is writing to the flash on the adapter, the flash
7817 * segment will have bad ECC and be zeroed.
7818 *
7819 * Return value:
7820 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7821 **/
7822static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7823{
7824	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7825	int rc = IPR_RC_JOB_RETURN;
7826
7827	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7828		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7829		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7830	} else {
7831		ipr_cmd->job_step = ipr_reset_block_config_access;
7832		rc = IPR_RC_JOB_CONTINUE;
7833	}
7834
7835	return rc;
7836}
7837
7838/**
7839 * ipr_reset_alert - Alert the adapter of a pending reset
7840 * @ipr_cmd:	ipr command struct
7841 *
7842 * Description: This function alerts the adapter that it will be reset.
7843 * If memory space is not currently enabled, proceed directly
7844 * to running BIST on the adapter. The timer must always be started
7845 * so we guarantee we do not run BIST from ipr_isr.
7846 *
7847 * Return value:
7848 * 	IPR_RC_JOB_RETURN
7849 **/
7850static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7851{
7852	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7853	u16 cmd_reg;
7854	int rc;
7855
7856	ENTER;
7857	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7858
7859	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7860		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7861		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7862		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7863	} else {
7864		ipr_cmd->job_step = ipr_reset_block_config_access;
7865	}
7866
7867	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7868	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7869
7870	LEAVE;
7871	return IPR_RC_JOB_RETURN;
7872}
7873
7874/**
7875 * ipr_reset_ucode_download_done - Microcode download completion
7876 * @ipr_cmd:	ipr command struct
7877 *
7878 * Description: This function unmaps the microcode download buffer.
7879 *
7880 * Return value:
7881 * 	IPR_RC_JOB_CONTINUE
7882 **/
7883static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7884{
7885	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7886	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7887
7888	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7889		     sglist->num_sg, DMA_TO_DEVICE);
7890
7891	ipr_cmd->job_step = ipr_reset_alert;
7892	return IPR_RC_JOB_CONTINUE;
7893}
7894
7895/**
7896 * ipr_reset_ucode_download - Download microcode to the adapter
7897 * @ipr_cmd:	ipr command struct
7898 *
7899 * Description: This function checks to see if there is microcode
7900 * to download to the adapter. If there is, a download is performed.
7901 *
7902 * Return value:
7903 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7904 **/
7905static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7906{
7907	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7908	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7909
7910	ENTER;
7911	ipr_cmd->job_step = ipr_reset_alert;
7912
7913	if (!sglist)
7914		return IPR_RC_JOB_CONTINUE;
7915
7916	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7917	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7918	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7919	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7920	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7921	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7922	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7923
7924	if (ioa_cfg->sis64)
7925		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7926	else
7927		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7928	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7929
7930	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7931		   IPR_WRITE_BUFFER_TIMEOUT);
7932
7933	LEAVE;
7934	return IPR_RC_JOB_RETURN;
7935}
7936
7937/**
7938 * ipr_reset_shutdown_ioa - Shutdown the adapter
7939 * @ipr_cmd:	ipr command struct
7940 *
7941 * Description: This function issues an adapter shutdown of the
7942 * specified type to the specified adapter as part of the
7943 * adapter reset job.
7944 *
7945 * Return value:
7946 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7947 **/
7948static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7949{
7950	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7951	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7952	unsigned long timeout;
7953	int rc = IPR_RC_JOB_CONTINUE;
7954
7955	ENTER;
7956	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7957		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7958		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7959		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7960		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7961
7962		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7963			timeout = IPR_SHUTDOWN_TIMEOUT;
7964		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7965			timeout = IPR_INTERNAL_TIMEOUT;
7966		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7967			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7968		else
7969			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7970
7971		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7972
7973		rc = IPR_RC_JOB_RETURN;
7974		ipr_cmd->job_step = ipr_reset_ucode_download;
7975	} else
7976		ipr_cmd->job_step = ipr_reset_alert;
7977
7978	LEAVE;
7979	return rc;
7980}
7981
7982/**
7983 * ipr_reset_ioa_job - Adapter reset job
7984 * @ipr_cmd:	ipr command struct
7985 *
7986 * Description: This function is the job router for the adapter reset job.
7987 *
7988 * Return value:
7989 * 	none
7990 **/
7991static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7992{
7993	u32 rc, ioasc;
7994	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7995
7996	do {
7997		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7998
7999		if (ioa_cfg->reset_cmd != ipr_cmd) {
8000			/*
8001			 * We are doing nested adapter resets and this is
8002			 * not the current reset job.
8003			 */
8004			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8005			return;
8006		}
8007
8008		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8009			rc = ipr_cmd->job_step_failed(ipr_cmd);
8010			if (rc == IPR_RC_JOB_RETURN)
8011				return;
8012		}
8013
8014		ipr_reinit_ipr_cmnd(ipr_cmd);
8015		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8016		rc = ipr_cmd->job_step(ipr_cmd);
8017	} while (rc == IPR_RC_JOB_CONTINUE);
8018}
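
/*
 * Minimal sketch (illustration only, never called) of the job-step
 * convention the router above relies on: a step either finishes its
 * work synchronously and returns IPR_RC_JOB_CONTINUE, in which case
 * ipr_reset_ioa_job() immediately invokes the next job_step, or it
 * starts an asynchronous operation that will re-enter
 * ipr_reset_ioa_job() on completion and returns IPR_RC_JOB_RETURN.
 */
static int ipr_example_job_step(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_alert;	/* step to run next */
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	return IPR_RC_JOB_RETURN;		/* wait for the timer */
}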
8019
8020/**
8021 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8022 * @ioa_cfg:		ioa config struct
8023 * @job_step:		first job step of reset job
8024 * @shutdown_type:	shutdown type
8025 *
8026 * Description: This function will initiate the reset of the given adapter
8027 * starting at the selected job step.
8028 * If the caller needs to wait on the completion of the reset,
8029 * the caller must sleep on the reset_wait_q.
8030 *
8031 * Return value:
8032 * 	none
8033 **/
8034static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8035				    int (*job_step) (struct ipr_cmnd *),
8036				    enum ipr_shutdown_type shutdown_type)
8037{
8038	struct ipr_cmnd *ipr_cmd;
8039
8040	ioa_cfg->in_reset_reload = 1;
8041	ioa_cfg->allow_cmds = 0;
8042	scsi_block_requests(ioa_cfg->host);
8043
8044	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8045	ioa_cfg->reset_cmd = ipr_cmd;
8046	ipr_cmd->job_step = job_step;
8047	ipr_cmd->u.shutdown_type = shutdown_type;
8048
8049	ipr_reset_ioa_job(ipr_cmd);
8050}
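
/*
 * Caller-side sketch (illustration only) of the reset_wait_q sleep
 * the description above calls for: initiate the reset under the host
 * lock, drop the lock, then sleep until in_reset_reload clears.
 */
static void ipr_example_reset_and_wait(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}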
8051
8052/**
8053 * ipr_initiate_ioa_reset - Initiate an adapter reset
8054 * @ioa_cfg:		ioa config struct
8055 * @shutdown_type:	shutdown type
8056 *
8057 * Description: This function will initiate the reset of the given adapter.
8058 * If the caller needs to wait on the completion of the reset,
8059 * the caller must sleep on the reset_wait_q.
8060 *
8061 * Return value:
8062 * 	none
8063 **/
8064static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8065				   enum ipr_shutdown_type shutdown_type)
8066{
8067	if (ioa_cfg->ioa_is_dead)
8068		return;
8069
8070	if (ioa_cfg->in_reset_reload) {
8071		if (ioa_cfg->sdt_state == GET_DUMP)
8072			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8073		else if (ioa_cfg->sdt_state == READ_DUMP)
8074			ioa_cfg->sdt_state = ABORT_DUMP;
8075	}
8076
8077	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8078		dev_err(&ioa_cfg->pdev->dev,
8079			"IOA taken offline - error recovery failed\n");
8080
8081		ioa_cfg->reset_retries = 0;
8082		ioa_cfg->ioa_is_dead = 1;
8083
8084		if (ioa_cfg->in_ioa_bringdown) {
8085			ioa_cfg->reset_cmd = NULL;
8086			ioa_cfg->in_reset_reload = 0;
8087			ipr_fail_all_ops(ioa_cfg);
8088			wake_up_all(&ioa_cfg->reset_wait_q);
8089
8090			spin_unlock_irq(ioa_cfg->host->host_lock);
8091			scsi_unblock_requests(ioa_cfg->host);
8092			spin_lock_irq(ioa_cfg->host->host_lock);
8093			return;
8094		} else {
8095			ioa_cfg->in_ioa_bringdown = 1;
8096			shutdown_type = IPR_SHUTDOWN_NONE;
8097		}
8098	}
8099
8100	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8101				shutdown_type);
8102}
8103
8104/**
8105 * ipr_reset_freeze - Hold off all I/O activity
8106 * @ipr_cmd:	ipr command struct
8107 *
8108 * Description: If the PCI slot is frozen, hold off all I/O
8109 * activity; then, as soon as the slot is available again,
8110 * initiate an adapter reset.
8111 */
8112static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8113{
8114	/* Disallow new interrupts, avoid loop */
8115	ipr_cmd->ioa_cfg->allow_interrupts = 0;
8116	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
8117	ipr_cmd->done = ipr_reset_ioa_job;
8118	return IPR_RC_JOB_RETURN;
8119}
8120
8121/**
8122 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8123 * @pdev:	PCI device struct
8124 *
8125 * Description: This routine is called to tell us that the PCI bus
8126 * is down. Can't do anything here, except put the device driver
8127 * into a holding pattern, waiting for the PCI bus to come back.
8128 */
8129static void ipr_pci_frozen(struct pci_dev *pdev)
8130{
8131	unsigned long flags = 0;
8132	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8133
8134	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8135	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8136	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8137}
8138
8139/**
8140 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8141 * @pdev:	PCI device struct
8142 *
8143 * Description: This routine is called by the pci error recovery
8144 * code after the PCI slot has been reset, just before we
8145 * should resume normal operations.
8146 */
8147static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8148{
8149	unsigned long flags = 0;
8150	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8151
8152	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8153	if (ioa_cfg->needs_warm_reset)
8154		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8155	else
8156		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8157					IPR_SHUTDOWN_NONE);
8158	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8159	return PCI_ERS_RESULT_RECOVERED;
8160}
8161
8162/**
8163 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8164 * @pdev:	PCI device struct
8165 *
8166 * Description: This routine is called when the PCI bus has
8167 * permanently failed.
8168 */
8169static void ipr_pci_perm_failure(struct pci_dev *pdev)
8170{
8171	unsigned long flags = 0;
8172	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8173
8174	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8175	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8176		ioa_cfg->sdt_state = ABORT_DUMP;
8177	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8178	ioa_cfg->in_ioa_bringdown = 1;
8179	ioa_cfg->allow_cmds = 0;
8180	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8181	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8182}
8183
8184/**
8185 * ipr_pci_error_detected - Called when a PCI error is detected.
8186 * @pdev:	PCI device struct
8187 * @state:	PCI channel state
8188 *
8189 * Description: Called when a PCI error is detected.
8190 *
8191 * Return value:
8192 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8193 */
8194static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8195					       pci_channel_state_t state)
8196{
8197	switch (state) {
8198	case pci_channel_io_frozen:
8199		ipr_pci_frozen(pdev);
8200		return PCI_ERS_RESULT_NEED_RESET;
8201	case pci_channel_io_perm_failure:
8202		ipr_pci_perm_failure(pdev);
8203		return PCI_ERS_RESULT_DISCONNECT;
8205	default:
8206		break;
8207	}
8208	return PCI_ERS_RESULT_NEED_RESET;
8209}
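
/*
 * Sketch of how error-recovery callbacks like the ones above are
 * typically wired to the PCI core (the exact instance this driver
 * registers is an assumption here): the struct is referenced from the
 * pci_driver's err_handler field.
 */
static struct pci_error_handlers ipr_example_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};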
8210
8211/**
8212 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8213 * @ioa_cfg:	ioa cfg struct
8214 *
8215 * Description: This is the second phase of adapter initialization.
8216 * This function takes care of initializing the adapter to the point
8217 * where it can accept new commands.
8218 *
8219 * Return value:
8220 * 	0 on success / -EIO on failure
8221 **/
8222static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8223{
8224	int rc = 0;
8225	unsigned long host_lock_flags = 0;
8226
8227	ENTER;
8228	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8229	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8230	if (ioa_cfg->needs_hard_reset) {
8231		ioa_cfg->needs_hard_reset = 0;
8232		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8233	} else
8234		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8235					IPR_SHUTDOWN_NONE);
8236
8237	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8238	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8239	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8240
8241	if (ioa_cfg->ioa_is_dead) {
8242		rc = -EIO;
8243	} else if (ipr_invalid_adapter(ioa_cfg)) {
8244		if (!ipr_testmode)
8245			rc = -EIO;
8246
8247		dev_err(&ioa_cfg->pdev->dev,
8248			"Adapter not supported in this hardware configuration.\n");
8249	}
8250
8251	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8252
8253	LEAVE;
8254	return rc;
8255}
8256
8257/**
8258 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8259 * @ioa_cfg:	ioa config struct
8260 *
8261 * Return value:
8262 * 	none
8263 **/
8264static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8265{
8266	int i;
8267
8268	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8269		if (ioa_cfg->ipr_cmnd_list[i])
8270			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8271				      ioa_cfg->ipr_cmnd_list[i],
8272				      ioa_cfg->ipr_cmnd_list_dma[i]);
8273
8274		ioa_cfg->ipr_cmnd_list[i] = NULL;
8275	}
8276
8277	if (ioa_cfg->ipr_cmd_pool)
8278		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8279
8280	ioa_cfg->ipr_cmd_pool = NULL;
8281}
8282
8283/**
8284 * ipr_free_mem - Frees memory allocated for an adapter
8285 * @ioa_cfg:	ioa cfg struct
8286 *
8287 * Return value:
8288 * 	nothing
8289 **/
8290static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8291{
8292	int i;
8293
8294	kfree(ioa_cfg->res_entries);
8295	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8296			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8297	ipr_free_cmd_blks(ioa_cfg);
8298	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8299			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8300	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8301			    ioa_cfg->u.cfg_table,
8302			    ioa_cfg->cfg_table_dma);
8303
8304	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8305		pci_free_consistent(ioa_cfg->pdev,
8306				    sizeof(struct ipr_hostrcb),
8307				    ioa_cfg->hostrcb[i],
8308				    ioa_cfg->hostrcb_dma[i]);
8309	}
8310
8311	ipr_free_dump(ioa_cfg);
8312	kfree(ioa_cfg->trace);
8313}
8314
8315/**
8316 * ipr_free_all_resources - Free all allocated resources for an adapter.
8317 * @ioa_cfg:	ioa config struct
8318 *
8319 * This function frees all allocated resources for the
8320 * specified adapter.
8321 *
8322 * Return value:
8323 * 	none
8324 **/
8325static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8326{
8327	struct pci_dev *pdev = ioa_cfg->pdev;
8328
8329	ENTER;
8330	free_irq(pdev->irq, ioa_cfg);
8331	pci_disable_msi(pdev);
8332	iounmap(ioa_cfg->hdw_dma_regs);
8333	pci_release_regions(pdev);
8334	ipr_free_mem(ioa_cfg);
8335	scsi_host_put(ioa_cfg->host);
8336	pci_disable_device(pdev);
8337	LEAVE;
8338}
8339
8340/**
8341 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8342 * @ioa_cfg:	ioa config struct
8343 *
8344 * Return value:
8345 * 	0 on success / -ENOMEM on allocation failure
8346 **/
8347static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8348{
8349	struct ipr_cmnd *ipr_cmd;
8350	struct ipr_ioarcb *ioarcb;
8351	dma_addr_t dma_addr;
8352	int i;
8353
8354	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8355						 sizeof(struct ipr_cmnd), 16, 0);
8356
8357	if (!ioa_cfg->ipr_cmd_pool)
8358		return -ENOMEM;
8359
8360	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8361		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8362
8363		if (!ipr_cmd) {
8364			ipr_free_cmd_blks(ioa_cfg);
8365			return -ENOMEM;
8366		}
8367
8368		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8369		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8370		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8371
8372		ioarcb = &ipr_cmd->ioarcb;
8373		ipr_cmd->dma_addr = dma_addr;
8374		if (ioa_cfg->sis64)
8375			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8376		else
8377			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8378
8379		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8380		if (ioa_cfg->sis64) {
8381			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8382				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8383			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8384				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8385		} else {
8386			ioarcb->write_ioadl_addr =
8387				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8388			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8389			ioarcb->ioasa_host_pci_addr =
8390				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8391		}
8392		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8393		ipr_cmd->cmd_index = i;
8394		ipr_cmd->ioa_cfg = ioa_cfg;
8395		ipr_cmd->sense_buffer_dma = dma_addr +
8396			offsetof(struct ipr_cmnd, sense_buffer);
8397
8398		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8399	}
8400
8401	return 0;
8402}
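
/*
 * pci_pool lifecycle sketch (illustration only): fixed-size DMA-able
 * blocks are carved from a pool created once per device, each
 * allocation also yielding the bus address the hardware needs, and
 * every block must be freed before the pool is destroyed -- the same
 * pairing ipr_free_cmd_blks() depends on.
 */
static int ipr_example_pool_usage(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t dma;
	void *blk;

	pool = pci_pool_create("ipr-example", pdev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	blk = pci_pool_alloc(pool, GFP_KERNEL, &dma);	/* dma = bus address */
	if (blk)
		pci_pool_free(pool, blk, dma);

	pci_pool_destroy(pool);
	return blk ? 0 : -ENOMEM;
}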
8403
8404/**
8405 * ipr_alloc_mem - Allocate memory for an adapter
8406 * @ioa_cfg:	ioa config struct
8407 *
8408 * Return value:
8409 * 	0 on success / non-zero for error
8410 **/
8411static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8412{
8413	struct pci_dev *pdev = ioa_cfg->pdev;
8414	int i, rc = -ENOMEM;
8415
8416	ENTER;
8417	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8418				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8419
8420	if (!ioa_cfg->res_entries)
8421		goto out;
8422
8423	if (ioa_cfg->sis64) {
8424		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8425					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8426		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8427					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8428		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8429					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8430	}
8431
8432	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8433		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8434		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8435	}
8436
8437	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8438						sizeof(struct ipr_misc_cbs),
8439						&ioa_cfg->vpd_cbs_dma);
8440
8441	if (!ioa_cfg->vpd_cbs)
8442		goto out_free_res_entries;
8443
8444	if (ipr_alloc_cmd_blks(ioa_cfg))
8445		goto out_free_vpd_cbs;
8446
8447	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8448						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8449						 &ioa_cfg->host_rrq_dma);
8450
8451	if (!ioa_cfg->host_rrq)
8452		goto out_ipr_free_cmd_blocks;
8453
8454	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8455						    ioa_cfg->cfg_table_size,
8456						    &ioa_cfg->cfg_table_dma);
8457
8458	if (!ioa_cfg->u.cfg_table)
8459		goto out_free_host_rrq;
8460
8461	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8462		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8463							   sizeof(struct ipr_hostrcb),
8464							   &ioa_cfg->hostrcb_dma[i]);
8465
8466		if (!ioa_cfg->hostrcb[i])
8467			goto out_free_hostrcb_dma;
8468
8469		ioa_cfg->hostrcb[i]->hostrcb_dma =
8470			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8471		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8472		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8473	}
8474
8475	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8476				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8477
8478	if (!ioa_cfg->trace)
8479		goto out_free_hostrcb_dma;
8480
8481	rc = 0;
8482out:
8483	LEAVE;
8484	return rc;
8485
8486out_free_hostrcb_dma:
8487	while (i-- > 0) {
8488		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8489				    ioa_cfg->hostrcb[i],
8490				    ioa_cfg->hostrcb_dma[i]);
8491	}
8492	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8493			    ioa_cfg->u.cfg_table,
8494			    ioa_cfg->cfg_table_dma);
8495out_free_host_rrq:
8496	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8497			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8498out_ipr_free_cmd_blocks:
8499	ipr_free_cmd_blks(ioa_cfg);
8500out_free_vpd_cbs:
8501	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8502			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8503out_free_res_entries:
8504	kfree(ioa_cfg->res_entries);
8505	goto out;
8506}
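
/*
 * The unwind labels above follow the kernel's goto-based cleanup
 * idiom: allocations are attempted in order, and each failure jumps
 * to a label that releases everything acquired so far, in reverse.
 * A minimal sketch of the shape (illustration only):
 */
static int ipr_example_unwind(void)
{
	void *a, *b;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		goto out;
	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto out_free_a;

	kfree(b);
	kfree(a);
	return 0;

out_free_a:
	kfree(a);
out:
	return -ENOMEM;
}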
8507
8508/**
8509 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8510 * @ioa_cfg:	ioa config struct
8511 *
8512 * Return value:
8513 * 	none
8514 **/
8515static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8516{
8517	int i;
8518
8519	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8520		ioa_cfg->bus_attr[i].bus = i;
8521		ioa_cfg->bus_attr[i].qas_enabled = 0;
8522		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8523		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8524			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8525		else
8526			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8527	}
8528}
8529
8530/**
8531 * ipr_init_ioa_cfg - Initialize IOA config struct
8532 * @ioa_cfg:	ioa config struct
8533 * @host:		scsi host struct
8534 * @pdev:		PCI dev struct
8535 *
8536 * Return value:
8537 * 	none
8538 **/
8539static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8540				       struct Scsi_Host *host, struct pci_dev *pdev)
8541{
8542	const struct ipr_interrupt_offsets *p;
8543	struct ipr_interrupts *t;
8544	void __iomem *base;
8545
8546	ioa_cfg->host = host;
8547	ioa_cfg->pdev = pdev;
8548	ioa_cfg->log_level = ipr_log_level;
8549	ioa_cfg->doorbell = IPR_DOORBELL;
8550	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8551	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8552	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8553	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8554	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8555	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8556	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8557	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8558
8559	INIT_LIST_HEAD(&ioa_cfg->free_q);
8560	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8561	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8562	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8563	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8564	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8565	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8566	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8567	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8568	ioa_cfg->sdt_state = INACTIVE;
8569
8570	ipr_initialize_bus_attr(ioa_cfg);
8571	ioa_cfg->max_devs_supported = ipr_max_devs;
8572
8573	if (ioa_cfg->sis64) {
8574		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8575		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8576		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8577			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8578	} else {
8579		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8580		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8581		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8582			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8583	}
8584	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8585	host->unique_id = host->host_no;
8586	host->max_cmd_len = IPR_MAX_CDB_LEN;
8587	pci_set_drvdata(pdev, ioa_cfg);
8588
8589	p = &ioa_cfg->chip_cfg->regs;
8590	t = &ioa_cfg->regs;
8591	base = ioa_cfg->hdw_dma_regs;
8592
8593	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8594	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8595	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8596	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8597	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8598	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8599	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8600	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8601	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8602	t->ioarrin_reg = base + p->ioarrin_reg;
8603	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8604	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8605	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8606	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8607	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8608	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8609
8610	if (ioa_cfg->sis64) {
8611		t->init_feedback_reg = base + p->init_feedback_reg;
8612		t->dump_addr_reg = base + p->dump_addr_reg;
8613		t->dump_data_reg = base + p->dump_data_reg;
8614		t->endian_swap_reg = base + p->endian_swap_reg;
8615	}
8616}
8617
8618/**
8619 * ipr_get_chip_info - Find adapter chip information
8620 * @dev_id:		PCI device id struct
8621 *
8622 * Return value:
8623 * 	ptr to chip information on success / NULL on failure
8624 **/
8625static const struct ipr_chip_t * __devinit
8626ipr_get_chip_info(const struct pci_device_id *dev_id)
8627{
8628	int i;
8629
8630	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8631		if (ipr_chip[i].vendor == dev_id->vendor &&
8632		    ipr_chip[i].device == dev_id->device)
8633			return &ipr_chip[i];
8634	return NULL;
8635}
8636
8637/**
8638 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8639 * @irq:		interrupt number
 * @devp:		pointer to the ioa config struct
8640 *
8641 * Description: Simply set the msi_received flag to 1 indicating that
8642 * Message Signaled Interrupts are supported.
8643 *
8644 * Return value:
8645 * 	IRQ_HANDLED
8646 **/
8647static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8648{
8649	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8650	unsigned long lock_flags = 0;
8651	irqreturn_t rc = IRQ_HANDLED;
8652
8653	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8654
8655	ioa_cfg->msi_received = 1;
8656	wake_up(&ioa_cfg->msi_wait_q);
8657
8658	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8659	return rc;
8660}
8661
8662/**
8663 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8664 * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
8665 *
8666 * Description: The return value from pci_enable_msi() cannot always be
8667 * trusted.  This routine sets up and initiates a test interrupt to determine
8668 * if the interrupt is received via the ipr_test_intr() service routine.
8669 * If the test fails, the driver will fall back to LSI.
8670 *
8671 * Return value:
8672 * 	0 on success / non-zero on failure
8673 **/
8674static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8675				  struct pci_dev *pdev)
8676{
8677	int rc;
8678	volatile u32 int_reg;
8679	unsigned long lock_flags = 0;
8680
8681	ENTER;
8682
8683	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8684	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8685	ioa_cfg->msi_received = 0;
8686	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8687	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8688	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8689	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8690
8691	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8692	if (rc) {
8693		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8694		return rc;
8695	} else if (ipr_debug)
8696		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8697
8698	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8699	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8700	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8701	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8702
8703	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8704	if (!ioa_cfg->msi_received) {
8705		/* MSI test failed */
8706		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8707		rc = -EOPNOTSUPP;
8708	} else if (ipr_debug)
8709		dev_info(&pdev->dev, "MSI test succeeded.\n");
8710
8711	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8712
8713	free_irq(pdev->irq, ioa_cfg);
8714
8715	LEAVE;
8716
8717	return rc;
8718}
8719
8720/**
8721 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8722 * @pdev:		PCI device struct
8723 * @dev_id:		PCI device id struct
8724 *
8725 * Return value:
8726 * 	0 on success / non-zero on failure
8727 **/
8728static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8729				   const struct pci_device_id *dev_id)
8730{
8731	struct ipr_ioa_cfg *ioa_cfg;
8732	struct Scsi_Host *host;
8733	unsigned long ipr_regs_pci;
8734	void __iomem *ipr_regs;
8735	int rc = PCIBIOS_SUCCESSFUL;
8736	volatile u32 mask, uproc, interrupts;
8737
8738	ENTER;
8739
8740	if ((rc = pci_enable_device(pdev))) {
8741		dev_err(&pdev->dev, "Cannot enable adapter\n");
8742		goto out;
8743	}
8744
8745	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8746
8747	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8748
8749	if (!host) {
8750		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8751		rc = -ENOMEM;
8752		goto out_disable;
8753	}
8754
8755	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8756	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8757	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8758		      sata_port_info.flags, &ipr_sata_ops);
8759
8760	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8761
8762	if (!ioa_cfg->ipr_chip) {
8763		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8764			dev_id->vendor, dev_id->device);
8765		goto out_scsi_host_put;
8766	}
8767
8768	/* set SIS 32 or SIS 64 */
8769	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8770	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8771
8772	if (ipr_transop_timeout)
8773		ioa_cfg->transop_timeout = ipr_transop_timeout;
8774	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8775		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8776	else
8777		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8778
8779	ioa_cfg->revid = pdev->revision;
8780
8781	ipr_regs_pci = pci_resource_start(pdev, 0);
8782
8783	rc = pci_request_regions(pdev, IPR_NAME);
8784	if (rc < 0) {
8785		dev_err(&pdev->dev,
8786			"Couldn't register memory range of registers\n");
8787		goto out_scsi_host_put;
8788	}
8789
8790	ipr_regs = pci_ioremap_bar(pdev, 0);
8791
8792	if (!ipr_regs) {
8793		dev_err(&pdev->dev,
8794			"Couldn't map memory range of registers\n");
8795		rc = -ENOMEM;
8796		goto out_release_regions;
8797	}
8798
8799	ioa_cfg->hdw_dma_regs = ipr_regs;
8800	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8801	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8802
8803	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8804
8805	pci_set_master(pdev);
8806
8807	if (ioa_cfg->sis64) {
8808		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8809		if (rc < 0) {
8810			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8811			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8812		}
8813
8814	} else
8815		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8816
8817	if (rc < 0) {
8818		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8819		goto cleanup_nomem;
8820	}
8821
8822	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8823				   ioa_cfg->chip_cfg->cache_line_size);
8824
8825	if (rc != PCIBIOS_SUCCESSFUL) {
8826		dev_err(&pdev->dev, "Write of cache line size failed\n");
8827		rc = -EIO;
8828		goto cleanup_nomem;
8829	}
8830
8831	/* Enable MSI style interrupts if they are supported. */
8832	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8833		rc = ipr_test_msi(ioa_cfg, pdev);
8834		if (rc == -EOPNOTSUPP)
8835			pci_disable_msi(pdev);
8836		else if (rc)
8837			goto out_msi_disable;
8838		else
8839			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8840	} else if (ipr_debug)
8841		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8842
8843	/* Save away PCI config space for use following IOA reset */
8844	rc = pci_save_state(pdev);
8845
8846	if (rc != PCIBIOS_SUCCESSFUL) {
8847		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8848		rc = -EIO;
8849		goto out_msi_disable;
8850	}
8851
8852	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8853		goto out_msi_disable;
8854
8855	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8856		goto out_msi_disable;
8857
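	/* Config table sizing: one header plus one entry per supported device */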
8858	if (ioa_cfg->sis64)
8859		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8860				+ ((sizeof(struct ipr_config_table_entry64)
8861				* ioa_cfg->max_devs_supported)));
8862	else
8863		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8864				+ ((sizeof(struct ipr_config_table_entry)
8865				* ioa_cfg->max_devs_supported)));
8866
8867	rc = ipr_alloc_mem(ioa_cfg);
8868	if (rc < 0) {
8869		dev_err(&pdev->dev,
8870			"Couldn't allocate enough memory for device driver!\n");
8871		goto out_msi_disable;
8872	}
8873
8874	/*
8875	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8876	 * the card is in an unknown state and needs a hard reset
8877	 */
8878	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8879	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8880	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8881	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8882		ioa_cfg->needs_hard_reset = 1;
8883	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
8884		ioa_cfg->needs_hard_reset = 1;
8885	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8886		ioa_cfg->ioa_unit_checked = 1;
8887
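	/*
	 * Mask the adapter's interrupts and clear anything pending (except
	 * the transition-to-operational indication) before hooking up the
	 * handler below.
	 */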
8888	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
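	/* MSI vectors are never shared; only legacy INTx needs IRQF_SHARED */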
8889	rc = request_irq(pdev->irq, ipr_isr,
8890			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8891			 IPR_NAME, ioa_cfg);
8892
8893	if (rc) {
8894		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8895			pdev->irq, rc);
8896		goto cleanup_nolog;
8897	}
8898
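	/*
	 * Adapters flagged with IPR_USE_PCI_WARM_RESET, and rev 0 Obsidian-E
	 * parts, are reset through a PCI slot (warm) reset rather than BIST.
	 */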
8899	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8900	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8901		ioa_cfg->needs_warm_reset = 1;
8902		ioa_cfg->reset = ipr_reset_slot_reset;
8903	} else
8904		ioa_cfg->reset = ipr_reset_start_bist;
8905
8906	spin_lock(&ipr_driver_lock);
8907	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8908	spin_unlock(&ipr_driver_lock);
8909
8910	LEAVE;
8911out:
8912	return rc;
8913
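	/*
	 * Error unwinding: each label below releases one resource and falls
	 * through to the next, in reverse order of acquisition.
	 */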
8914cleanup_nolog:
8915	ipr_free_mem(ioa_cfg);
8916out_msi_disable:
8917	pci_disable_msi(pdev);
8918cleanup_nomem:
8919	iounmap(ipr_regs);
8920out_release_regions:
8921	pci_release_regions(pdev);
8922out_scsi_host_put:
8923	scsi_host_put(host);
8924out_disable:
8925	pci_disable_device(pdev);
8926	goto out;
8927}
8928
8929/**
8930 * ipr_scan_vsets - Scans for VSET devices
8931 * @ioa_cfg:	ioa config struct
8932 *
8933 * Description: Since the VSET resources do not follow SAM in that we can have
8934 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8935 *
8936 * Return value:
8937 * 	none
8938 **/
8939static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8940{
8941	int target, lun;
8942
8943	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8944		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8945			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8946}
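/*
 * Example: a VSET target may expose only LUNs 2 and 5 with no LUN 0; the
 * midlayer's normal scan gives up on such a target, which is why the
 * explicit scsi_add_device() loop above is needed.
 */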
8947
8948/**
8949 * ipr_initiate_ioa_bringdown - Bring down an adapter
8950 * @ioa_cfg:		ioa config struct
8951 * @shutdown_type:	shutdown type
8952 *
8953 * Description: This function will initiate bringing down the adapter.
8954 * This consists of issuing an IOA shutdown to the adapter
8955 * to flush the cache, and running BIST.
8956 * If the caller needs to wait on the completion of the reset,
8957 * the caller must sleep on the reset_wait_q.
8958 *
8959 * Return value:
8960 * 	none
8961 **/
8962static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8963				       enum ipr_shutdown_type shutdown_type)
8964{
8965	ENTER;
8966	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8967		ioa_cfg->sdt_state = ABORT_DUMP;
8968	ioa_cfg->reset_retries = 0;
8969	ioa_cfg->in_ioa_bringdown = 1;
8970	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8971	LEAVE;
8972}
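/*
 * Illustrative usage (mirrors __ipr_remove() below): a caller that must
 * synchronize with the bringdown drops the host lock and sleeps on
 * reset_wait_q:
 *
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */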
8973
8974/**
8975 * __ipr_remove - Remove a single adapter
8976 * @pdev:	pci device struct
8977 *
8978 * Adapter hot plug remove entry point.
8979 *
8980 * Return value:
8981 * 	none
8982 **/
8983static void __ipr_remove(struct pci_dev *pdev)
8984{
8985	unsigned long host_lock_flags = 0;
8986	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8987	ENTER;
8988
8989	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8990	while(ioa_cfg->in_reset_reload) {
8991		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8992		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8993		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8994	}
8995
8996	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8997
8998	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8999	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9000	flush_work_sync(&ioa_cfg->work_q);
9001	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9002
9003	spin_lock(&ipr_driver_lock);
9004	list_del(&ioa_cfg->queue);
9005	spin_unlock(&ipr_driver_lock);
9006
9007	if (ioa_cfg->sdt_state == ABORT_DUMP)
9008		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9009	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9010
9011	ipr_free_all_resources(ioa_cfg);
9012
9013	LEAVE;
9014}
9015
9016/**
9017 * ipr_remove - IOA hot plug remove entry point
9018 * @pdev:	pci device struct
9019 *
 * Removes the sysfs trace and dump attributes and the Scsi_Host, then
 * tears the adapter down via __ipr_remove().
9021 *
9022 * Return value:
9023 * 	none
9024 **/
9025static void __devexit ipr_remove(struct pci_dev *pdev)
9026{
9027	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9028
9029	ENTER;
9030
9031	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9032			      &ipr_trace_attr);
9033	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9034			     &ipr_dump_attr);
9035	scsi_remove_host(ioa_cfg->host);
9036
9037	__ipr_remove(pdev);
9038
9039	LEAVE;
9040}
9041
9042/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
9045 * Return value:
9046 * 	0 on success / non-zero on failure
9047 **/
9048static int __devinit ipr_probe(struct pci_dev *pdev,
9049			       const struct pci_device_id *dev_id)
9050{
9051	struct ipr_ioa_cfg *ioa_cfg;
9052	int rc;
9053
9054	rc = ipr_probe_ioa(pdev, dev_id);
9055
9056	if (rc)
9057		return rc;
9058
9059	ioa_cfg = pci_get_drvdata(pdev);
9060	rc = ipr_probe_ioa_part2(ioa_cfg);
9061
9062	if (rc) {
9063		__ipr_remove(pdev);
9064		return rc;
9065	}
9066
9067	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9068
9069	if (rc) {
9070		__ipr_remove(pdev);
9071		return rc;
9072	}
9073
9074	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9075				   &ipr_trace_attr);
9076
9077	if (rc) {
9078		scsi_remove_host(ioa_cfg->host);
9079		__ipr_remove(pdev);
9080		return rc;
9081	}
9082
9083	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9084				   &ipr_dump_attr);
9085
9086	if (rc) {
9087		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9088				      &ipr_trace_attr);
9089		scsi_remove_host(ioa_cfg->host);
9090		__ipr_remove(pdev);
9091		return rc;
9092	}
9093
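	/*
	 * Everything is registered: scan the bus, explicitly add the VSET
	 * LUNs and the IOA pseudo-device the midlayer cannot discover on its
	 * own, then let the worker thread handle hot add/remove from here on.
	 */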
9094	scsi_scan_host(ioa_cfg->host);
9095	ipr_scan_vsets(ioa_cfg);
9096	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9097	ioa_cfg->allow_ml_add_del = 1;
9098	ioa_cfg->host->max_channel = IPR_VSET_BUS;
9099	schedule_work(&ioa_cfg->work_q);
9100	return 0;
9101}
9102
9103/**
9104 * ipr_shutdown - Shutdown handler.
9105 * @pdev:	pci device struct
9106 *
9107 * This function is invoked upon system shutdown/reboot. It will issue
9108 * an adapter shutdown to the adapter to flush the write cache.
9109 *
9110 * Return value:
9111 * 	none
9112 **/
9113static void ipr_shutdown(struct pci_dev *pdev)
9114{
9115	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9116	unsigned long lock_flags = 0;
9117
9118	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
9120		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9121		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9122		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9123	}
9124
9125	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9126	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9127	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9128}
9129
9130static struct pci_device_id ipr_pci_table[] __devinitdata = {
9131	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9132		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9133	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9134		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9135	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9136		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9137	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9138		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9139	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9140		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9141	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9142		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9143	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9144		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9145	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9146		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9147		IPR_USE_LONG_TRANSOP_TIMEOUT },
9148	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9149	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9150	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9151	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9152	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9153	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9154	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9155	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9156	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9157	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9158	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9159	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9160	      IPR_USE_LONG_TRANSOP_TIMEOUT},
9161	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9162	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9163	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9164	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9165	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9166	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9167	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9168	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9169	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9170	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9171	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9172	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9173	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9174	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9175		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9176	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9177		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9178	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9179		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9180		IPR_USE_LONG_TRANSOP_TIMEOUT },
9181	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9182		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9183		IPR_USE_LONG_TRANSOP_TIMEOUT },
9184	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9185		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9186	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9187		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9188	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9189		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9190	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9191		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9192	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9193		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9194	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9195		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9196	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9197		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9198	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9199		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9200	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9201		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
9202	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9203		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9204	{ }
9205};
9206MODULE_DEVICE_TABLE(pci, ipr_pci_table);
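/*
 * MODULE_DEVICE_TABLE() exports the IDs above as module aliases, so
 * udev/modprobe can autoload this driver when a matching adapter shows up.
 */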
9207
9208static struct pci_error_handlers ipr_err_handler = {
9209	.error_detected = ipr_pci_error_detected,
9210	.slot_reset = ipr_pci_slot_reset,
9211};
9212
9213static struct pci_driver ipr_driver = {
9214	.name = IPR_NAME,
9215	.id_table = ipr_pci_table,
9216	.probe = ipr_probe,
9217	.remove = __devexit_p(ipr_remove),
9218	.shutdown = ipr_shutdown,
9219	.err_handler = &ipr_err_handler,
9220};
9221
9222/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
9225 * Return value:
9226 * 	none
9227 **/
9228static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9229{
9230	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9231
9232	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9233}
9234
9235/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK once shutdown prepare has been issued / NOTIFY_DONE if the
 * 	event is not a halt, power off, or restart
9240 **/
9241static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9242{
9243	struct ipr_cmnd *ipr_cmd;
9244	struct ipr_ioa_cfg *ioa_cfg;
9245	unsigned long flags = 0;
9246
9247	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9248		return NOTIFY_DONE;
9249
9250	spin_lock(&ipr_driver_lock);
9251
9252	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9253		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9254		if (!ioa_cfg->allow_cmds) {
9255			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9256			continue;
9257		}
9258
9259		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9260		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9261		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9262		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9263		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9264
9265		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9266		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9267	}
9268	spin_unlock(&ipr_driver_lock);
9269
9270	return NOTIFY_OK;
9271}
9272
9273static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
9275};
9276
9277/**
9278 * ipr_init - Module entry point
9279 *
9280 * Return value:
9281 * 	0 on success / negative value on failure
9282 **/
9283static int __init ipr_init(void)
9284{
9285	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9286		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9287
9288	register_reboot_notifier(&ipr_notifier);
9289	return pci_register_driver(&ipr_driver);
9290}
9291
9292/**
9293 * ipr_exit - Module unload
9294 *
9295 * Module unload entry point.
9296 *
9297 * Return value:
9298 * 	none
9299 **/
9300static void __exit ipr_exit(void)
9301{
9302	unregister_reboot_notifier(&ipr_notifier);
9303	pci_unregister_driver(&ipr_driver);
9304}
9305
9306module_init(ipr_init);
9307module_exit(ipr_exit);
9308