ipr.c revision 7dd21308b17e2b657d167adc7e20b41b7c6bbe5c
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/slab.h>
63#include <linux/vmalloc.h>
64#include <linux/ioport.h>
65#include <linux/delay.h>
66#include <linux/pci.h>
67#include <linux/wait.h>
68#include <linux/spinlock.h>
69#include <linux/sched.h>
70#include <linux/interrupt.h>
71#include <linux/blkdev.h>
72#include <linux/firmware.h>
73#include <linux/module.h>
74#include <linux/moduleparam.h>
75#include <linux/libata.h>
76#include <linux/hdreg.h>
77#include <linux/reboot.h>
78#include <linux/stringify.h>
79#include <asm/io.h>
80#include <asm/irq.h>
81#include <asm/processor.h>
82#include <scsi/scsi.h>
83#include <scsi/scsi_host.h>
84#include <scsi/scsi_tcq.h>
85#include <scsi/scsi_eh.h>
86#include <scsi/scsi_cmnd.h>
87#include "ipr.h"
88
89/*
90 *   Global Data
91 */
92static LIST_HEAD(ipr_ioa_head);
93static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94static unsigned int ipr_max_speed = 1;
95static int ipr_testmode = 0;
96static unsigned int ipr_fastfail = 0;
97static unsigned int ipr_transop_timeout = 0;
98static unsigned int ipr_debug = 0;
99static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100static unsigned int ipr_dual_ioa_raid = 1;
101static DEFINE_SPINLOCK(ipr_driver_lock);
102
103/* This table describes the differences between DMA controller chips */
104static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
105	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
106		.mailbox = 0x0042C,
107		.cache_line_size = 0x20,
108		.clear_isr = 1,
109		{
110			.set_interrupt_mask_reg = 0x0022C,
111			.clr_interrupt_mask_reg = 0x00230,
112			.clr_interrupt_mask_reg32 = 0x00230,
113			.sense_interrupt_mask_reg = 0x0022C,
114			.sense_interrupt_mask_reg32 = 0x0022C,
115			.clr_interrupt_reg = 0x00228,
116			.clr_interrupt_reg32 = 0x00228,
117			.sense_interrupt_reg = 0x00224,
118			.sense_interrupt_reg32 = 0x00224,
119			.ioarrin_reg = 0x00404,
120			.sense_uproc_interrupt_reg = 0x00214,
121			.sense_uproc_interrupt_reg32 = 0x00214,
122			.set_uproc_interrupt_reg = 0x00214,
123			.set_uproc_interrupt_reg32 = 0x00214,
124			.clr_uproc_interrupt_reg = 0x00218,
125			.clr_uproc_interrupt_reg32 = 0x00218
126		}
127	},
128	{ /* Snipe and Scamp */
129		.mailbox = 0x0052C,
130		.cache_line_size = 0x20,
131		.clear_isr = 1,
132		{
133			.set_interrupt_mask_reg = 0x00288,
134			.clr_interrupt_mask_reg = 0x0028C,
135			.clr_interrupt_mask_reg32 = 0x0028C,
136			.sense_interrupt_mask_reg = 0x00288,
137			.sense_interrupt_mask_reg32 = 0x00288,
138			.clr_interrupt_reg = 0x00284,
139			.clr_interrupt_reg32 = 0x00284,
140			.sense_interrupt_reg = 0x00280,
141			.sense_interrupt_reg32 = 0x00280,
142			.ioarrin_reg = 0x00504,
143			.sense_uproc_interrupt_reg = 0x00290,
144			.sense_uproc_interrupt_reg32 = 0x00290,
145			.set_uproc_interrupt_reg = 0x00290,
146			.set_uproc_interrupt_reg32 = 0x00290,
147			.clr_uproc_interrupt_reg = 0x00294,
148			.clr_uproc_interrupt_reg32 = 0x00294
149		}
150	},
151	{ /* CRoC */
152		.mailbox = 0x00044,
153		.cache_line_size = 0x20,
154		.clear_isr = 0,
155		{
156			.set_interrupt_mask_reg = 0x00010,
157			.clr_interrupt_mask_reg = 0x00018,
158			.clr_interrupt_mask_reg32 = 0x0001C,
159			.sense_interrupt_mask_reg = 0x00010,
160			.sense_interrupt_mask_reg32 = 0x00014,
161			.clr_interrupt_reg = 0x00008,
162			.clr_interrupt_reg32 = 0x0000C,
163			.sense_interrupt_reg = 0x00000,
164			.sense_interrupt_reg32 = 0x00004,
165			.ioarrin_reg = 0x00070,
166			.sense_uproc_interrupt_reg = 0x00020,
167			.sense_uproc_interrupt_reg32 = 0x00024,
168			.set_uproc_interrupt_reg = 0x00020,
169			.set_uproc_interrupt_reg32 = 0x00024,
170			.clr_uproc_interrupt_reg = 0x00028,
171			.clr_uproc_interrupt_reg32 = 0x0002C,
172			.init_feedback_reg = 0x0005C,
173			.dump_addr_reg = 0x00064,
174			.dump_data_reg = 0x00068,
175			.endian_swap_reg = 0x00084
176		}
177	},
178};
179
180static const struct ipr_chip_t ipr_chip[] = {
181	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
182	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
183	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
184	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
185	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
186	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
187	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
188	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
189	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
190};
191
192static int ipr_max_bus_speeds [] = {
193	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
194};
195
196MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
197MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
198module_param_named(max_speed, ipr_max_speed, uint, 0);
199MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
200module_param_named(log_level, ipr_log_level, uint, 0);
201MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
202module_param_named(testmode, ipr_testmode, int, 0);
203MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
204module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
205MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
206module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
207MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to become operational (default: 300)");
208module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
209MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
210module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
211MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
212module_param_named(max_devs, ipr_max_devs, int, 0);
213MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
214		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
215MODULE_LICENSE("GPL");
216MODULE_VERSION(IPR_DRIVER_VERSION);
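
/*
 * Illustrative example (not part of the original source): the module
 * parameters above are normally supplied at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * or on the kernel command line as ipr.max_speed=2, ipr.log_level=2, etc.
 */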
217
218/*  A constant array of IOASCs/URCs/Error Messages */
219static const
220struct ipr_error_table_t ipr_error_table[] = {
221	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
222	"8155: An unknown error was received"},
223	{0x00330000, 0, 0,
224	"Soft underlength error"},
225	{0x005A0000, 0, 0,
226	"Command to be cancelled not found"},
227	{0x00808000, 0, 0,
228	"Qualified success"},
229	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
230	"FFFE: Soft device bus error recovered by the IOA"},
231	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
232	"4101: Soft device bus fabric error"},
233	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
234	"FFFC: Logical block guard error recovered by the device"},
235	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
236	"FFFC: Logical block reference tag error recovered by the device"},
237	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
238	"4171: Recovered scatter list tag / sequence number error"},
239	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
240	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
241	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
242	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
243	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
244	"FFFD: Recovered logical block reference tag error detected by the IOA"},
245	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
246	"FFFD: Logical block guard error recovered by the IOA"},
247	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
248	"FFF9: Device sector reassign successful"},
249	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
250	"FFF7: Media error recovered by device rewrite procedures"},
251	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
252	"7001: IOA sector reassignment successful"},
253	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
254	"FFF9: Soft media error. Sector reassignment recommended"},
255	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
256	"FFF7: Media error recovered by IOA rewrite procedures"},
257	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
258	"FF3D: Soft PCI bus error recovered by the IOA"},
259	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
260	"FFF6: Device hardware error recovered by the IOA"},
261	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
262	"FFF6: Device hardware error recovered by the device"},
263	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
264	"FF3D: Soft IOA error recovered by the IOA"},
265	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
266	"FFFA: Undefined device response recovered by the IOA"},
267	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
268	"FFF6: Device bus error, message or command phase"},
269	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
270	"FFFE: Task Management Function failed"},
271	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
272	"FFF6: Failure prediction threshold exceeded"},
273	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
274	"8009: Impending cache battery pack failure"},
275	{0x02040400, 0, 0,
276	"34FF: Disk device format in progress"},
277	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
278	"9070: IOA requested reset"},
279	{0x023F0000, 0, 0,
280	"Synchronization required"},
281	{0x024E0000, 0, 0,
282	"Not ready, IOA shutdown"},
283	{0x025A0000, 0, 0,
284	"Not ready, IOA has been shutdown"},
285	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
286	"3020: Storage subsystem configuration error"},
287	{0x03110B00, 0, 0,
288	"FFF5: Medium error, data unreadable, recommend reassign"},
289	{0x03110C00, 0, 0,
290	"7000: Medium error, data unreadable, do not reassign"},
291	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
292	"FFF3: Disk media format bad"},
293	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
294	"3002: Addressed device failed to respond to selection"},
295	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
296	"3100: Device bus error"},
297	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
298	"3109: IOA timed out a device command"},
299	{0x04088000, 0, 0,
300	"3120: SCSI bus is not operational"},
301	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
302	"4100: Hard device bus fabric error"},
303	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
304	"310C: Logical block guard error detected by the device"},
305	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
306	"310C: Logical block reference tag error detected by the device"},
307	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
308	"4170: Scatter list tag / sequence number error"},
309	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
310	"8150: Logical block CRC error on IOA to Host transfer"},
311	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
312	"4170: Logical block sequence number error on IOA to Host transfer"},
313	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
314	"310D: Logical block reference tag error detected by the IOA"},
315	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
316	"310D: Logical block guard error detected by the IOA"},
317	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
318	"9000: IOA reserved area data check"},
319	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
320	"9001: IOA reserved area invalid data pattern"},
321	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
322	"9002: IOA reserved area LRC error"},
323	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
324	"Hardware Error, IOA metadata access error"},
325	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
326	"102E: Out of alternate sectors for disk storage"},
327	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
328	"FFF4: Data transfer underlength error"},
329	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
330	"FFF4: Data transfer overlength error"},
331	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
332	"3400: Logical unit failure"},
333	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
334	"FFF4: Device microcode is corrupt"},
335	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
336	"8150: PCI bus error"},
337	{0x04430000, 1, 0,
338	"Unsupported device bus message received"},
339	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
340	"FFF4: Disk device problem"},
341	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
342	"8150: Permanent IOA failure"},
343	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
344	"3010: Disk device returned wrong response to IOA"},
345	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
346	"8151: IOA microcode error"},
347	{0x04448500, 0, 0,
348	"Device bus status error"},
349	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
350	"8157: IOA error requiring IOA reset to recover"},
351	{0x04448700, 0, 0,
352	"ATA device status error"},
353	{0x04490000, 0, 0,
354	"Message reject received from the device"},
355	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
356	"8008: A permanent cache battery pack failure occurred"},
357	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
358	"9090: Disk unit has been modified after the last known status"},
359	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
360	"9081: IOA detected device error"},
361	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
362	"9082: IOA detected device error"},
363	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
364	"3110: Device bus error, message or command phase"},
365	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
366	"3110: SAS Command / Task Management Function failed"},
367	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
368	"9091: Incorrect hardware configuration change has been detected"},
369	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
370	"9073: Invalid multi-adapter configuration"},
371	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
372	"4010: Incorrect connection between cascaded expanders"},
373	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
374	"4020: Connections exceed IOA design limits"},
375	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
376	"4030: Incorrect multipath connection"},
377	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
378	"4110: Unsupported enclosure function"},
379	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
380	"FFF4: Command to logical unit failed"},
381	{0x05240000, 1, 0,
382	"Illegal request, invalid request type or request packet"},
383	{0x05250000, 0, 0,
384	"Illegal request, invalid resource handle"},
385	{0x05258000, 0, 0,
386	"Illegal request, commands not allowed to this device"},
387	{0x05258100, 0, 0,
388	"Illegal request, command not allowed to a secondary adapter"},
389	{0x05258200, 0, 0,
390	"Illegal request, command not allowed to a non-optimized resource"},
391	{0x05260000, 0, 0,
392	"Illegal request, invalid field in parameter list"},
393	{0x05260100, 0, 0,
394	"Illegal request, parameter not supported"},
395	{0x05260200, 0, 0,
396	"Illegal request, parameter value invalid"},
397	{0x052C0000, 0, 0,
398	"Illegal request, command sequence error"},
399	{0x052C8000, 1, 0,
400	"Illegal request, dual adapter support not enabled"},
401	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
402	"9031: Array protection temporarily suspended, protection resuming"},
403	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
404	"9040: Array protection temporarily suspended, protection resuming"},
405	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
406	"3140: Device bus not ready to ready transition"},
407	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
408	"FFFB: SCSI bus was reset"},
409	{0x06290500, 0, 0,
410	"FFFE: SCSI bus transition to single ended"},
411	{0x06290600, 0, 0,
412	"FFFE: SCSI bus transition to LVD"},
413	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
414	"FFFB: SCSI bus was reset by another initiator"},
415	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
416	"3029: A device replacement has occurred"},
417	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
418	"9051: IOA cache data exists for a missing or failed device"},
419	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
420	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
421	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
422	"9025: Disk unit is not supported at its physical location"},
423	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
424	"3020: IOA detected a SCSI bus configuration error"},
425	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
426	"3150: SCSI bus configuration error"},
427	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
428	"9074: Asymmetric advanced function disk configuration"},
429	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
430	"4040: Incomplete multipath connection between IOA and enclosure"},
431	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
432	"4041: Incomplete multipath connection between enclosure and device"},
433	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
434	"9075: Incomplete multipath connection between IOA and remote IOA"},
435	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
436	"9076: Configuration error, missing remote IOA"},
437	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
438	"4050: Enclosure does not support a required multipath function"},
439	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
440	"4070: Logically bad block written on device"},
441	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
442	"9041: Array protection temporarily suspended"},
443	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
444	"9042: Corrupt array parity detected on specified device"},
445	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
446	"9030: Array no longer protected due to missing or failed disk unit"},
447	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
448	"9071: Link operational transition"},
449	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
450	"9072: Link not operational transition"},
451	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
452	"9032: Array exposed but still protected"},
453	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
454	"70DD: Device forced failed by disrupt device command"},
455	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
456	"4061: Multipath redundancy level got better"},
457	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
458	"4060: Multipath redundancy level got worse"},
459	{0x07270000, 0, 0,
460	"Failure due to other device"},
461	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
462	"9008: IOA does not support functions expected by devices"},
463	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
464	"9010: Cache data associated with attached devices cannot be found"},
465	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
466	"9011: Cache data belongs to devices other than those attached"},
467	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
468	"9020: Array missing 2 or more devices with only 1 device present"},
469	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
470	"9021: Array missing 2 or more devices with 2 or more devices present"},
471	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
472	"9022: Exposed array is missing a required device"},
473	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
474	"9023: Array member(s) not at required physical locations"},
475	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
476	"9024: Array not functional due to present hardware configuration"},
477	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
478	"9026: Array not functional due to present hardware configuration"},
479	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
480	"9027: Array is missing a device and parity is out of sync"},
481	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
482	"9028: Maximum number of arrays already exist"},
483	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
484	"9050: Required cache data cannot be located for a disk unit"},
485	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
486	"9052: Cache data exists for a device that has been modified"},
487	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
488	"9054: IOA resources not available due to previous problems"},
489	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
490	"9092: Disk unit requires initialization before use"},
491	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
492	"9029: Incorrect hardware configuration change has been detected"},
493	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
494	"9060: One or more disk pairs are missing from an array"},
495	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
496	"9061: One or more disks are missing from an array"},
497	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
498	"9062: One or more disks are missing from an array"},
499	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
500	"9063: Maximum number of functional arrays has been exceeded"},
501	{0x0B260000, 0, 0,
502	"Aborted command, invalid descriptor"},
503	{0x0B5A0000, 0, 0,
504	"Command terminated by host"}
505};
506
507static const struct ipr_ses_table_entry ipr_ses_table[] = {
508	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
509	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
510	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
511	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
512	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
513	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
514	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
515	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
516	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
517	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
518	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
519	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
520	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
521};
522
523/*
524 *  Function Prototypes
525 */
526static int ipr_reset_alert(struct ipr_cmnd *);
527static void ipr_process_ccn(struct ipr_cmnd *);
528static void ipr_process_error(struct ipr_cmnd *);
529static void ipr_reset_ioa_job(struct ipr_cmnd *);
530static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
531				   enum ipr_shutdown_type);
532
533#ifdef CONFIG_SCSI_IPR_TRACE
534/**
535 * ipr_trc_hook - Add a trace entry to the driver trace
536 * @ipr_cmd:	ipr command struct
537 * @type:		trace type
538 * @add_data:	additional data
539 *
540 * Return value:
541 * 	none
542 **/
543static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
544			 u8 type, u32 add_data)
545{
546	struct ipr_trace_entry *trace_entry;
547	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
548
549	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
550	trace_entry->time = jiffies;
551	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
552	trace_entry->type = type;
553	if (ipr_cmd->ioa_cfg->sis64)
554		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
555	else
556		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
557	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
558	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
559	trace_entry->u.add_data = add_data;
560}
561#else
562#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
563#endif
564
565/**
566 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
567 * @ipr_cmd:	ipr command struct
568 *
569 * Return value:
570 * 	none
571 **/
572static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
573{
574	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
575	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
576	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
577	dma_addr_t dma_addr = ipr_cmd->dma_addr;
578
579	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
580	ioarcb->data_transfer_length = 0;
581	ioarcb->read_data_transfer_length = 0;
582	ioarcb->ioadl_len = 0;
583	ioarcb->read_ioadl_len = 0;
584
585	if (ipr_cmd->ioa_cfg->sis64) {
586		ioarcb->u.sis64_addr_data.data_ioadl_addr =
587			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
588		ioasa64->u.gata.status = 0;
589	} else {
590		ioarcb->write_ioadl_addr =
591			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
592		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
593		ioasa->u.gata.status = 0;
594	}
595
596	ioasa->hdr.ioasc = 0;
597	ioasa->hdr.residual_data_len = 0;
598	ipr_cmd->scsi_cmd = NULL;
599	ipr_cmd->qc = NULL;
600	ipr_cmd->sense_buffer[0] = 0;
601	ipr_cmd->dma_use_sg = 0;
602}
603
604/**
605 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
606 * @ipr_cmd:	ipr command struct
607 *
608 * Return value:
609 * 	none
610 **/
611static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
612{
613	ipr_reinit_ipr_cmnd(ipr_cmd);
614	ipr_cmd->u.scratch = 0;
615	ipr_cmd->sibling = NULL;
616	init_timer(&ipr_cmd->timer);
617}
618
619/**
620 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
621 * @ioa_cfg:	ioa config struct
622 *
623 * Return value:
624 * 	pointer to ipr command struct
625 **/
626static
627struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
628{
629	struct ipr_cmnd *ipr_cmd;
630
631	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
632	list_del(&ipr_cmd->queue);
633	ipr_init_ipr_cmnd(ipr_cmd);
634
635	return ipr_cmd;
636}
637
638/**
639 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
640 * @ioa_cfg:	ioa config struct
641 * @clr_ints:     interrupts to clear
642 *
643 * This function masks all interrupts on the adapter, then clears the
644 * interrupts specified in the mask
645 *
646 * Return value:
647 * 	none
648 **/
649static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
650					  u32 clr_ints)
651{
652	volatile u32 int_reg;
653
654	/* Stop new interrupts */
655	ioa_cfg->allow_interrupts = 0;
656
657	/* Set interrupt mask to stop all new interrupts */
658	if (ioa_cfg->sis64)
659		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
660	else
661		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
662
663	/* Clear any pending interrupts */
664	if (ioa_cfg->sis64)
665		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
666	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
667	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
668}
669
670/**
671 * ipr_save_pcix_cmd_reg - Save PCI-X command register
672 * @ioa_cfg:	ioa config struct
673 *
674 * Return value:
675 * 	0 on success / -EIO on failure
676 **/
677static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
678{
679	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
680
681	if (pcix_cmd_reg == 0)
682		return 0;
683
684	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
685				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
686		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
687		return -EIO;
688	}
689
690	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
691	return 0;
692}
693
694/**
695 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
696 * @ioa_cfg:	ioa config struct
697 *
698 * Return value:
699 * 	0 on success / -EIO on failure
700 **/
701static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
702{
703	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
704
705	if (pcix_cmd_reg) {
706		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
707					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
708			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
709			return -EIO;
710		}
711	}
712
713	return 0;
714}
715
716/**
717 * ipr_sata_eh_done - done function for aborted SATA commands
718 * @ipr_cmd:	ipr command struct
719 *
720 * This function is invoked for ops generated to SATA
721 * devices which are being aborted.
722 *
723 * Return value:
724 * 	none
725 **/
726static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
727{
728	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
729	struct ata_queued_cmd *qc = ipr_cmd->qc;
730	struct ipr_sata_port *sata_port = qc->ap->private_data;
731
732	qc->err_mask |= AC_ERR_OTHER;
733	sata_port->ioasa.status |= ATA_BUSY;
734	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
735	ata_qc_complete(qc);
736}
737
738/**
739 * ipr_scsi_eh_done - mid-layer done function for aborted ops
740 * @ipr_cmd:	ipr command struct
741 *
742 * This function is invoked by the interrupt handler for
743 * ops generated by the SCSI mid-layer which are being aborted.
744 *
745 * Return value:
746 * 	none
747 **/
748static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
749{
750	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
751	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
752
753	scsi_cmd->result |= (DID_ERROR << 16);
754
755	scsi_dma_unmap(ipr_cmd->scsi_cmd);
756	scsi_cmd->scsi_done(scsi_cmd);
757	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
758}
759
760/**
761 * ipr_fail_all_ops - Fails all outstanding ops.
762 * @ioa_cfg:	ioa config struct
763 *
764 * This function fails all outstanding ops.
765 *
766 * Return value:
767 * 	none
768 **/
769static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
770{
771	struct ipr_cmnd *ipr_cmd, *temp;
772
773	ENTER;
774	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
775		list_del(&ipr_cmd->queue);
776
777		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
778		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
779
780		if (ipr_cmd->scsi_cmd)
781			ipr_cmd->done = ipr_scsi_eh_done;
782		else if (ipr_cmd->qc)
783			ipr_cmd->done = ipr_sata_eh_done;
784
785		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
786		del_timer(&ipr_cmd->timer);
787		ipr_cmd->done(ipr_cmd);
788	}
789
790	LEAVE;
791}
792
793/**
794 * ipr_send_command -  Send driver initiated requests.
795 * @ipr_cmd:		ipr command struct
796 *
797 * This function sends a command to the adapter using the correct write call.
798 * In the case of sis64, calculate the required IOARCB size, then OR the
799 * appropriate size bits into the IOARCB address.
800 *
801 * Return value:
802 * 	none
803 **/
804static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
805{
806	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
807	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
808
809	if (ioa_cfg->sis64) {
810		/* The default size is 256 bytes */
811		send_dma_addr |= 0x1;
812
813		/* If the number of ioadls * size of ioadl > 128 bytes,
814		   then use a 512 byte ioarcb */
815		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
816			send_dma_addr |= 0x4;
817		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
818	} else
819		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
820}
821
822/**
823 * ipr_do_req -  Send driver initiated requests.
824 * @ipr_cmd:		ipr command struct
825 * @done:			done function
826 * @timeout_func:	timeout function
827 * @timeout:		timeout value, in jiffies
828 *
829 * This function sends the specified command to the adapter with the
830 * timeout given. The done function is invoked on command completion.
831 *
832 * Return value:
833 * 	none
834 **/
835static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
836		       void (*done) (struct ipr_cmnd *),
837		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
838{
839	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
840
841	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
842
843	ipr_cmd->done = done;
844
845	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
846	ipr_cmd->timer.expires = jiffies + timeout;
847	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
848
849	add_timer(&ipr_cmd->timer);
850
851	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
852
853	mb();	/* order IOARCB memory updates before the IOARRIN doorbell write */
854
855	ipr_send_command(ipr_cmd);
856}
857
858/**
859 * ipr_internal_cmd_done - Op done function for an internally generated op.
860 * @ipr_cmd:	ipr command struct
861 *
862 * This function is the op done function for an internally generated,
863 * blocking op. It simply wakes the sleeping thread.
864 *
865 * Return value:
866 * 	none
867 **/
868static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
869{
870	if (ipr_cmd->sibling)
871		ipr_cmd->sibling = NULL;
872	else
873		complete(&ipr_cmd->completion);
874}
875
876/**
877 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
878 * @ipr_cmd:	ipr command struct
879 * @dma_addr:	dma address
880 * @len:	transfer length
881 * @flags:	ioadl flag value
882 *
883 * This function initializes an ioadl in the case where there is only a single
884 * descriptor.
885 *
886 * Return value:
887 * 	none
888 **/
889static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
890			   u32 len, int flags)
891{
892	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
893	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
894
895	ipr_cmd->dma_use_sg = 1;
896
897	if (ipr_cmd->ioa_cfg->sis64) {
898		ioadl64->flags = cpu_to_be32(flags);
899		ioadl64->data_len = cpu_to_be32(len);
900		ioadl64->address = cpu_to_be64(dma_addr);
901
902		ipr_cmd->ioarcb.ioadl_len =
903		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
904		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
905	} else {
906		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
907		ioadl->address = cpu_to_be32(dma_addr);
908
909		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
910			ipr_cmd->ioarcb.read_ioadl_len =
911				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
912			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
913		} else {
914			ipr_cmd->ioarcb.ioadl_len =
915			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
916			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
917		}
918	}
919}
920
921/**
922 * ipr_send_blocking_cmd - Send command and sleep on its completion.
923 * @ipr_cmd:	ipr command struct
924 * @timeout_func:	function to invoke if command times out
925 * @timeout:	timeout, in jiffies
926 *
927 * Return value:
928 * 	none
929 **/
930static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
931				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
932				  u32 timeout)
933{
934	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
935
936	init_completion(&ipr_cmd->completion);
937	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
938
939	spin_unlock_irq(ioa_cfg->host->host_lock);
940	wait_for_completion(&ipr_cmd->completion);
941	spin_lock_irq(ioa_cfg->host->host_lock);
942}
943
944/**
945 * ipr_send_hcam - Send an HCAM to the adapter.
946 * @ioa_cfg:	ioa config struct
947 * @type:		HCAM type
948 * @hostrcb:	hostrcb struct
949 *
950 * This function will send a Host Controlled Async command to the adapter.
951 * If HCAMs are currently not allowed to be issued to the adapter, it will
952 * place the hostrcb on the free queue.
953 *
954 * Return value:
955 * 	none
956 **/
957static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
958			  struct ipr_hostrcb *hostrcb)
959{
960	struct ipr_cmnd *ipr_cmd;
961	struct ipr_ioarcb *ioarcb;
962
963	if (ioa_cfg->allow_cmds) {
964		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
965		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
966		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
967
968		ipr_cmd->u.hostrcb = hostrcb;
969		ioarcb = &ipr_cmd->ioarcb;
970
971		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
972		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
973		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
974		ioarcb->cmd_pkt.cdb[1] = type;
975		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
976		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
977
978		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
979			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
980
981		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
982			ipr_cmd->done = ipr_process_ccn;
983		else
984			ipr_cmd->done = ipr_process_error;
985
986		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
987
988		mb();
989
990		ipr_send_command(ipr_cmd);
991	} else {
992		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
993	}
994}
995
996/**
997 * ipr_update_ata_class - Update the ata class in the resource entry
998 * @res:	resource entry struct
999 * @proto:	cfgte device bus protocol value
1000 *
1001 * Return value:
1002 * 	none
1003 **/
1004static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1005{
1006	switch(proto) {
1007	case IPR_PROTO_SATA:
1008	case IPR_PROTO_SAS_STP:
1009		res->ata_class = ATA_DEV_ATA;
1010		break;
1011	case IPR_PROTO_SATA_ATAPI:
1012	case IPR_PROTO_SAS_STP_ATAPI:
1013		res->ata_class = ATA_DEV_ATAPI;
1014		break;
1015	default:
1016		res->ata_class = ATA_DEV_UNKNOWN;
1017		break;
1018	}
1019}
1020
1021/**
1022 * ipr_init_res_entry - Initialize a resource entry struct.
1023 * @res:	resource entry struct
1024 * @cfgtew:	config table entry wrapper struct
1025 *
1026 * Return value:
1027 * 	none
1028 **/
1029static void ipr_init_res_entry(struct ipr_resource_entry *res,
1030			       struct ipr_config_table_entry_wrapper *cfgtew)
1031{
1032	int found = 0;
1033	unsigned int proto;
1034	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1035	struct ipr_resource_entry *gscsi_res = NULL;
1036
1037	res->needs_sync_complete = 0;
1038	res->in_erp = 0;
1039	res->add_to_ml = 0;
1040	res->del_from_ml = 0;
1041	res->resetting_device = 0;
1042	res->sdev = NULL;
1043	res->sata_port = NULL;
1044
1045	if (ioa_cfg->sis64) {
1046		proto = cfgtew->u.cfgte64->proto;
1047		res->res_flags = cfgtew->u.cfgte64->res_flags;
1048		res->qmodel = IPR_QUEUEING_MODEL64(res);
1049		res->type = cfgtew->u.cfgte64->res_type;
1050
1051		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1052			sizeof(res->res_path));
1053
1054		res->bus = 0;
1055		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1056			sizeof(res->dev_lun.scsi_lun));
1057		res->lun = scsilun_to_int(&res->dev_lun);
1058
1059		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1060			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1061				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1062					found = 1;
1063					res->target = gscsi_res->target;
1064					break;
1065				}
1066			}
1067			if (!found) {
1068				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1069								  ioa_cfg->max_devs_supported);
1070				set_bit(res->target, ioa_cfg->target_ids);
1071			}
1072		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1073			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1074			res->target = 0;
1075		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1076			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1077			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1078							  ioa_cfg->max_devs_supported);
1079			set_bit(res->target, ioa_cfg->array_ids);
1080		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1081			res->bus = IPR_VSET_VIRTUAL_BUS;
1082			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1083							  ioa_cfg->max_devs_supported);
1084			set_bit(res->target, ioa_cfg->vset_ids);
1085		} else {
1086			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1087							  ioa_cfg->max_devs_supported);
1088			set_bit(res->target, ioa_cfg->target_ids);
1089		}
1090	} else {
1091		proto = cfgtew->u.cfgte->proto;
1092		res->qmodel = IPR_QUEUEING_MODEL(res);
1093		res->flags = cfgtew->u.cfgte->flags;
1094		if (res->flags & IPR_IS_IOA_RESOURCE)
1095			res->type = IPR_RES_TYPE_IOAFP;
1096		else
1097			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1098
1099		res->bus = cfgtew->u.cfgte->res_addr.bus;
1100		res->target = cfgtew->u.cfgte->res_addr.target;
1101		res->lun = cfgtew->u.cfgte->res_addr.lun;
1102		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1103	}
1104
1105	ipr_update_ata_class(res, proto);
1106}
1107
1108/**
1109 * ipr_is_same_device - Determine if two devices are the same.
1110 * @res:	resource entry struct
1111 * @cfgtew:	config table entry wrapper struct
1112 *
1113 * Return value:
1114 * 	1 if the devices are the same / 0 otherwise
1115 **/
1116static int ipr_is_same_device(struct ipr_resource_entry *res,
1117			      struct ipr_config_table_entry_wrapper *cfgtew)
1118{
1119	if (res->ioa_cfg->sis64) {
1120		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1121					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1122			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1123					sizeof(cfgtew->u.cfgte64->lun))) {
1124			return 1;
1125		}
1126	} else {
1127		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1128		    res->target == cfgtew->u.cfgte->res_addr.target &&
1129		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1130			return 1;
1131	}
1132
1133	return 0;
1134}
1135
1136/**
1137 * ipr_format_res_path - Format the resource path for printing.
1138 * @res_path:	resource path
1139 * @buffer:	buffer (of @len bytes) in which to format the path
1140 *
1141 * Return value:
1142 * 	pointer to buffer
1143 **/
1144static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1145{
1146	int i;
1147	char *p = buffer;
1148
1149	*p = '\0';
1150	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1151	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1152		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1153
1154	return buffer;
1155}
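
/*
 * Illustrative example (not from the original source): a res_path of
 * { 0x00, 0x02, 0x04, 0xff, ... } formats as "00-02-04"; formatting stops
 * at the first 0xff terminator byte or when the buffer length is reached.
 */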
1156
1157/**
1158 * ipr_update_res_entry - Update the resource entry.
1159 * @res:	resource entry struct
1160 * @cfgtew:	config table entry wrapper struct
1161 *
1162 * Return value:
1163 *      none
1164 **/
1165static void ipr_update_res_entry(struct ipr_resource_entry *res,
1166				 struct ipr_config_table_entry_wrapper *cfgtew)
1167{
1168	char buffer[IPR_MAX_RES_PATH_LENGTH];
1169	unsigned int proto;
1170	int new_path = 0;
1171
1172	if (res->ioa_cfg->sis64) {
1173		res->flags = cfgtew->u.cfgte64->flags;
1174		res->res_flags = cfgtew->u.cfgte64->res_flags;
1175		res->type = cfgtew->u.cfgte64->res_type;
1176
1177		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1178			sizeof(struct ipr_std_inq_data));
1179
1180		res->qmodel = IPR_QUEUEING_MODEL64(res);
1181		proto = cfgtew->u.cfgte64->proto;
1182		res->res_handle = cfgtew->u.cfgte64->res_handle;
1183		res->dev_id = cfgtew->u.cfgte64->dev_id;
1184
1185		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1186			sizeof(res->dev_lun.scsi_lun));
1187
1188		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1189					sizeof(res->res_path))) {
1190			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1191				sizeof(res->res_path));
1192			new_path = 1;
1193		}
1194
1195		if (res->sdev && new_path)
1196			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1197				    ipr_format_res_path(res->res_path, buffer,
1198							sizeof(buffer)));
1199	} else {
1200		res->flags = cfgtew->u.cfgte->flags;
1201		if (res->flags & IPR_IS_IOA_RESOURCE)
1202			res->type = IPR_RES_TYPE_IOAFP;
1203		else
1204			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1205
1206		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1207			sizeof(struct ipr_std_inq_data));
1208
1209		res->qmodel = IPR_QUEUEING_MODEL(res);
1210		proto = cfgtew->u.cfgte->proto;
1211		res->res_handle = cfgtew->u.cfgte->res_handle;
1212	}
1213
1214	ipr_update_ata_class(res, proto);
1215}
1216
1217/**
1218 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1219 * 			  for the resource.
1220 * @res:	resource entry struct
1222 *
1223 * Return value:
1224 *      none
1225 **/
1226static void ipr_clear_res_target(struct ipr_resource_entry *res)
1227{
1228	struct ipr_resource_entry *gscsi_res = NULL;
1229	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1230
1231	if (!ioa_cfg->sis64)
1232		return;
1233
1234	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1235		clear_bit(res->target, ioa_cfg->array_ids);
1236	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1237		clear_bit(res->target, ioa_cfg->vset_ids);
1238	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1239		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1240			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1241				return;
1242		clear_bit(res->target, ioa_cfg->target_ids);
1243
1244	} else if (res->bus == 0)
1245		clear_bit(res->target, ioa_cfg->target_ids);
1246}
1247
1248/**
1249 * ipr_handle_config_change - Handle a config change from the adapter
1250 * @ioa_cfg:	ioa config struct
1251 * @hostrcb:	hostrcb
1252 *
1253 * Return value:
1254 * 	none
1255 **/
1256static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1257				     struct ipr_hostrcb *hostrcb)
1258{
1259	struct ipr_resource_entry *res = NULL;
1260	struct ipr_config_table_entry_wrapper cfgtew;
1261	__be32 cc_res_handle;
1262
1263	u32 is_ndn = 1;
1264
1265	if (ioa_cfg->sis64) {
1266		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1267		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1268	} else {
1269		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1270		cc_res_handle = cfgtew.u.cfgte->res_handle;
1271	}
1272
1273	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1274		if (res->res_handle == cc_res_handle) {
1275			is_ndn = 0;
1276			break;
1277		}
1278	}
1279
1280	if (is_ndn) {
1281		if (list_empty(&ioa_cfg->free_res_q)) {
1282			ipr_send_hcam(ioa_cfg,
1283				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1284				      hostrcb);
1285			return;
1286		}
1287
1288		res = list_entry(ioa_cfg->free_res_q.next,
1289				 struct ipr_resource_entry, queue);
1290
1291		list_del(&res->queue);
1292		ipr_init_res_entry(res, &cfgtew);
1293		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1294	}
1295
1296	ipr_update_res_entry(res, &cfgtew);
1297
1298	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1299		if (res->sdev) {
1300			res->del_from_ml = 1;
1301			res->res_handle = IPR_INVALID_RES_HANDLE;
1302			if (ioa_cfg->allow_ml_add_del)
1303				schedule_work(&ioa_cfg->work_q);
1304		} else {
1305			ipr_clear_res_target(res);
1306			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1307		}
1308	} else if (!res->sdev || res->del_from_ml) {
1309		res->add_to_ml = 1;
1310		if (ioa_cfg->allow_ml_add_del)
1311			schedule_work(&ioa_cfg->work_q);
1312	}
1313
1314	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1315}
1316
1317/**
1318 * ipr_process_ccn - Op done function for a CCN.
1319 * @ipr_cmd:	ipr command struct
1320 *
1321 * This function is the op done function for a configuration
1322 * change notification host controlled async from the adapter.
1323 *
1324 * Return value:
1325 * 	none
1326 **/
1327static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1328{
1329	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1330	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1331	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1332
1333	list_del(&hostrcb->queue);
1334	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1335
1336	if (ioasc) {
1337		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1338			dev_err(&ioa_cfg->pdev->dev,
1339				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1340
1341		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1342	} else {
1343		ipr_handle_config_change(ioa_cfg, hostrcb);
1344	}
1345}
1346
1347/**
1348 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1349 * @i:		index into buffer
1350 * @buf:		string to modify
1351 *
1352 * This function will strip all trailing whitespace, pad the end
1353 * of the string with a single space, and NULL terminate the string.
1354 *
1355 * Return value:
1356 * 	new length of string
1357 **/
1358static int strip_and_pad_whitespace(int i, char *buf)
1359{
1360	while (i && buf[i] == ' ')
1361		i--;
1362	buf[i+1] = ' ';
1363	buf[i+2] = '\0';
1364	return i + 2;
1365}
1366
1367/**
1368 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1369 * @prefix:		string to print at start of printk
1370 * @hostrcb:	hostrcb pointer
1371 * @vpd:		vendor/product id/sn struct
1372 *
1373 * Return value:
1374 * 	none
1375 **/
1376static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1377				struct ipr_vpd *vpd)
1378{
1379	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1380	int i = 0;
1381
1382	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1383	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1384
1385	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1386	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1387
1388	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1389	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1390
1391	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1392}
1393
1394/**
1395 * ipr_log_vpd - Log the passed VPD to the error log.
1396 * @vpd:		vendor/product id/sn struct
1397 *
1398 * Return value:
1399 * 	none
1400 **/
1401static void ipr_log_vpd(struct ipr_vpd *vpd)
1402{
1403	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1404		    + IPR_SERIAL_NUM_LEN];
1405
1406	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1407	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1408	       IPR_PROD_ID_LEN);
1409	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1410	ipr_err("Vendor/Product ID: %s\n", buffer);
1411
1412	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1413	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1414	ipr_err("    Serial Number: %s\n", buffer);
1415}
1416
1417/**
1418 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1419 * @prefix:		string to print at start of printk
1420 * @hostrcb:	hostrcb pointer
1421 * @vpd:		vendor/product id/sn/wwn struct
1422 *
1423 * Return value:
1424 * 	none
1425 **/
1426static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1427				    struct ipr_ext_vpd *vpd)
1428{
1429	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1430	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1431		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1432}
1433
1434/**
1435 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1436 * @vpd:		vendor/product id/sn/wwn struct
1437 *
1438 * Return value:
1439 * 	none
1440 **/
1441static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1442{
1443	ipr_log_vpd(&vpd->vpd);
1444	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1445		be32_to_cpu(vpd->wwid[1]));
1446}
1447
1448/**
1449 * ipr_log_enhanced_cache_error - Log a cache error.
1450 * @ioa_cfg:	ioa config struct
1451 * @hostrcb:	hostrcb struct
1452 *
1453 * Return value:
1454 * 	none
1455 **/
1456static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1457					 struct ipr_hostrcb *hostrcb)
1458{
1459	struct ipr_hostrcb_type_12_error *error;
1460
1461	if (ioa_cfg->sis64)
1462		error = &hostrcb->hcam.u.error64.u.type_12_error;
1463	else
1464		error = &hostrcb->hcam.u.error.u.type_12_error;
1465
1466	ipr_err("-----Current Configuration-----\n");
1467	ipr_err("Cache Directory Card Information:\n");
1468	ipr_log_ext_vpd(&error->ioa_vpd);
1469	ipr_err("Adapter Card Information:\n");
1470	ipr_log_ext_vpd(&error->cfc_vpd);
1471
1472	ipr_err("-----Expected Configuration-----\n");
1473	ipr_err("Cache Directory Card Information:\n");
1474	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1475	ipr_err("Adapter Card Information:\n");
1476	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1477
1478	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1479		     be32_to_cpu(error->ioa_data[0]),
1480		     be32_to_cpu(error->ioa_data[1]),
1481		     be32_to_cpu(error->ioa_data[2]));
1482}
1483
1484/**
1485 * ipr_log_cache_error - Log a cache error.
1486 * @ioa_cfg:	ioa config struct
1487 * @hostrcb:	hostrcb struct
1488 *
1489 * Return value:
1490 * 	none
1491 **/
1492static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1493				struct ipr_hostrcb *hostrcb)
1494{
1495	struct ipr_hostrcb_type_02_error *error =
1496		&hostrcb->hcam.u.error.u.type_02_error;
1497
1498	ipr_err("-----Current Configuration-----\n");
1499	ipr_err("Cache Directory Card Information:\n");
1500	ipr_log_vpd(&error->ioa_vpd);
1501	ipr_err("Adapter Card Information:\n");
1502	ipr_log_vpd(&error->cfc_vpd);
1503
1504	ipr_err("-----Expected Configuration-----\n");
1505	ipr_err("Cache Directory Card Information:\n");
1506	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1507	ipr_err("Adapter Card Information:\n");
1508	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1509
1510	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1511		     be32_to_cpu(error->ioa_data[0]),
1512		     be32_to_cpu(error->ioa_data[1]),
1513		     be32_to_cpu(error->ioa_data[2]));
1514}
1515
1516/**
1517 * ipr_log_enhanced_config_error - Log a configuration error.
1518 * @ioa_cfg:	ioa config struct
1519 * @hostrcb:	hostrcb struct
1520 *
1521 * Return value:
1522 * 	none
1523 **/
1524static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1525					  struct ipr_hostrcb *hostrcb)
1526{
1527	int errors_logged, i;
1528	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1529	struct ipr_hostrcb_type_13_error *error;
1530
1531	error = &hostrcb->hcam.u.error.u.type_13_error;
1532	errors_logged = be32_to_cpu(error->errors_logged);
1533
1534	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1535		be32_to_cpu(error->errors_detected), errors_logged);
1536
1537	dev_entry = error->dev;
1538
1539	for (i = 0; i < errors_logged; i++, dev_entry++) {
1540		ipr_err_separator;
1541
1542		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1543		ipr_log_ext_vpd(&dev_entry->vpd);
1544
1545		ipr_err("-----New Device Information-----\n");
1546		ipr_log_ext_vpd(&dev_entry->new_vpd);
1547
1548		ipr_err("Cache Directory Card Information:\n");
1549		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1550
1551		ipr_err("Adapter Card Information:\n");
1552		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1553	}
1554}
1555
1556/**
1557 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1558 * @ioa_cfg:	ioa config struct
1559 * @hostrcb:	hostrcb struct
1560 *
1561 * Return value:
1562 * 	none
1563 **/
1564static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1565				       struct ipr_hostrcb *hostrcb)
1566{
1567	int errors_logged, i;
1568	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1569	struct ipr_hostrcb_type_23_error *error;
1570	char buffer[IPR_MAX_RES_PATH_LENGTH];
1571
1572	error = &hostrcb->hcam.u.error64.u.type_23_error;
1573	errors_logged = be32_to_cpu(error->errors_logged);
1574
1575	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1576		be32_to_cpu(error->errors_detected), errors_logged);
1577
1578	dev_entry = error->dev;
1579
1580	for (i = 0; i < errors_logged; i++, dev_entry++) {
1581		ipr_err_separator;
1582
1583		ipr_err("Device %d : %s", i + 1,
1584			 ipr_format_res_path(dev_entry->res_path, buffer,
1585					     sizeof(buffer)));
1586		ipr_log_ext_vpd(&dev_entry->vpd);
1587
1588		ipr_err("-----New Device Information-----\n");
1589		ipr_log_ext_vpd(&dev_entry->new_vpd);
1590
1591		ipr_err("Cache Directory Card Information:\n");
1592		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1593
1594		ipr_err("Adapter Card Information:\n");
1595		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1596	}
1597}
1598
1599/**
1600 * ipr_log_config_error - Log a configuration error.
1601 * @ioa_cfg:	ioa config struct
1602 * @hostrcb:	hostrcb struct
1603 *
1604 * Return value:
1605 * 	none
1606 **/
1607static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1608				 struct ipr_hostrcb *hostrcb)
1609{
1610	int errors_logged, i;
1611	struct ipr_hostrcb_device_data_entry *dev_entry;
1612	struct ipr_hostrcb_type_03_error *error;
1613
1614	error = &hostrcb->hcam.u.error.u.type_03_error;
1615	errors_logged = be32_to_cpu(error->errors_logged);
1616
1617	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1618		be32_to_cpu(error->errors_detected), errors_logged);
1619
1620	dev_entry = error->dev;
1621
1622	for (i = 0; i < errors_logged; i++, dev_entry++) {
1623		ipr_err_separator;
1624
1625		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1626		ipr_log_vpd(&dev_entry->vpd);
1627
1628		ipr_err("-----New Device Information-----\n");
1629		ipr_log_vpd(&dev_entry->new_vpd);
1630
1631		ipr_err("Cache Directory Card Information:\n");
1632		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1633
1634		ipr_err("Adapter Card Information:\n");
1635		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1636
1637		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1638			be32_to_cpu(dev_entry->ioa_data[0]),
1639			be32_to_cpu(dev_entry->ioa_data[1]),
1640			be32_to_cpu(dev_entry->ioa_data[2]),
1641			be32_to_cpu(dev_entry->ioa_data[3]),
1642			be32_to_cpu(dev_entry->ioa_data[4]));
1643	}
1644}
1645
1646/**
1647 * ipr_log_enhanced_array_error - Log an array configuration error.
1648 * @ioa_cfg:	ioa config struct
1649 * @hostrcb:	hostrcb struct
1650 *
1651 * Return value:
1652 * 	none
1653 **/
1654static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1655					 struct ipr_hostrcb *hostrcb)
1656{
1657	int i, num_entries;
1658	struct ipr_hostrcb_type_14_error *error;
1659	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1660	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1661
1662	error = &hostrcb->hcam.u.error.u.type_14_error;
1663
1664	ipr_err_separator;
1665
1666	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1667		error->protection_level,
1668		ioa_cfg->host->host_no,
1669		error->last_func_vset_res_addr.bus,
1670		error->last_func_vset_res_addr.target,
1671		error->last_func_vset_res_addr.lun);
1672
1673	ipr_err_separator;
1674
1675	array_entry = error->array_member;
1676	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1677			    ARRAY_SIZE(error->array_member));
1678
1679	for (i = 0; i < num_entries; i++, array_entry++) {
1680		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1681			continue;
1682
1683		if (be32_to_cpu(error->exposed_mode_adn) == i)
1684			ipr_err("Exposed Array Member %d:\n", i);
1685		else
1686			ipr_err("Array Member %d:\n", i);
1687
1688		ipr_log_ext_vpd(&array_entry->vpd);
1689		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1690		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1691				 "Expected Location");
1692
1693		ipr_err_separator;
1694	}
1695}
1696
1697/**
1698 * ipr_log_array_error - Log an array configuration error.
1699 * @ioa_cfg:	ioa config struct
1700 * @hostrcb:	hostrcb struct
1701 *
1702 * Return value:
1703 * 	none
1704 **/
1705static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1706				struct ipr_hostrcb *hostrcb)
1707{
1708	int i;
1709	struct ipr_hostrcb_type_04_error *error;
1710	struct ipr_hostrcb_array_data_entry *array_entry;
1711	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1712
1713	error = &hostrcb->hcam.u.error.u.type_04_error;
1714
1715	ipr_err_separator;
1716
1717	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1718		error->protection_level,
1719		ioa_cfg->host->host_no,
1720		error->last_func_vset_res_addr.bus,
1721		error->last_func_vset_res_addr.target,
1722		error->last_func_vset_res_addr.lun);
1723
1724	ipr_err_separator;
1725
1726	array_entry = error->array_member;
1727
1728	for (i = 0; i < 18; i++) {
1729		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1730			continue;
1731
1732		if (be32_to_cpu(error->exposed_mode_adn) == i)
1733			ipr_err("Exposed Array Member %d:\n", i);
1734		else
1735			ipr_err("Array Member %d:\n", i);
1736
1737		ipr_log_vpd(&array_entry->vpd);
1738
1739		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1740		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1741				 "Expected Location");
1742
1743		ipr_err_separator;
1744
1745		if (i == 9)
1746			array_entry = error->array_member2;
1747		else
1748			array_entry++;
1749	}
1750}
1751
1752/**
1753 * ipr_log_hex_data - Log additional hex IOA error data.
1754 * @ioa_cfg:	ioa config struct
1755 * @data:		IOA error data
1756 * @len:		data length
1757 *
1758 * Return value:
1759 * 	none
1760 **/
1761static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1762{
1763	int i;
1764
1765	if (len == 0)
1766		return;
1767
1768	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1769		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1770
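	/*
	 * Print 16 bytes per line: the byte offset followed by four
	 * big-endian 32-bit words. len is in bytes and i counts words.
	 */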
1771	for (i = 0; i < len / 4; i += 4) {
1772		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1773			be32_to_cpu(data[i]),
1774			be32_to_cpu(data[i+1]),
1775			be32_to_cpu(data[i+2]),
1776			be32_to_cpu(data[i+3]));
1777	}
1778}
1779
1780/**
1781 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1782 * @ioa_cfg:	ioa config struct
1783 * @hostrcb:	hostrcb struct
1784 *
1785 * Return value:
1786 * 	none
1787 **/
1788static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1789					    struct ipr_hostrcb *hostrcb)
1790{
1791	struct ipr_hostrcb_type_17_error *error;
1792
1793	if (ioa_cfg->sis64)
1794		error = &hostrcb->hcam.u.error64.u.type_17_error;
1795	else
1796		error = &hostrcb->hcam.u.error.u.type_17_error;
1797
1798	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1799	strim(error->failure_reason);
1800
1801	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1802		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1803	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1804	ipr_log_hex_data(ioa_cfg, error->data,
1805			 be32_to_cpu(hostrcb->hcam.length) -
1806			 (offsetof(struct ipr_hostrcb_error, u) +
1807			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1808}
1809
1810/**
1811 * ipr_log_dual_ioa_error - Log a dual adapter error.
1812 * @ioa_cfg:	ioa config struct
1813 * @hostrcb:	hostrcb struct
1814 *
1815 * Return value:
1816 * 	none
1817 **/
1818static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1819				   struct ipr_hostrcb *hostrcb)
1820{
1821	struct ipr_hostrcb_type_07_error *error;
1822
1823	error = &hostrcb->hcam.u.error.u.type_07_error;
1824	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1825	strim(error->failure_reason);
1826
1827	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1828		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1829	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1830	ipr_log_hex_data(ioa_cfg, error->data,
1831			 be32_to_cpu(hostrcb->hcam.length) -
1832			 (offsetof(struct ipr_hostrcb_error, u) +
1833			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1834}
1835
1836static const struct {
1837	u8 active;
1838	char *desc;
1839} path_active_desc[] = {
1840	{ IPR_PATH_NO_INFO, "Path" },
1841	{ IPR_PATH_ACTIVE, "Active path" },
1842	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1843};
1844
1845static const struct {
1846	u8 state;
1847	char *desc;
1848} path_state_desc[] = {
1849	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1850	{ IPR_PATH_HEALTHY, "is healthy" },
1851	{ IPR_PATH_DEGRADED, "is degraded" },
1852	{ IPR_PATH_FAILED, "is failed" }
1853};
1854
1855/**
1856 * ipr_log_fabric_path - Log a fabric path error
1857 * @hostrcb:	hostrcb struct
1858 * @fabric:		fabric descriptor
1859 *
1860 * Return value:
1861 * 	none
1862 **/
1863static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1864				struct ipr_hostrcb_fabric_desc *fabric)
1865{
1866	int i, j;
1867	u8 path_state = fabric->path_state;
1868	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1869	u8 state = path_state & IPR_PATH_STATE_MASK;
1870
1871	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1872		if (path_active_desc[i].active != active)
1873			continue;
1874
1875		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1876			if (path_state_desc[j].state != state)
1877				continue;
1878
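			/* A cascade or phy value of 0xff is treated as not
			 * present and is left out of the logged message. */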
1879			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1880				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1881					     path_active_desc[i].desc, path_state_desc[j].desc,
1882					     fabric->ioa_port);
1883			} else if (fabric->cascaded_expander == 0xff) {
1884				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1885					     path_active_desc[i].desc, path_state_desc[j].desc,
1886					     fabric->ioa_port, fabric->phy);
1887			} else if (fabric->phy == 0xff) {
1888				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1889					     path_active_desc[i].desc, path_state_desc[j].desc,
1890					     fabric->ioa_port, fabric->cascaded_expander);
1891			} else {
1892				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1893					     path_active_desc[i].desc, path_state_desc[j].desc,
1894					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1895			}
1896			return;
1897		}
1898	}
1899
1900	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1901		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1902}
1903
1904/**
1905 * ipr_log64_fabric_path - Log a fabric path error
1906 * @hostrcb:	hostrcb struct
1907 * @fabric:		fabric descriptor
1908 *
1909 * Return value:
1910 * 	none
1911 **/
1912static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1913				  struct ipr_hostrcb64_fabric_desc *fabric)
1914{
1915	int i, j;
1916	u8 path_state = fabric->path_state;
1917	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1918	u8 state = path_state & IPR_PATH_STATE_MASK;
1919	char buffer[IPR_MAX_RES_PATH_LENGTH];
1920
1921	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1922		if (path_active_desc[i].active != active)
1923			continue;
1924
1925		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1926			if (path_state_desc[j].state != state)
1927				continue;
1928
1929			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1930				     path_active_desc[i].desc, path_state_desc[j].desc,
1931				     ipr_format_res_path(fabric->res_path, buffer,
1932							 sizeof(buffer)));
1933			return;
1934		}
1935	}
1936
1937	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1938		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1939}
1940
1941static const struct {
1942	u8 type;
1943	char *desc;
1944} path_type_desc[] = {
1945	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1946	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1947	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1948	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1949};
1950
1951static const struct {
1952	u8 status;
1953	char *desc;
1954} path_status_desc[] = {
1955	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1956	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1957	{ IPR_PATH_CFG_FAILED, "Failed" },
1958	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1959	{ IPR_PATH_NOT_DETECTED, "Missing" },
1960	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1961};
1962
1963static const char *link_rate[] = {
1964	"unknown",
1965	"disabled",
1966	"phy reset problem",
1967	"spinup hold",
1968	"port selector",
1969	"unknown",
1970	"unknown",
1971	"unknown",
1972	"1.5Gbps",
1973	"3.0Gbps",
1974	"unknown",
1975	"unknown",
1976	"unknown",
1977	"unknown",
1978	"unknown",
1979	"unknown"
1980};
1981
1982/**
1983 * ipr_log_path_elem - Log a fabric path element.
1984 * @hostrcb:	hostrcb struct
1985 * @cfg:		fabric path element struct
1986 *
1987 * Return value:
1988 * 	none
1989 **/
1990static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1991			      struct ipr_hostrcb_config_element *cfg)
1992{
1993	int i, j;
1994	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1995	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1996
1997	if (type == IPR_PATH_CFG_NOT_EXIST)
1998		return;
1999
2000	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2001		if (path_type_desc[i].type != type)
2002			continue;
2003
2004		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2005			if (path_status_desc[j].status != status)
2006				continue;
2007
2008			if (type == IPR_PATH_CFG_IOA_PORT) {
2009				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2010					     path_status_desc[j].desc, path_type_desc[i].desc,
2011					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2012					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2013			} else {
2014				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2015					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2016						     path_status_desc[j].desc, path_type_desc[i].desc,
2017						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2018						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2019				} else if (cfg->cascaded_expander == 0xff) {
2020					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2021						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2022						     path_type_desc[i].desc, cfg->phy,
2023						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2024						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2025				} else if (cfg->phy == 0xff) {
2026					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2027						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2028						     path_type_desc[i].desc, cfg->cascaded_expander,
2029						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2030						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2031				} else {
2032					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2033						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2034						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2035						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2036						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2037				}
2038			}
2039			return;
2040		}
2041	}
2042
2043	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2044		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2045		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2046		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2047}
2048
2049/**
2050 * ipr_log64_path_elem - Log a fabric path element.
2051 * @hostrcb:	hostrcb struct
2052 * @cfg:		fabric path element struct
2053 *
2054 * Return value:
2055 * 	none
2056 **/
2057static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2058				struct ipr_hostrcb64_config_element *cfg)
2059{
2060	int i, j;
2061	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2062	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2063	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2064	char buffer[IPR_MAX_RES_PATH_LENGTH];
2065
2066	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2067		return;
2068
2069	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2070		if (path_type_desc[i].type != type)
2071			continue;
2072
2073		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2074			if (path_status_desc[j].status != status)
2075				continue;
2076
2077			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2078				     path_status_desc[j].desc, path_type_desc[i].desc,
2079				     ipr_format_res_path(cfg->res_path, buffer,
2080							 sizeof(buffer)),
2081				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2082				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2083			return;
2084		}
2085	}
2086	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2087		     "WWN=%08X%08X\n", cfg->type_status,
2088		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2089		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2090		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2091}
2092
2093/**
2094 * ipr_log_fabric_error - Log a fabric error.
2095 * @ioa_cfg:	ioa config struct
2096 * @hostrcb:	hostrcb struct
2097 *
2098 * Return value:
2099 * 	none
2100 **/
2101static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2102				 struct ipr_hostrcb *hostrcb)
2103{
2104	struct ipr_hostrcb_type_20_error *error;
2105	struct ipr_hostrcb_fabric_desc *fabric;
2106	struct ipr_hostrcb_config_element *cfg;
2107	int i, add_len;
2108
2109	error = &hostrcb->hcam.u.error.u.type_20_error;
2110	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2111	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2112
2113	add_len = be32_to_cpu(hostrcb->hcam.length) -
2114		(offsetof(struct ipr_hostrcb_error, u) +
2115		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2116
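	/* Walk each fabric descriptor, logging the path and its path
	 * elements; whatever length remains after the descriptors is
	 * dumped below as raw hex. */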
2117	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2118		ipr_log_fabric_path(hostrcb, fabric);
2119		for_each_fabric_cfg(fabric, cfg)
2120			ipr_log_path_elem(hostrcb, cfg);
2121
2122		add_len -= be16_to_cpu(fabric->length);
2123		fabric = (struct ipr_hostrcb_fabric_desc *)
2124			((unsigned long)fabric + be16_to_cpu(fabric->length));
2125	}
2126
2127	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2128}
2129
2130/**
2131 * ipr_log_sis64_array_error - Log a sis64 array error.
2132 * @ioa_cfg:	ioa config struct
2133 * @hostrcb:	hostrcb struct
2134 *
2135 * Return value:
2136 * 	none
2137 **/
2138static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2139				      struct ipr_hostrcb *hostrcb)
2140{
2141	int i, num_entries;
2142	struct ipr_hostrcb_type_24_error *error;
2143	struct ipr_hostrcb64_array_data_entry *array_entry;
2144	char buffer[IPR_MAX_RES_PATH_LENGTH];
2145	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2146
2147	error = &hostrcb->hcam.u.error64.u.type_24_error;
2148
2149	ipr_err_separator;
2150
2151	ipr_err("RAID %s Array Configuration: %s\n",
2152		error->protection_level,
2153		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2154
2155	ipr_err_separator;
2156
2157	array_entry = error->array_member;
2158	num_entries = min_t(u32, error->num_entries,
2159			    ARRAY_SIZE(error->array_member));
2160
2161	for (i = 0; i < num_entries; i++, array_entry++) {
2162
2163		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2164			continue;
2165
2166		if (error->exposed_mode_adn == i)
2167			ipr_err("Exposed Array Member %d:\n", i);
2168		else
2169			ipr_err("Array Member %d:\n", i);
2170
2172		ipr_log_ext_vpd(&array_entry->vpd);
2173		ipr_err("Current Location: %s\n",
2174			 ipr_format_res_path(array_entry->res_path, buffer,
2175					     sizeof(buffer)));
2176		ipr_err("Expected Location: %s\n",
2177			 ipr_format_res_path(array_entry->expected_res_path,
2178					     buffer, sizeof(buffer)));
2179
2180		ipr_err_separator;
2181	}
2182}
2183
2184/**
2185 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2186 * @ioa_cfg:	ioa config struct
2187 * @hostrcb:	hostrcb struct
2188 *
2189 * Return value:
2190 * 	none
2191 **/
2192static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2193				       struct ipr_hostrcb *hostrcb)
2194{
2195	struct ipr_hostrcb_type_30_error *error;
2196	struct ipr_hostrcb64_fabric_desc *fabric;
2197	struct ipr_hostrcb64_config_element *cfg;
2198	int i, add_len;
2199
2200	error = &hostrcb->hcam.u.error64.u.type_30_error;
2201
2202	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2203	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2204
2205	add_len = be32_to_cpu(hostrcb->hcam.length) -
2206		(offsetof(struct ipr_hostrcb64_error, u) +
2207		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2208
2209	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2210		ipr_log64_fabric_path(hostrcb, fabric);
2211		for_each_fabric_cfg(fabric, cfg)
2212			ipr_log64_path_elem(hostrcb, cfg);
2213
2214		add_len -= be16_to_cpu(fabric->length);
2215		fabric = (struct ipr_hostrcb64_fabric_desc *)
2216			((unsigned long)fabric + be16_to_cpu(fabric->length));
2217	}
2218
2219	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2220}
2221
2222/**
2223 * ipr_log_generic_error - Log an adapter error.
2224 * @ioa_cfg:	ioa config struct
2225 * @hostrcb:	hostrcb struct
2226 *
2227 * Return value:
2228 * 	none
2229 **/
2230static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2231				  struct ipr_hostrcb *hostrcb)
2232{
2233	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2234			 be32_to_cpu(hostrcb->hcam.length));
2235}
2236
2237/**
2238 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2239 * @ioasc:	IOASC
2240 *
2241 * This function will return the index into the ipr_error_table
2242 * for the specified IOASC. If the IOASC is not in the table,
2243 * 0 will be returned, which points to the entry used for unknown errors.
2244 *
2245 * Return value:
2246 * 	index into the ipr_error_table
2247 **/
2248static u32 ipr_get_error(u32 ioasc)
2249{
2250	int i;
2251
2252	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2253		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2254			return i;
2255
2256	return 0;
2257}
2258
2259/**
2260 * ipr_handle_log_data - Log an adapter error.
2261 * @ioa_cfg:	ioa config struct
2262 * @hostrcb:	hostrcb struct
2263 *
2264 * This function logs an adapter error to the system.
2265 *
2266 * Return value:
2267 * 	none
2268 **/
2269static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2270				struct ipr_hostrcb *hostrcb)
2271{
2272	u32 ioasc;
2273	int error_index;
2274
2275	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2276		return;
2277
2278	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2279		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2280
2281	if (ioa_cfg->sis64)
2282		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2283	else
2284		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2285
2286	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2287	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2288		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2289		scsi_report_bus_reset(ioa_cfg->host,
2290				      hostrcb->hcam.u.error.fd_res_addr.bus);
2291	}
2292
2293	error_index = ipr_get_error(ioasc);
2294
2295	if (!ipr_error_table[error_index].log_hcam)
2296		return;
2297
2298	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2299
2300	/* Set indication we have logged an error */
2301	ioa_cfg->errors_logged++;
2302
2303	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2304		return;
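	/* Clamp the HCAM length to the raw buffer size so the overlay
	 * specific loggers below never read past the end of the hostrcb. */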
2305	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2306		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2307
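	/* Dispatch on the overlay ID to the matching error log formatter.
	 * Unrecognized overlay IDs fall through to the generic hex dump. */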
2308	switch (hostrcb->hcam.overlay_id) {
2309	case IPR_HOST_RCB_OVERLAY_ID_2:
2310		ipr_log_cache_error(ioa_cfg, hostrcb);
2311		break;
2312	case IPR_HOST_RCB_OVERLAY_ID_3:
2313		ipr_log_config_error(ioa_cfg, hostrcb);
2314		break;
2315	case IPR_HOST_RCB_OVERLAY_ID_4:
2316	case IPR_HOST_RCB_OVERLAY_ID_6:
2317		ipr_log_array_error(ioa_cfg, hostrcb);
2318		break;
2319	case IPR_HOST_RCB_OVERLAY_ID_7:
2320		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2321		break;
2322	case IPR_HOST_RCB_OVERLAY_ID_12:
2323		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2324		break;
2325	case IPR_HOST_RCB_OVERLAY_ID_13:
2326		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2327		break;
2328	case IPR_HOST_RCB_OVERLAY_ID_14:
2329	case IPR_HOST_RCB_OVERLAY_ID_16:
2330		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2331		break;
2332	case IPR_HOST_RCB_OVERLAY_ID_17:
2333		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2334		break;
2335	case IPR_HOST_RCB_OVERLAY_ID_20:
2336		ipr_log_fabric_error(ioa_cfg, hostrcb);
2337		break;
2338	case IPR_HOST_RCB_OVERLAY_ID_23:
2339		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2340		break;
2341	case IPR_HOST_RCB_OVERLAY_ID_24:
2342	case IPR_HOST_RCB_OVERLAY_ID_26:
2343		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2344		break;
2345	case IPR_HOST_RCB_OVERLAY_ID_30:
2346		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2347		break;
2348	case IPR_HOST_RCB_OVERLAY_ID_1:
2349	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2350	default:
2351		ipr_log_generic_error(ioa_cfg, hostrcb);
2352		break;
2353	}
2354}
2355
2356/**
2357 * ipr_process_error - Op done function for an adapter error log.
2358 * @ipr_cmd:	ipr command struct
2359 *
2360 * This function is the op done function for an error log host
2361 * controlled asynchronous message (HCAM) from the adapter. It logs
2362 * the error and sends the HCAM back to the adapter.
2363 *
2364 * Return value:
2365 * 	none
2366 **/
2367static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2368{
2369	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2370	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2371	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2372	u32 fd_ioasc;
2373
2374	if (ioa_cfg->sis64)
2375		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2376	else
2377		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2378
2379	list_del(&hostrcb->queue);
2380	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2381
2382	if (!ioasc) {
2383		ipr_handle_log_data(ioa_cfg, hostrcb);
2384		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2385			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2386	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2387		dev_err(&ioa_cfg->pdev->dev,
2388			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2389	}
2390
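	/* Return the hostrcb to the adapter so it can be reused to report
	 * the next error log entry. */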
2391	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2392}
2393
2394/**
2395 * ipr_timeout -  An internally generated op has timed out.
2396 * @ipr_cmd:	ipr command struct
2397 *
2398 * This function blocks host requests and initiates an
2399 * adapter reset.
2400 *
2401 * Return value:
2402 * 	none
2403 **/
2404static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2405{
2406	unsigned long lock_flags = 0;
2407	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2408
2409	ENTER;
2410	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2411
2412	ioa_cfg->errors_logged++;
2413	dev_err(&ioa_cfg->pdev->dev,
2414		"Adapter being reset due to command timeout.\n");
2415
2416	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2417		ioa_cfg->sdt_state = GET_DUMP;
2418
2419	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2420		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2421
2422	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2423	LEAVE;
2424}
2425
2426/**
2427 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2428 * @ipr_cmd:	ipr command struct
2429 *
2430 * This function blocks host requests and initiates an
2431 * adapter reset.
2432 *
2433 * Return value:
2434 * 	none
2435 **/
2436static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2437{
2438	unsigned long lock_flags = 0;
2439	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2440
2441	ENTER;
2442	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443
2444	ioa_cfg->errors_logged++;
2445	dev_err(&ioa_cfg->pdev->dev,
2446		"Adapter timed out transitioning to operational.\n");
2447
2448	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2449		ioa_cfg->sdt_state = GET_DUMP;
2450
2451	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
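		/* With fastfail enabled, consume the reset retry budget up
		 * front so a reset that keeps timing out is failed rather
		 * than retried repeatedly. */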
2452		if (ipr_fastfail)
2453			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2454		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2455	}
2456
2457	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2458	LEAVE;
2459}
2460
2461/**
2462 * ipr_reset_reload - Reset/Reload the IOA
2463 * @ioa_cfg:		ioa config struct
2464 * @shutdown_type:	shutdown type
2465 *
2466 * This function resets the adapter and re-initializes it.
2467 * This function assumes that all new host commands have been stopped.
2468 * Return value:
2469 * 	SUCCESS / FAILED
2470 **/
2471static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2472			    enum ipr_shutdown_type shutdown_type)
2473{
2474	if (!ioa_cfg->in_reset_reload)
2475		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2476
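	/* The caller holds the host lock; drop it while sleeping until the
	 * reset/reload finishes, then re-take it before checking the result. */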
2477	spin_unlock_irq(ioa_cfg->host->host_lock);
2478	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2479	spin_lock_irq(ioa_cfg->host->host_lock);
2480
2481	/* If we got hit with a host reset while we were already resetting the
2482	 adapter for some reason, and that reset failed, fail this reset too. */
2483	if (ioa_cfg->ioa_is_dead) {
2484		ipr_trace;
2485		return FAILED;
2486	}
2487
2488	return SUCCESS;
2489}
2490
2491/**
2492 * ipr_find_ses_entry - Find matching SES in SES table
2493 * @res:	resource entry struct of SES
2494 *
2495 * Return value:
2496 * 	pointer to SES table entry / NULL on failure
2497 **/
2498static const struct ipr_ses_table_entry *
2499ipr_find_ses_entry(struct ipr_resource_entry *res)
2500{
2501	int i, j, matches;
2502	struct ipr_std_inq_vpids *vpids;
2503	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2504
2505	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2506		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2507			if (ste->compare_product_id_byte[j] == 'X') {
2508				vpids = &res->std_inq_data.vpids;
2509				if (vpids->product_id[j] == ste->product_id[j])
2510					matches++;
2511				else
2512					break;
2513			} else
2514				matches++;
2515		}
2516
2517		if (matches == IPR_PROD_ID_LEN)
2518			return ste;
2519	}
2520
2521	return NULL;
2522}
2523
2524/**
2525 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2526 * @ioa_cfg:	ioa config struct
2527 * @bus:		SCSI bus
2528 * @bus_width:	bus width
2529 *
2530 * Return value:
2531 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2532 *	SCSI bus speed in units of 100KHz (e.g. 1600 is 160 MHz).
2533 *	For a 2-byte wide SCSI bus, the maximum data transfer rate in
2534 *	MB/sec is twice the bus speed in MHz (e.g. a wide enabled bus
2535 *	at 160 MHz transfers at most 320 MB/sec).
2536static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2537{
2538	struct ipr_resource_entry *res;
2539	const struct ipr_ses_table_entry *ste;
2540	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2541
2542	/* Loop through each config table entry in the config table buffer */
2543	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2544		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2545			continue;
2546
2547		if (bus != res->bus)
2548			continue;
2549
2550		if (!(ste = ipr_find_ses_entry(res)))
2551			continue;
2552
2553		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2554	}
2555
2556	return max_xfer_rate;
2557}
2558
2559/**
2560 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2561 * @ioa_cfg:		ioa config struct
2562 * @max_delay:		max delay in micro-seconds to wait
2563 *
2564 * Waits for an IODEBUG ACK from the IOA, busy looping with an increasing delay.
2565 *
2566 * Return value:
2567 * 	0 on success / other on failure
2568 **/
2569static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2570{
2571	volatile u32 pcii_reg;
2572	int delay = 1;
2573
2574	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2575	while (delay < max_delay) {
2576		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2577
2578		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2579			return 0;
2580
2581		/* udelay cannot be used if delay is more than a few milliseconds */
2582		if ((delay / 1000) > MAX_UDELAY_MS)
2583			mdelay(delay / 1000);
2584		else
2585			udelay(delay);
2586
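		/* Back off: double the delay each poll until max_delay
		 * is reached. */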
2587		delay += delay;
2588	}
2589	return -EIO;
2590}
2591
2592/**
2593 * ipr_get_sis64_dump_data_section - Dump IOA memory
2594 * @ioa_cfg:			ioa config struct
2595 * @start_addr:			adapter address to dump
2596 * @dest:			destination kernel buffer
2597 * @length_in_words:		length to dump in 4 byte words
2598 *
2599 * Return value:
2600 * 	0 on success
2601 **/
2602static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2603					   u32 start_addr,
2604					   __be32 *dest, u32 length_in_words)
2605{
2606	int i;
2607
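	/* SIS-64 adapters expose the dump through an indirect register pair:
	 * write the adapter address of each word to the dump address register,
	 * then read that word back from the dump data register. */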
2608	for (i = 0; i < length_in_words; i++) {
2609		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2610		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2611		dest++;
2612	}
2613
2614	return 0;
2615}
2616
2617/**
2618 * ipr_get_ldump_data_section - Dump IOA memory
2619 * @ioa_cfg:			ioa config struct
2620 * @start_addr:			adapter address to dump
2621 * @dest:				destination kernel buffer
2622 * @length_in_words:	length to dump in 4 byte words
2623 *
2624 * Return value:
2625 * 	0 on success / -EIO on failure
2626 **/
2627static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2628				      u32 start_addr,
2629				      __be32 *dest, u32 length_in_words)
2630{
2631	volatile u32 temp_pcii_reg;
2632	int i, delay = 0;
2633
2634	if (ioa_cfg->sis64)
2635		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2636						       dest, length_in_words);
2637
2638	/* Write IOA interrupt reg starting LDUMP state  */
2639	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2640	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2641
2642	/* Wait for IO debug acknowledge */
2643	if (ipr_wait_iodbg_ack(ioa_cfg,
2644			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2645		dev_err(&ioa_cfg->pdev->dev,
2646			"IOA dump long data transfer timeout\n");
2647		return -EIO;
2648	}
2649
2650	/* Signal LDUMP interlocked - clear IO debug ack */
2651	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2652	       ioa_cfg->regs.clr_interrupt_reg);
2653
2654	/* Write Mailbox with starting address */
2655	writel(start_addr, ioa_cfg->ioa_mailbox);
2656
2657	/* Signal address valid - clear IOA Reset alert */
2658	writel(IPR_UPROCI_RESET_ALERT,
2659	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2660
2661	for (i = 0; i < length_in_words; i++) {
2662		/* Wait for IO debug acknowledge */
2663		if (ipr_wait_iodbg_ack(ioa_cfg,
2664				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2665			dev_err(&ioa_cfg->pdev->dev,
2666				"IOA dump short data transfer timeout\n");
2667			return -EIO;
2668		}
2669
2670		/* Read data from mailbox and increment destination pointer */
2671		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2672		dest++;
2673
2674		/* For all but the last word of data, signal data received */
2675		if (i < (length_in_words - 1)) {
2676			/* Signal dump data received - Clear IO debug Ack */
2677			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2678			       ioa_cfg->regs.clr_interrupt_reg);
2679		}
2680	}
2681
2682	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2683	writel(IPR_UPROCI_RESET_ALERT,
2684	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2685
2686	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2687	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2688
2689	/* Signal dump data received - Clear IO debug Ack */
2690	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2691	       ioa_cfg->regs.clr_interrupt_reg);
2692
2693	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2694	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2695		temp_pcii_reg =
2696		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2697
2698		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2699			return 0;
2700
2701		udelay(10);
2702		delay += 10;
2703	}
2704
2705	return 0;
2706}
2707
2708#ifdef CONFIG_SCSI_IPR_DUMP
2709/**
2710 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2711 * @ioa_cfg:		ioa config struct
2712 * @pci_address:	adapter address
2713 * @length:			length of data to copy
2714 *
2715 * Copy data from PCI adapter to kernel buffer.
2716 * Note: length MUST be a 4 byte multiple
2717 * Return value:
2718 * 	0 on success / other on failure
2719 **/
2720static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2721			unsigned long pci_address, u32 length)
2722{
2723	int bytes_copied = 0;
2724	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2725	__be32 *page;
2726	unsigned long lock_flags = 0;
2727	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2728
2729	if (ioa_cfg->sis64)
2730		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2731	else
2732		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2733
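	/* Copy the dump in page sized chunks, allocating a fresh page whenever
	 * the current one fills, and stop once the overall dump size limit is
	 * reached. */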
2734	while (bytes_copied < length &&
2735	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2736		if (ioa_dump->page_offset >= PAGE_SIZE ||
2737		    ioa_dump->page_offset == 0) {
2738			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2739
2740			if (!page) {
2741				ipr_trace;
2742				return bytes_copied;
2743			}
2744
2745			ioa_dump->page_offset = 0;
2746			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2747			ioa_dump->next_page_index++;
2748		} else
2749			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2750
2751		rem_len = length - bytes_copied;
2752		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2753		cur_len = min(rem_len, rem_page_len);
2754
2755		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2756		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2757			rc = -EIO;
2758		} else {
2759			rc = ipr_get_ldump_data_section(ioa_cfg,
2760							pci_address + bytes_copied,
2761							&page[ioa_dump->page_offset / 4],
2762							(cur_len / sizeof(u32)));
2763		}
2764		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2765
2766		if (!rc) {
2767			ioa_dump->page_offset += cur_len;
2768			bytes_copied += cur_len;
2769		} else {
2770			ipr_trace;
2771			break;
2772		}
2773		schedule();
2774	}
2775
2776	return bytes_copied;
2777}
2778
2779/**
2780 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2781 * @hdr:	dump entry header struct
2782 *
2783 * Return value:
2784 * 	nothing
2785 **/
2786static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2787{
2788	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2789	hdr->num_elems = 1;
2790	hdr->offset = sizeof(*hdr);
2791	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2792}
2793
2794/**
2795 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2796 * @ioa_cfg:	ioa config struct
2797 * @driver_dump:	driver dump struct
2798 *
2799 * Return value:
2800 * 	nothing
2801 **/
2802static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2803				   struct ipr_driver_dump *driver_dump)
2804{
2805	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2806
2807	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2808	driver_dump->ioa_type_entry.hdr.len =
2809		sizeof(struct ipr_dump_ioa_type_entry) -
2810		sizeof(struct ipr_dump_entry_header);
2811	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2812	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2813	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2814	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2815		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2816		ucode_vpd->minor_release[1];
2817	driver_dump->hdr.num_entries++;
2818}
2819
2820/**
2821 * ipr_dump_version_data - Fill in the driver version in the dump.
2822 * @ioa_cfg:	ioa config struct
2823 * @driver_dump:	driver dump struct
2824 *
2825 * Return value:
2826 * 	nothing
2827 **/
2828static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2829				  struct ipr_driver_dump *driver_dump)
2830{
2831	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2832	driver_dump->version_entry.hdr.len =
2833		sizeof(struct ipr_dump_version_entry) -
2834		sizeof(struct ipr_dump_entry_header);
2835	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2836	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2837	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2838	driver_dump->hdr.num_entries++;
2839}
2840
2841/**
2842 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2843 * @ioa_cfg:	ioa config struct
2844 * @driver_dump:	driver dump struct
2845 *
2846 * Return value:
2847 * 	nothing
2848 **/
2849static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2850				   struct ipr_driver_dump *driver_dump)
2851{
2852	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2853	driver_dump->trace_entry.hdr.len =
2854		sizeof(struct ipr_dump_trace_entry) -
2855		sizeof(struct ipr_dump_entry_header);
2856	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2857	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2858	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2859	driver_dump->hdr.num_entries++;
2860}
2861
2862/**
2863 * ipr_dump_location_data - Fill in the IOA location in the dump.
2864 * @ioa_cfg:	ioa config struct
2865 * @driver_dump:	driver dump struct
2866 *
2867 * Return value:
2868 * 	nothing
2869 **/
2870static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2871				   struct ipr_driver_dump *driver_dump)
2872{
2873	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2874	driver_dump->location_entry.hdr.len =
2875		sizeof(struct ipr_dump_location_entry) -
2876		sizeof(struct ipr_dump_entry_header);
2877	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2878	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2879	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2880	driver_dump->hdr.num_entries++;
2881}
2882
2883/**
2884 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2885 * @ioa_cfg:	ioa config struct
2886 * @dump:		dump struct
2887 *
2888 * Return value:
2889 * 	nothing
2890 **/
2891static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2892{
2893	unsigned long start_addr, sdt_word;
2894	unsigned long lock_flags = 0;
2895	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2896	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2897	u32 num_entries, max_num_entries, start_off, end_off;
2898	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2899	struct ipr_sdt *sdt;
2900	int valid = 1;
2901	int i;
2902
2903	ENTER;
2904
2905	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2906
2907	if (ioa_cfg->sdt_state != READ_DUMP) {
2908		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2909		return;
2910	}
2911
2912	if (ioa_cfg->sis64) {
2913		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2914		ssleep(IPR_DUMP_DELAY_SECONDS);
2915		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2916	}
2917
2918	start_addr = readl(ioa_cfg->ioa_mailbox);
2919
2920	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2921		dev_err(&ioa_cfg->pdev->dev,
2922			"Invalid dump table format: %lx\n", start_addr);
2923		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2924		return;
2925	}
2926
2927	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2928
2929	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2930
2931	/* Initialize the overall dump header */
2932	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2933	driver_dump->hdr.num_entries = 1;
2934	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2935	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2936	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2937	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2938
2939	ipr_dump_version_data(ioa_cfg, driver_dump);
2940	ipr_dump_location_data(ioa_cfg, driver_dump);
2941	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2942	ipr_dump_trace_data(ioa_cfg, driver_dump);
2943
2944	/* Update dump_header */
2945	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2946
2947	/* IOA Dump entry */
2948	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2949	ioa_dump->hdr.len = 0;
2950	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2951	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2952
2953	/* First entries in sdt are actually a list of dump addresses and
2954	 lengths to gather the real dump data.  sdt represents the pointer
2955	 to the ioa generated dump table.  Dump data will be extracted based
2956	 on entries in this table */
2957	sdt = &ioa_dump->sdt;
2958
2959	if (ioa_cfg->sis64) {
2960		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
2961		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2962	} else {
2963		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
2964		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2965	}
2966
2967	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
2968			(max_num_entries * sizeof(struct ipr_sdt_entry));
2969	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2970					bytes_to_copy / sizeof(__be32));
2971
2972	/* Smart Dump table is ready to use and the first entry is valid */
2973	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2974	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2975		dev_err(&ioa_cfg->pdev->dev,
2976			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2977			rc, be32_to_cpu(sdt->hdr.state));
2978		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2979		ioa_cfg->sdt_state = DUMP_OBTAINED;
2980		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2981		return;
2982	}
2983
2984	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2985
2986	if (num_entries > max_num_entries)
2987		num_entries = max_num_entries;
2988
2989	/* Update dump length to the actual data to be copied */
2990	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
2991	if (ioa_cfg->sis64)
2992		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
2993	else
2994		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
2995
2996	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2997
2998	for (i = 0; i < num_entries; i++) {
2999		if (ioa_dump->hdr.len > max_dump_size) {
3000			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3001			break;
3002		}
3003
3004		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3005			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3006			if (ioa_cfg->sis64)
3007				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3008			else {
3009				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3010				end_off = be32_to_cpu(sdt->entry[i].end_token);
3011
3012				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3013					bytes_to_copy = end_off - start_off;
3014				else
3015					valid = 0;
3016			}
3017			if (valid) {
3018				if (bytes_to_copy > max_dump_size) {
3019					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3020					continue;
3021				}
3022
3023				/* Copy data from adapter to driver buffers */
3024				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3025							    bytes_to_copy);
3026
3027				ioa_dump->hdr.len += bytes_copied;
3028
3029				if (bytes_copied != bytes_to_copy) {
3030					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3031					break;
3032				}
3033			}
3034		}
3035	}
3036
3037	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3038
3039	/* Update dump_header */
3040	driver_dump->hdr.len += ioa_dump->hdr.len;
3041	wmb();
3042	ioa_cfg->sdt_state = DUMP_OBTAINED;
3043	LEAVE;
3044}
3045
3046#else
3047#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3048#endif
3049
3050/**
3051 * ipr_release_dump - Free adapter dump memory
3052 * @kref:	kref struct
3053 *
3054 * Return value:
3055 *	nothing
3056 **/
3057static void ipr_release_dump(struct kref *kref)
3058{
3059	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3060	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3061	unsigned long lock_flags = 0;
3062	int i;
3063
3064	ENTER;
3065	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3066	ioa_cfg->dump = NULL;
3067	ioa_cfg->sdt_state = INACTIVE;
3068	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3069
3070	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3071		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3072
3073	vfree(dump->ioa_dump.ioa_data);
3074	kfree(dump);
3075	LEAVE;
3076}
3077
3078/**
3079 * ipr_worker_thread - Worker thread
3080 * @work:		ioa config struct
3081 *
3082 * Called at task level from a work thread. This function takes care
3083 * of adding and removing devices from the mid-layer as configuration
3084 * changes are detected by the adapter.
3085 *
3086 * Return value:
3087 * 	nothing
3088 **/
3089static void ipr_worker_thread(struct work_struct *work)
3090{
3091	unsigned long lock_flags;
3092	struct ipr_resource_entry *res;
3093	struct scsi_device *sdev;
3094	struct ipr_dump *dump;
3095	struct ipr_ioa_cfg *ioa_cfg =
3096		container_of(work, struct ipr_ioa_cfg, work_q);
3097	u8 bus, target, lun;
3098	int did_work;
3099
3100	ENTER;
3101	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3102
3103	if (ioa_cfg->sdt_state == READ_DUMP) {
3104		dump = ioa_cfg->dump;
3105		if (!dump) {
3106			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3107			return;
3108		}
3109		kref_get(&dump->kref);
3110		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3111		ipr_get_ioa_dump(ioa_cfg, dump);
3112		kref_put(&dump->kref, ipr_release_dump);
3113
3114		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3115		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3116			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3117		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3118		return;
3119	}
3120
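	/* scsi_remove_device()/scsi_add_device() may sleep, so the host lock is
	 * dropped around each call. The resource list can change while the lock
	 * is released, which is why the scan restarts from the top afterwards. */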
3121restart:
3122	do {
3123		did_work = 0;
3124		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3125			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3126			return;
3127		}
3128
3129		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3130			if (res->del_from_ml && res->sdev) {
3131				did_work = 1;
3132				sdev = res->sdev;
3133				if (!scsi_device_get(sdev)) {
3134					if (!res->add_to_ml)
3135						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3136					else
3137						res->del_from_ml = 0;
3138					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139					scsi_remove_device(sdev);
3140					scsi_device_put(sdev);
3141					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142				}
3143				break;
3144			}
3145		}
3146	} while (did_work);
3147
3148	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3149		if (res->add_to_ml) {
3150			bus = res->bus;
3151			target = res->target;
3152			lun = res->lun;
3153			res->add_to_ml = 0;
3154			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3155			scsi_add_device(ioa_cfg->host, bus, target, lun);
3156			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3157			goto restart;
3158		}
3159	}
3160
3161	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3162	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3163	LEAVE;
3164}
3165
3166#ifdef CONFIG_SCSI_IPR_TRACE
3167/**
3168 * ipr_read_trace - Dump the adapter trace
3169 * @filp:		open sysfs file
3170 * @kobj:		kobject struct
3171 * @bin_attr:		bin_attribute struct
3172 * @buf:		buffer
3173 * @off:		offset
3174 * @count:		buffer size
3175 *
3176 * Return value:
3177 *	number of bytes printed to buffer
3178 **/
3179static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3180			      struct bin_attribute *bin_attr,
3181			      char *buf, loff_t off, size_t count)
3182{
3183	struct device *dev = container_of(kobj, struct device, kobj);
3184	struct Scsi_Host *shost = class_to_shost(dev);
3185	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3186	unsigned long lock_flags = 0;
3187	ssize_t ret;
3188
3189	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3190	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3191				IPR_TRACE_SIZE);
3192	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3193
3194	return ret;
3195}
3196
3197static struct bin_attribute ipr_trace_attr = {
3198	.attr =	{
3199		.name = "trace",
3200		.mode = S_IRUGO,
3201	},
3202	.size = 0,
3203	.read = ipr_read_trace,
3204};
3205#endif
3206
3207/**
3208 * ipr_show_fw_version - Show the firmware version
3209 * @dev:	class device struct
3210 * @buf:	buffer
3211 *
3212 * Return value:
3213 *	number of bytes printed to buffer
3214 **/
3215static ssize_t ipr_show_fw_version(struct device *dev,
3216				   struct device_attribute *attr, char *buf)
3217{
3218	struct Scsi_Host *shost = class_to_shost(dev);
3219	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3220	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3221	unsigned long lock_flags = 0;
3222	int len;
3223
3224	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3225	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3226		       ucode_vpd->major_release, ucode_vpd->card_type,
3227		       ucode_vpd->minor_release[0],
3228		       ucode_vpd->minor_release[1]);
3229	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230	return len;
3231}
3232
3233static struct device_attribute ipr_fw_version_attr = {
3234	.attr = {
3235		.name =		"fw_version",
3236		.mode =		S_IRUGO,
3237	},
3238	.show = ipr_show_fw_version,
3239};
3240
3241/**
3242 * ipr_show_log_level - Show the adapter's error logging level
3243 * @dev:	class device struct
3244 * @buf:	buffer
3245 *
3246 * Return value:
3247 * 	number of bytes printed to buffer
3248 **/
3249static ssize_t ipr_show_log_level(struct device *dev,
3250				   struct device_attribute *attr, char *buf)
3251{
3252	struct Scsi_Host *shost = class_to_shost(dev);
3253	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3254	unsigned long lock_flags = 0;
3255	int len;
3256
3257	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3258	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3259	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3260	return len;
3261}
3262
3263/**
3264 * ipr_store_log_level - Change the adapter's error logging level
3265 * @dev:	class device struct
3266 * @buf:	buffer
3267 *
3268 * Return value:
3269 * 	number of bytes printed to buffer
3270 **/
3271static ssize_t ipr_store_log_level(struct device *dev,
3272			           struct device_attribute *attr,
3273				   const char *buf, size_t count)
3274{
3275	struct Scsi_Host *shost = class_to_shost(dev);
3276	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3277	unsigned long lock_flags = 0;
3278
3279	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3280	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3281	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282	return strlen(buf);
3283}
3284
3285static struct device_attribute ipr_log_level_attr = {
3286	.attr = {
3287		.name =		"log_level",
3288		.mode =		S_IRUGO | S_IWUSR,
3289	},
3290	.show = ipr_show_log_level,
3291	.store = ipr_store_log_level
3292};
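/*
 * Example usage from userspace (host number illustrative; the path assumes
 * this attribute is registered with the SCSI host template as usual):
 *   cat /sys/class/scsi_host/host0/log_level
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 */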
3293
3294/**
3295 * ipr_store_diagnostics - IOA Diagnostics interface
3296 * @dev:	device struct
3297 * @buf:	buffer
3298 * @count:	buffer size
3299 *
3300 * This function will reset the adapter and wait a reasonable
3301 * amount of time for any errors that the adapter might log.
3302 *
3303 * Return value:
3304 * 	count on success / other on failure
3305 **/
3306static ssize_t ipr_store_diagnostics(struct device *dev,
3307				     struct device_attribute *attr,
3308				     const char *buf, size_t count)
3309{
3310	struct Scsi_Host *shost = class_to_shost(dev);
3311	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3312	unsigned long lock_flags = 0;
3313	int rc = count;
3314
3315	if (!capable(CAP_SYS_ADMIN))
3316		return -EACCES;
3317
3318	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3319	while (ioa_cfg->in_reset_reload) {
3320		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3321		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3322		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3323	}
3324
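	/* Clear the error count before the reset so that any errors logged as
	 * a result of the diagnostic reset can be detected below. */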
3325	ioa_cfg->errors_logged = 0;
3326	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3327
3328	if (ioa_cfg->in_reset_reload) {
3329		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3330		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3331
3332		/* Wait for a second for any errors to be logged */
3333		msleep(1000);
3334	} else {
3335		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3336		return -EIO;
3337	}
3338
3339	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3341		rc = -EIO;
3342	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343
3344	return rc;
3345}
3346
3347static struct device_attribute ipr_diagnostics_attr = {
3348	.attr = {
3349		.name =		"run_diagnostics",
3350		.mode =		S_IWUSR,
3351	},
3352	.store = ipr_store_diagnostics
3353};
3354
3355/**
3356 * ipr_show_adapter_state - Show the adapter's state
3357 * @class_dev:	device struct
3358 * @buf:	buffer
3359 *
3360 * Return value:
3361 * 	number of bytes printed to buffer
3362 **/
3363static ssize_t ipr_show_adapter_state(struct device *dev,
3364				      struct device_attribute *attr, char *buf)
3365{
3366	struct Scsi_Host *shost = class_to_shost(dev);
3367	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3368	unsigned long lock_flags = 0;
3369	int len;
3370
3371	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372	if (ioa_cfg->ioa_is_dead)
3373		len = snprintf(buf, PAGE_SIZE, "offline\n");
3374	else
3375		len = snprintf(buf, PAGE_SIZE, "online\n");
3376	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377	return len;
3378}
3379
3380/**
3381 * ipr_store_adapter_state - Change adapter state
3382 * @dev:	device struct
3383 * @buf:	buffer
3384 * @count:	buffer size
3385 *
3386 * This function will change the adapter's state.
3387 *
3388 * Return value:
3389 * 	count on success / other on failure
3390 **/
3391static ssize_t ipr_store_adapter_state(struct device *dev,
3392				       struct device_attribute *attr,
3393				       const char *buf, size_t count)
3394{
3395	struct Scsi_Host *shost = class_to_shost(dev);
3396	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3397	unsigned long lock_flags;
3398	int result = count;
3399
3400	if (!capable(CAP_SYS_ADMIN))
3401		return -EACCES;
3402
3403	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3405		ioa_cfg->ioa_is_dead = 0;
3406		ioa_cfg->reset_retries = 0;
3407		ioa_cfg->in_ioa_bringdown = 0;
3408		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3409	}
3410	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3412
3413	return result;
3414}
3415
3416static struct device_attribute ipr_ioa_state_attr = {
3417	.attr = {
3418		.name =		"online_state",
3419		.mode =		S_IRUGO | S_IWUSR,
3420	},
3421	.show = ipr_show_adapter_state,
3422	.store = ipr_store_adapter_state
3423};
3424
3425/**
3426 * ipr_store_reset_adapter - Reset the adapter
3427 * @dev:	device struct
3428 * @buf:	buffer
3429 * @count:	buffer size
3430 *
3431 * This function will reset the adapter.
3432 *
3433 * Return value:
3434 * 	count on success / other on failure
3435 **/
3436static ssize_t ipr_store_reset_adapter(struct device *dev,
3437				       struct device_attribute *attr,
3438				       const char *buf, size_t count)
3439{
3440	struct Scsi_Host *shost = class_to_shost(dev);
3441	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3442	unsigned long lock_flags;
3443	int result = count;
3444
3445	if (!capable(CAP_SYS_ADMIN))
3446		return -EACCES;
3447
3448	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3449	if (!ioa_cfg->in_reset_reload)
3450		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3451	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3452	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3453
3454	return result;
3455}
3456
3457static struct device_attribute ipr_ioa_reset_attr = {
3458	.attr = {
3459		.name =		"reset_host",
3460		.mode =		S_IWUSR,
3461	},
3462	.store = ipr_store_reset_adapter
3463};
3464
3465/**
3466 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3467 * @buf_len:		buffer length
3468 *
3469 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3470 * list to use for microcode download
3471 *
3472 * Return value:
3473 * 	pointer to sglist / NULL on failure
3474 **/
3475static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3476{
3477	int sg_size, order, bsize_elem, num_elem, i, j;
3478	struct ipr_sglist *sglist;
3479	struct scatterlist *scatterlist;
3480	struct page *page;
3481
3482	/* Get the minimum size per scatter/gather element */
3483	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3484
3485	/* Get the actual size per element */
3486	order = get_order(sg_size);
3487
3488	/* Determine the actual number of bytes per element */
3489	bsize_elem = PAGE_SIZE * (1 << order);
3490
3491	/* Determine the actual number of sg entries needed */
3492	if (buf_len % bsize_elem)
3493		num_elem = (buf_len / bsize_elem) + 1;
3494	else
3495		num_elem = buf_len / bsize_elem;
3496
3497	/* Allocate a scatter/gather list for the DMA */
3498	sglist = kzalloc(sizeof(struct ipr_sglist) +
3499			 (sizeof(struct scatterlist) * (num_elem - 1)),
3500			 GFP_KERNEL);
3501
3502	if (sglist == NULL) {
3503		ipr_trace;
3504		return NULL;
3505	}
3506
3507	scatterlist = sglist->scatterlist;
3508	sg_init_table(scatterlist, num_elem);
3509
3510	sglist->order = order;
3511	sglist->num_sg = num_elem;
3512
3513	/* Allocate a bunch of sg elements */
3514	for (i = 0; i < num_elem; i++) {
3515		page = alloc_pages(GFP_KERNEL, order);
3516		if (!page) {
3517			ipr_trace;
3518
3519			/* Free up what we already allocated */
3520			for (j = i - 1; j >= 0; j--)
3521				__free_pages(sg_page(&scatterlist[j]), order);
3522			kfree(sglist);
3523			return NULL;
3524		}
3525
3526		sg_set_page(&scatterlist[i], page, 0, 0);
3527	}
3528
3529	return sglist;
3530}
3531
3532/**
3533 * ipr_free_ucode_buffer - Frees a microcode download buffer
3534 * @sglist:		scatter/gather list pointer
3535 *
3536 * Free a DMA'able ucode download buffer previously allocated with
3537 * ipr_alloc_ucode_buffer
3538 *
3539 * Return value:
3540 * 	nothing
3541 **/
3542static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3543{
3544	int i;
3545
3546	for (i = 0; i < sglist->num_sg; i++)
3547		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3548
3549	kfree(sglist);
3550}
3551
3552/**
3553 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3554 * @sglist:		scatter/gather list pointer
3555 * @buffer:		buffer pointer
3556 * @len:		buffer length
3557 *
3558 * Copy a microcode image from a user buffer into a buffer allocated by
3559 * ipr_alloc_ucode_buffer
3560 *
3561 * Return value:
3562 * 	0 on success / other on failure
3563 **/
3564static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3565				 u8 *buffer, u32 len)
3566{
3567	int bsize_elem, i, result = 0;
3568	struct scatterlist *scatterlist;
3569	void *kaddr;
3570
3571	/* Determine the actual number of bytes per element */
3572	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3573
3574	scatterlist = sglist->scatterlist;
3575
3576	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3577		struct page *page = sg_page(&scatterlist[i]);
3578
3579		kaddr = kmap(page);
3580		memcpy(kaddr, buffer, bsize_elem);
3581		kunmap(page);
3582
3583		scatterlist[i].length = bsize_elem;
3584
3585		if (result != 0) {
3586			ipr_trace;
3587			return result;
3588		}
3589	}
3590
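	/* Copy any remaining partial chunk into the last sg element */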
3591	if (len % bsize_elem) {
3592		struct page *page = sg_page(&scatterlist[i]);
3593
3594		kaddr = kmap(page);
3595		memcpy(kaddr, buffer, len % bsize_elem);
3596		kunmap(page);
3597
3598		scatterlist[i].length = len % bsize_elem;
3599	}
3600
3601	sglist->buffer_len = len;
3602	return result;
3603}
3604
3605/**
3606 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3607 * @ipr_cmd:		ipr command struct
3608 * @sglist:		scatter/gather list
3609 *
3610 * Builds a microcode download IOA data list (IOADL).
3611 *
3612 **/
3613static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3614				    struct ipr_sglist *sglist)
3615{
3616	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3617	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3618	struct scatterlist *scatterlist = sglist->scatterlist;
3619	int i;
3620
3621	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3622	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3623	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3624
3625	ioarcb->ioadl_len =
3626		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3627	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3628		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3629		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3630		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3631	}
3632
3633	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3634}
3635
3636/**
3637 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3638 * @ipr_cmd:	ipr command struct
3639 * @sglist:		scatter/gather list
3640 *
3641 * Builds a microcode download IOA data list (IOADL).
3642 *
3643 **/
3644static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3645				  struct ipr_sglist *sglist)
3646{
3647	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3648	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3649	struct scatterlist *scatterlist = sglist->scatterlist;
3650	int i;
3651
3652	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3653	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3654	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3655
3656	ioarcb->ioadl_len =
3657		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3658
3659	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3660		ioadl[i].flags_and_data_len =
3661			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3662		ioadl[i].address =
3663			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3664	}
3665
3666	ioadl[i-1].flags_and_data_len |=
3667		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3668}
3669
3670/**
3671 * ipr_update_ioa_ucode - Update IOA's microcode
3672 * @ioa_cfg:	ioa config struct
3673 * @sglist:		scatter/gather list
3674 *
3675 * Initiate an adapter reset to update the IOA's microcode
3676 *
3677 * Return value:
3678 * 	0 on success / -EIO on failure
3679 **/
3680static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3681				struct ipr_sglist *sglist)
3682{
3683	unsigned long lock_flags;
3684
3685	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3686	while (ioa_cfg->in_reset_reload) {
3687		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3688		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3689		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3690	}
3691
3692	if (ioa_cfg->ucode_sglist) {
3693		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3694		dev_err(&ioa_cfg->pdev->dev,
3695			"Microcode download already in progress\n");
3696		return -EIO;
3697	}
3698
3699	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3700					sglist->num_sg, DMA_TO_DEVICE);
3701
3702	if (!sglist->num_dma_sg) {
3703		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3704		dev_err(&ioa_cfg->pdev->dev,
3705			"Failed to map microcode download buffer!\n");
3706		return -EIO;
3707	}
3708
3709	ioa_cfg->ucode_sglist = sglist;
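	/*
	 * Stash the mapped sglist where the reset path can find it, then
	 * initiate an adapter reset, which carries out the actual download.
	 */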
3710	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3711	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3712	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3713
3714	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3715	ioa_cfg->ucode_sglist = NULL;
3716	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3717	return 0;
3718}
3719
3720/**
3721 * ipr_store_update_fw - Update the firmware on the adapter
3722 * @dev:	device struct
3723 * @buf:	buffer
3724 * @count:	buffer size
3725 *
3726 * This function will update the firmware on the adapter.
3727 *
3728 * Return value:
3729 * 	count on success / other on failure
3730 **/
3731static ssize_t ipr_store_update_fw(struct device *dev,
3732				   struct device_attribute *attr,
3733				   const char *buf, size_t count)
3734{
3735	struct Scsi_Host *shost = class_to_shost(dev);
3736	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3737	struct ipr_ucode_image_header *image_hdr;
3738	const struct firmware *fw_entry;
3739	struct ipr_sglist *sglist;
3740	char fname[100];
3741	char *src;
3742	int len, result, dnld_size;
3743
3744	if (!capable(CAP_SYS_ADMIN))
3745		return -EACCES;
3746
3747	len = snprintf(fname, 99, "%s", buf);
3748	fname[len-1] = '\0';
3749
3750	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3751		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3752		return -EIO;
3753	}
3754
3755	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3756
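	/*
	 * The microcode image follows the header; skip header_length bytes
	 * to get the payload that is downloaded to the adapter.
	 */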
3757	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3758	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3759	sglist = ipr_alloc_ucode_buffer(dnld_size);
3760
3761	if (!sglist) {
3762		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3763		release_firmware(fw_entry);
3764		return -ENOMEM;
3765	}
3766
3767	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3768
3769	if (result) {
3770		dev_err(&ioa_cfg->pdev->dev,
3771			"Microcode buffer copy to DMA buffer failed\n");
3772		goto out;
3773	}
3774
3775	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
3776
3777	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3778
3779	if (!result)
3780		result = count;
3781out:
3782	ipr_free_ucode_buffer(sglist);
3783	release_firmware(fw_entry);
3784	return result;
3785}
3786
3787static struct device_attribute ipr_update_fw_attr = {
3788	.attr = {
3789		.name =		"update_fw",
3790		.mode =		S_IWUSR,
3791	},
3792	.store = ipr_store_update_fw
3793};
3794
3795/**
3796 * ipr_show_fw_type - Show the adapter's firmware type.
3797 * @dev:	device struct
3798 * @buf:	buffer
3799 *
3800 * Return value:
3801 *	number of bytes printed to buffer
3802 **/
3803static ssize_t ipr_show_fw_type(struct device *dev,
3804				struct device_attribute *attr, char *buf)
3805{
3806	struct Scsi_Host *shost = class_to_shost(dev);
3807	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3808	unsigned long lock_flags = 0;
3809	int len;
3810
3811	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3812	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3813	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3814	return len;
3815}
3816
3817static struct device_attribute ipr_ioa_fw_type_attr = {
3818	.attr = {
3819		.name =		"fw_type",
3820		.mode =		S_IRUGO,
3821	},
3822	.show = ipr_show_fw_type
3823};
3824
3825static struct device_attribute *ipr_ioa_attrs[] = {
3826	&ipr_fw_version_attr,
3827	&ipr_log_level_attr,
3828	&ipr_diagnostics_attr,
3829	&ipr_ioa_state_attr,
3830	&ipr_ioa_reset_attr,
3831	&ipr_update_fw_attr,
3832	&ipr_ioa_fw_type_attr,
3833	NULL,
3834};
3835
3836#ifdef CONFIG_SCSI_IPR_DUMP
3837/**
3838 * ipr_read_dump - Dump the adapter
3839 * @filp:		open sysfs file
3840 * @kobj:		kobject struct
3841 * @bin_attr:		bin_attribute struct
3842 * @buf:		buffer
3843 * @off:		offset
3844 * @count:		buffer size
3845 *
3846 * Return value:
3847 *	number of bytes read / other on failure
3848 **/
3849static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3850			     struct bin_attribute *bin_attr,
3851			     char *buf, loff_t off, size_t count)
3852{
3853	struct device *cdev = container_of(kobj, struct device, kobj);
3854	struct Scsi_Host *shost = class_to_shost(cdev);
3855	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3856	struct ipr_dump *dump;
3857	unsigned long lock_flags = 0;
3858	char *src;
3859	int len, sdt_end;
3860	size_t rc = count;
3861
3862	if (!capable(CAP_SYS_ADMIN))
3863		return -EACCES;
3864
3865	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3866	dump = ioa_cfg->dump;
3867
3868	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3869		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3870		return 0;
3871	}
3872	kref_get(&dump->kref);
3873	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3874
3875	if (off > dump->driver_dump.hdr.len) {
3876		kref_put(&dump->kref, ipr_release_dump);
3877		return 0;
3878	}
3879
3880	if (off + count > dump->driver_dump.hdr.len) {
3881		count = dump->driver_dump.hdr.len - off;
3882		rc = count;
3883	}
3884
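	/*
	 * The dump is laid out as three consecutive regions: the driver
	 * dump structure, the SDT, and the IOA dump data pages. Copy the
	 * portion of the request that falls within each region in turn.
	 */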
3885	if (count && off < sizeof(dump->driver_dump)) {
3886		if (off + count > sizeof(dump->driver_dump))
3887			len = sizeof(dump->driver_dump) - off;
3888		else
3889			len = count;
3890		src = (u8 *)&dump->driver_dump + off;
3891		memcpy(buf, src, len);
3892		buf += len;
3893		off += len;
3894		count -= len;
3895	}
3896
3897	off -= sizeof(dump->driver_dump);
3898
3899	if (ioa_cfg->sis64)
3900		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3901			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
3902			   sizeof(struct ipr_sdt_entry));
3903	else
3904		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
3905			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
3906
3907	if (count && off < sdt_end) {
3908		if (off + count > sdt_end)
3909			len = sdt_end - off;
3910		else
3911			len = count;
3912		src = (u8 *)&dump->ioa_dump + off;
3913		memcpy(buf, src, len);
3914		buf += len;
3915		off += len;
3916		count -= len;
3917	}
3918
3919	off -= sdt_end;
3920
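	/*
	 * Copy the remaining IOA dump data one page at a time from the
	 * ioa_data array of page pointers.
	 */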
3921	while (count) {
3922		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3923			len = PAGE_ALIGN(off) - off;
3924		else
3925			len = count;
3926		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3927		src += off & ~PAGE_MASK;
3928		memcpy(buf, src, len);
3929		buf += len;
3930		off += len;
3931		count -= len;
3932	}
3933
3934	kref_put(&dump->kref, ipr_release_dump);
3935	return rc;
3936}
3937
3938/**
3939 * ipr_alloc_dump - Prepare for adapter dump
3940 * @ioa_cfg:	ioa config struct
3941 *
3942 * Return value:
3943 *	0 on success / other on failure
3944 **/
3945static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3946{
3947	struct ipr_dump *dump;
3948	__be32 **ioa_data;
3949	unsigned long lock_flags = 0;
3950
3951	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3952
3953	if (!dump) {
3954		ipr_err("Dump memory allocation failed\n");
3955		return -ENOMEM;
3956	}
3957
3958	if (ioa_cfg->sis64)
3959		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3960	else
3961		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
3962
3963	if (!ioa_data) {
3964		ipr_err("Dump memory allocation failed\n");
3965		kfree(dump);
3966		return -ENOMEM;
3967	}
3968
3969	dump->ioa_dump.ioa_data = ioa_data;
3970
3971	kref_init(&dump->kref);
3972	dump->ioa_cfg = ioa_cfg;
3973
3974	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3975
3976	if (INACTIVE != ioa_cfg->sdt_state) {
3977		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3978		vfree(dump->ioa_dump.ioa_data);
3979		kfree(dump);
3980		return 0;
3981	}
3982
3983	ioa_cfg->dump = dump;
3984	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3985	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3986		ioa_cfg->dump_taken = 1;
3987		schedule_work(&ioa_cfg->work_q);
3988	}
3989	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3990
3991	return 0;
3992}
3993
3994/**
3995 * ipr_free_dump - Free adapter dump memory
3996 * @ioa_cfg:	ioa config struct
3997 *
3998 * Return value:
3999 *	0 on success / other on failure
4000 **/
4001static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4002{
4003	struct ipr_dump *dump;
4004	unsigned long lock_flags = 0;
4005
4006	ENTER;
4007
4008	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4009	dump = ioa_cfg->dump;
4010	if (!dump) {
4011		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4012		return 0;
4013	}
4014
4015	ioa_cfg->dump = NULL;
4016	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4017
4018	kref_put(&dump->kref, ipr_release_dump);
4019
4020	LEAVE;
4021	return 0;
4022}
4023
4024/**
4025 * ipr_write_dump - Setup dump state of adapter
4026 * @filp:		open sysfs file
4027 * @kobj:		kobject struct
4028 * @bin_attr:		bin_attribute struct
4029 * @buf:		buffer
4030 * @off:		offset
4031 * @count:		buffer size
4032 *
4033 * Return value:
4034 *	count on success / other on failure
4035 **/
4036static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4037			      struct bin_attribute *bin_attr,
4038			      char *buf, loff_t off, size_t count)
4039{
4040	struct device *cdev = container_of(kobj, struct device, kobj);
4041	struct Scsi_Host *shost = class_to_shost(cdev);
4042	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4043	int rc;
4044
4045	if (!capable(CAP_SYS_ADMIN))
4046		return -EACCES;
4047
4048	if (buf[0] == '1')
4049		rc = ipr_alloc_dump(ioa_cfg);
4050	else if (buf[0] == '0')
4051		rc = ipr_free_dump(ioa_cfg);
4052	else
4053		return -EINVAL;
4054
4055	if (rc)
4056		return rc;
4057	else
4058		return count;
4059}
4060
4061static struct bin_attribute ipr_dump_attr = {
4062	.attr =	{
4063		.name = "dump",
4064		.mode = S_IRUSR | S_IWUSR,
4065	},
4066	.size = 0,
4067	.read = ipr_read_dump,
4068	.write = ipr_write_dump
4069};
4070#else
4071static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4072#endif
4073
4074/**
4075 * ipr_change_queue_depth - Change the device's queue depth
4076 * @sdev:	scsi device struct
4077 * @qdepth:	depth to set
4078 * @reason:	calling context
4079 *
4080 * Return value:
4081 * 	actual depth set
4082 **/
4083static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4084				  int reason)
4085{
4086	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4087	struct ipr_resource_entry *res;
4088	unsigned long lock_flags = 0;
4089
4090	if (reason != SCSI_QDEPTH_DEFAULT)
4091		return -EOPNOTSUPP;
4092
4093	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4094	res = (struct ipr_resource_entry *)sdev->hostdata;
4095
4096	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4097		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4098	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4099
4100	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4101	return sdev->queue_depth;
4102}
4103
4104/**
4105 * ipr_change_queue_type - Change the device's queue type
4106 * @sdev:	scsi device struct
4107 * @tag_type:	type of tags to use
4108 *
4109 * Return value:
4110 * 	actual queue type set
4111 **/
4112static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4113{
4114	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4115	struct ipr_resource_entry *res;
4116	unsigned long lock_flags = 0;
4117
4118	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4119	res = (struct ipr_resource_entry *)sdev->hostdata;
4120
4121	if (res) {
4122		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4123			/*
4124			 * We don't bother quiescing the device here since the
4125			 * adapter firmware does it for us.
4126			 */
4127			scsi_set_tag_type(sdev, tag_type);
4128
4129			if (tag_type)
4130				scsi_activate_tcq(sdev, sdev->queue_depth);
4131			else
4132				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4133		} else
4134			tag_type = 0;
4135	} else
4136		tag_type = 0;
4137
4138	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4139	return tag_type;
4140}
4141
4142/**
4143 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4144 * @dev:	device struct
4145 * @attr:	device attribute structure
4146 * @buf:	buffer
4147 *
4148 * Return value:
4149 * 	number of bytes printed to buffer
4150 **/
4151static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4152{
4153	struct scsi_device *sdev = to_scsi_device(dev);
4154	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4155	struct ipr_resource_entry *res;
4156	unsigned long lock_flags = 0;
4157	ssize_t len = -ENXIO;
4158
4159	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4160	res = (struct ipr_resource_entry *)sdev->hostdata;
4161	if (res)
4162		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4163	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4164	return len;
4165}
4166
4167static struct device_attribute ipr_adapter_handle_attr = {
4168	.attr = {
4169		.name = 	"adapter_handle",
4170		.mode =		S_IRUSR,
4171	},
4172	.show = ipr_show_adapter_handle
4173};
4174
4175/**
4176 * ipr_show_resource_path - Show the resource path or the resource address for
4177 *			    this device.
4178 * @dev:	device struct
4179 * @attr:	device attribute structure
4180 * @buf:	buffer
4181 *
4182 * Return value:
4183 * 	number of bytes printed to buffer
4184 **/
4185static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4186{
4187	struct scsi_device *sdev = to_scsi_device(dev);
4188	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4189	struct ipr_resource_entry *res;
4190	unsigned long lock_flags = 0;
4191	ssize_t len = -ENXIO;
4192	char buffer[IPR_MAX_RES_PATH_LENGTH];
4193
4194	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4195	res = (struct ipr_resource_entry *)sdev->hostdata;
4196	if (res && ioa_cfg->sis64)
4197		len = snprintf(buf, PAGE_SIZE, "%s\n",
4198			       ipr_format_res_path(res->res_path, buffer,
4199						   sizeof(buffer)));
4200	else if (res)
4201		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4202			       res->bus, res->target, res->lun);
4203
4204	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4205	return len;
4206}
4207
4208static struct device_attribute ipr_resource_path_attr = {
4209	.attr = {
4210		.name = 	"resource_path",
4211		.mode =		S_IRUGO,
4212	},
4213	.show = ipr_show_resource_path
4214};
4215
4216/**
4217 * ipr_show_device_id - Show the device_id for this device.
4218 * @dev:	device struct
4219 * @attr:	device attribute structure
4220 * @buf:	buffer
4221 *
4222 * Return value:
4223 *	number of bytes printed to buffer
4224 **/
4225static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4226{
4227	struct scsi_device *sdev = to_scsi_device(dev);
4228	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4229	struct ipr_resource_entry *res;
4230	unsigned long lock_flags = 0;
4231	ssize_t len = -ENXIO;
4232
4233	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4234	res = (struct ipr_resource_entry *)sdev->hostdata;
4235	if (res && ioa_cfg->sis64)
4236		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4237	else if (res)
4238		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4239
4240	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4241	return len;
4242}
4243
4244static struct device_attribute ipr_device_id_attr = {
4245	.attr = {
4246		.name =		"device_id",
4247		.mode =		S_IRUGO,
4248	},
4249	.show = ipr_show_device_id
4250};
4251
4252/**
4253 * ipr_show_resource_type - Show the resource type for this device.
4254 * @dev:	device struct
4255 * @attr:	device attribute structure
4256 * @buf:	buffer
4257 *
4258 * Return value:
4259 *	number of bytes printed to buffer
4260 **/
4261static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4262{
4263	struct scsi_device *sdev = to_scsi_device(dev);
4264	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4265	struct ipr_resource_entry *res;
4266	unsigned long lock_flags = 0;
4267	ssize_t len = -ENXIO;
4268
4269	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4270	res = (struct ipr_resource_entry *)sdev->hostdata;
4271
4272	if (res)
4273		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4274
4275	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4276	return len;
4277}
4278
4279static struct device_attribute ipr_resource_type_attr = {
4280	.attr = {
4281		.name =		"resource_type",
4282		.mode =		S_IRUGO,
4283	},
4284	.show = ipr_show_resource_type
4285};
4286
4287static struct device_attribute *ipr_dev_attrs[] = {
4288	&ipr_adapter_handle_attr,
4289	&ipr_resource_path_attr,
4290	&ipr_device_id_attr,
4291	&ipr_resource_type_attr,
4292	NULL,
4293};
4294
4295/**
4296 * ipr_biosparam - Return the HSC mapping
4297 * @sdev:			scsi device struct
4298 * @block_device:	block device pointer
4299 * @capacity:		capacity of the device
4300 * @parm:			Array containing returned HSC values.
4301 *
4302 * This function generates the HSC parms that fdisk uses.
4303 * We want to make sure we return something that places partitions
4304 * on 4k boundaries for best performance with the IOA.
4305 *
4306 * Return value:
4307 * 	0 on success
4308 **/
4309static int ipr_biosparam(struct scsi_device *sdev,
4310			 struct block_device *block_device,
4311			 sector_t capacity, int *parm)
4312{
4313	int heads, sectors;
4314	sector_t cylinders;
4315
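	/*
	 * 128 heads * 32 sectors/track = 4096 sectors per cylinder, so
	 * cylinder-aligned partitions start on 4K boundaries.
	 */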
4316	heads = 128;
4317	sectors = 32;
4318
4319	cylinders = capacity;
4320	sector_div(cylinders, (128 * 32));
4321
4322	/* return result */
4323	parm[0] = heads;
4324	parm[1] = sectors;
4325	parm[2] = cylinders;
4326
4327	return 0;
4328}
4329
4330/**
4331 * ipr_find_starget - Find target based on bus/target.
4332 * @starget:	scsi target struct
4333 *
4334 * Return value:
4335 * 	resource entry pointer if found / NULL if not found
4336 **/
4337static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4338{
4339	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4340	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4341	struct ipr_resource_entry *res;
4342
4343	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4344		if ((res->bus == starget->channel) &&
4345		    (res->target == starget->id)) {
4346			return res;
4347		}
4348	}
4349
4350	return NULL;
4351}
4352
4353static struct ata_port_info sata_port_info;
4354
4355/**
4356 * ipr_target_alloc - Prepare for commands to a SCSI target
4357 * @starget:	scsi target struct
4358 *
4359 * If the device is a SATA device, this function allocates an
4360 * ATA port with libata, else it does nothing.
4361 *
4362 * Return value:
4363 * 	0 on success / non-0 on failure
4364 **/
4365static int ipr_target_alloc(struct scsi_target *starget)
4366{
4367	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4368	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4369	struct ipr_sata_port *sata_port;
4370	struct ata_port *ap;
4371	struct ipr_resource_entry *res;
4372	unsigned long lock_flags;
4373
4374	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4375	res = ipr_find_starget(starget);
4376	starget->hostdata = NULL;
4377
4378	if (res && ipr_is_gata(res)) {
4379		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4380		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4381		if (!sata_port)
4382			return -ENOMEM;
4383
4384		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4385		if (ap) {
4386			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4387			sata_port->ioa_cfg = ioa_cfg;
4388			sata_port->ap = ap;
4389			sata_port->res = res;
4390
4391			res->sata_port = sata_port;
4392			ap->private_data = sata_port;
4393			starget->hostdata = sata_port;
4394		} else {
4395			kfree(sata_port);
4396			return -ENOMEM;
4397		}
4398	}
4399	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4400
4401	return 0;
4402}
4403
4404/**
4405 * ipr_target_destroy - Destroy a SCSI target
4406 * @starget:	scsi target struct
4407 *
4408 * If the device was a SATA device, this function frees the libata
4409 * ATA port, else it does nothing.
4410 *
4411 **/
4412static void ipr_target_destroy(struct scsi_target *starget)
4413{
4414	struct ipr_sata_port *sata_port = starget->hostdata;
4415	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4416	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4417
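	/*
	 * On SIS64 adapters, free the target id allocated on the virtual
	 * bus if no resource entry still maps to this target.
	 */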
4418	if (ioa_cfg->sis64) {
4419		if (!ipr_find_starget(starget)) {
4420			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4421				clear_bit(starget->id, ioa_cfg->array_ids);
4422			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4423				clear_bit(starget->id, ioa_cfg->vset_ids);
4424			else if (starget->channel == 0)
4425				clear_bit(starget->id, ioa_cfg->target_ids);
4426		}
4427	}
4428
4429	if (sata_port) {
4430		starget->hostdata = NULL;
4431		ata_sas_port_destroy(sata_port->ap);
4432		kfree(sata_port);
4433	}
4434}
4435
4436/**
4437 * ipr_find_sdev - Find device based on bus/target/lun.
4438 * @sdev:	scsi device struct
4439 *
4440 * Return value:
4441 * 	resource entry pointer if found / NULL if not found
4442 **/
4443static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4444{
4445	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4446	struct ipr_resource_entry *res;
4447
4448	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4449		if ((res->bus == sdev->channel) &&
4450		    (res->target == sdev->id) &&
4451		    (res->lun == sdev->lun))
4452			return res;
4453	}
4454
4455	return NULL;
4456}
4457
4458/**
4459 * ipr_slave_destroy - Unconfigure a SCSI device
4460 * @sdev:	scsi device struct
4461 *
4462 * Return value:
4463 * 	nothing
4464 **/
4465static void ipr_slave_destroy(struct scsi_device *sdev)
4466{
4467	struct ipr_resource_entry *res;
4468	struct ipr_ioa_cfg *ioa_cfg;
4469	unsigned long lock_flags = 0;
4470
4471	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4472
4473	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4474	res = (struct ipr_resource_entry *) sdev->hostdata;
4475	if (res) {
4476		if (res->sata_port)
4477			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4478		sdev->hostdata = NULL;
4479		res->sdev = NULL;
4480		res->sata_port = NULL;
4481	}
4482	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4483}
4484
4485/**
4486 * ipr_slave_configure - Configure a SCSI device
4487 * @sdev:	scsi device struct
4488 *
4489 * This function configures the specified scsi device.
4490 *
4491 * Return value:
4492 * 	0 on success
4493 **/
4494static int ipr_slave_configure(struct scsi_device *sdev)
4495{
4496	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4497	struct ipr_resource_entry *res;
4498	struct ata_port *ap = NULL;
4499	unsigned long lock_flags = 0;
4500	char buffer[IPR_MAX_RES_PATH_LENGTH];
4501
4502	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4503	res = sdev->hostdata;
4504	if (res) {
4505		if (ipr_is_af_dasd_device(res))
4506			sdev->type = TYPE_RAID;
4507		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4508			sdev->scsi_level = 4;
4509			sdev->no_uld_attach = 1;
4510		}
4511		if (ipr_is_vset_device(res)) {
4512			blk_queue_rq_timeout(sdev->request_queue,
4513					     IPR_VSET_RW_TIMEOUT);
4514			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4515		}
4516		if (ipr_is_gata(res) && res->sata_port)
4517			ap = res->sata_port->ap;
4518		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4519
4520		if (ap) {
4521			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4522			ata_sas_slave_configure(sdev, ap);
4523		} else
4524			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4525		if (ioa_cfg->sis64)
4526			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4527				    ipr_format_res_path(res->res_path, buffer,
4528							sizeof(buffer)));
4529		return 0;
4530	}
4531	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4532	return 0;
4533}
4534
4535/**
4536 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4537 * @sdev:	scsi device struct
4538 *
4539 * This function initializes an ATA port so that future commands
4540 * sent through queuecommand will work.
4541 *
4542 * Return value:
4543 * 	0 on success
4544 **/
4545static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4546{
4547	struct ipr_sata_port *sata_port = NULL;
4548	int rc = -ENXIO;
4549
4550	ENTER;
4551	if (sdev->sdev_target)
4552		sata_port = sdev->sdev_target->hostdata;
4553	if (sata_port)
4554		rc = ata_sas_port_init(sata_port->ap);
4555	if (rc)
4556		ipr_slave_destroy(sdev);
4557
4558	LEAVE;
4559	return rc;
4560}
4561
4562/**
4563 * ipr_slave_alloc - Prepare for commands to a device.
4564 * @sdev:	scsi device struct
4565 *
4566 * This function saves a pointer to the resource entry
4567 * in the scsi device struct if the device exists. We
4568 * can then use this pointer in ipr_queuecommand when
4569 * handling new commands.
4570 *
4571 * Return value:
4572 * 	0 on success / -ENXIO if device does not exist
4573 **/
4574static int ipr_slave_alloc(struct scsi_device *sdev)
4575{
4576	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4577	struct ipr_resource_entry *res;
4578	unsigned long lock_flags;
4579	int rc = -ENXIO;
4580
4581	sdev->hostdata = NULL;
4582
4583	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4584
4585	res = ipr_find_sdev(sdev);
4586	if (res) {
4587		res->sdev = sdev;
4588		res->add_to_ml = 0;
4589		res->in_erp = 0;
4590		sdev->hostdata = res;
4591		if (!ipr_is_naca_model(res))
4592			res->needs_sync_complete = 1;
4593		rc = 0;
4594		if (ipr_is_gata(res)) {
4595			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4596			return ipr_ata_slave_alloc(sdev);
4597		}
4598	}
4599
4600	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4601
4602	return rc;
4603}
4604
4605/**
4606 * ipr_eh_host_reset - Reset the host adapter
4607 * @scsi_cmd:	scsi command struct
4608 *
4609 * Return value:
4610 * 	SUCCESS / FAILED
4611 **/
4612static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4613{
4614	struct ipr_ioa_cfg *ioa_cfg;
4615	int rc;
4616
4617	ENTER;
4618	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4619
4620	if (!ioa_cfg->in_reset_reload) {
4621		dev_err(&ioa_cfg->pdev->dev,
4622			"Adapter being reset as a result of error recovery.\n");
4623
4624		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4625			ioa_cfg->sdt_state = GET_DUMP;
4626	}
4627
4628	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4629
4630	LEAVE;
4631	return rc;
4632}
4633
4634static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4635{
4636	int rc;
4637
4638	spin_lock_irq(cmd->device->host->host_lock);
4639	rc = __ipr_eh_host_reset(cmd);
4640	spin_unlock_irq(cmd->device->host->host_lock);
4641
4642	return rc;
4643}
4644
4645/**
4646 * ipr_device_reset - Reset the device
4647 * @ioa_cfg:	ioa config struct
4648 * @res:		resource entry struct
4649 *
4650 * This function issues a device reset to the affected device.
4651 * If the device is a SCSI device, a LUN reset will be sent
4652 * to the device first. If that does not work, a target reset
4653 * will be sent. If the device is a SATA device, a PHY reset will
4654 * be sent.
4655 *
4656 * Return value:
4657 *	0 on success / non-zero on failure
4658 **/
4659static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4660			    struct ipr_resource_entry *res)
4661{
4662	struct ipr_cmnd *ipr_cmd;
4663	struct ipr_ioarcb *ioarcb;
4664	struct ipr_cmd_pkt *cmd_pkt;
4665	struct ipr_ioarcb_ata_regs *regs;
4666	u32 ioasc;
4667
4668	ENTER;
4669	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4670	ioarcb = &ipr_cmd->ioarcb;
4671	cmd_pkt = &ioarcb->cmd_pkt;
4672
4673	if (ipr_cmd->ioa_cfg->sis64) {
4674		regs = &ipr_cmd->i.ata_ioadl.regs;
4675		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4676	} else
4677		regs = &ioarcb->u.add_data.u.regs;
4678
4679	ioarcb->res_handle = res->res_handle;
4680	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4681	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4682	if (ipr_is_gata(res)) {
4683		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4684		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4685		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4686	}
4687
4688	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4689	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4690	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4691	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4692		if (ipr_cmd->ioa_cfg->sis64)
4693			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4694			       sizeof(struct ipr_ioasa_gata));
4695		else
4696			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4697			       sizeof(struct ipr_ioasa_gata));
4698	}
4699
4700	LEAVE;
4701	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4702}
4703
4704/**
4705 * ipr_sata_reset - Reset the SATA port
4706 * @link:	SATA link to reset
4707 * @classes:	class of the attached device
4708 *
4709 * This function issues a SATA phy reset to the affected ATA link.
4710 *
4711 * Return value:
4712 *	0 on success / non-zero on failure
4713 **/
4714static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4715				unsigned long deadline)
4716{
4717	struct ipr_sata_port *sata_port = link->ap->private_data;
4718	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4719	struct ipr_resource_entry *res;
4720	unsigned long lock_flags = 0;
4721	int rc = -ENXIO;
4722
4723	ENTER;
4724	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4725	while (ioa_cfg->in_reset_reload) {
4726		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4727		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4728		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4729	}
4730
4731	res = sata_port->res;
4732	if (res) {
4733		rc = ipr_device_reset(ioa_cfg, res);
4734		*classes = res->ata_class;
4735	}
4736
4737	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4738	LEAVE;
4739	return rc;
4740}
4741
4742/**
4743 * ipr_eh_dev_reset - Reset the device
4744 * @scsi_cmd:	scsi command struct
4745 *
4746 * This function issues a device reset to the affected device.
4747 * A LUN reset will be sent to the device first. If that does
4748 * not work, a target reset will be sent.
4749 *
4750 * Return value:
4751 *	SUCCESS / FAILED
4752 **/
4753static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4754{
4755	struct ipr_cmnd *ipr_cmd;
4756	struct ipr_ioa_cfg *ioa_cfg;
4757	struct ipr_resource_entry *res;
4758	struct ata_port *ap;
4759	int rc = 0;
4760
4761	ENTER;
4762	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4763	res = scsi_cmd->device->hostdata;
4764
4765	if (!res)
4766		return FAILED;
4767
4768	/*
4769	 * If we are currently going through reset/reload, return failed. This will force the
4770	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4771	 * reset to complete
4772	 */
4773	if (ioa_cfg->in_reset_reload)
4774		return FAILED;
4775	if (ioa_cfg->ioa_is_dead)
4776		return FAILED;
4777
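	/*
	 * Point the done handler of any commands outstanding to this device
	 * at the error handler completion routines, and flag outstanding ATA
	 * commands as timed out so they are failed back through libata EH.
	 */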
4778	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4779		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4780			if (ipr_cmd->scsi_cmd)
4781				ipr_cmd->done = ipr_scsi_eh_done;
4782			if (ipr_cmd->qc)
4783				ipr_cmd->done = ipr_sata_eh_done;
4784			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4785				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4786				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4787			}
4788		}
4789	}
4790
4791	res->resetting_device = 1;
4792	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4793
4794	if (ipr_is_gata(res) && res->sata_port) {
4795		ap = res->sata_port->ap;
4796		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4797		ata_std_error_handler(ap);
4798		spin_lock_irq(scsi_cmd->device->host->host_lock);
4799
4800		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4801			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4802				rc = -EIO;
4803				break;
4804			}
4805		}
4806	} else
4807		rc = ipr_device_reset(ioa_cfg, res);
4808	res->resetting_device = 0;
4809
4810	LEAVE;
4811	return (rc ? FAILED : SUCCESS);
4812}
4813
4814static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4815{
4816	int rc;
4817
4818	spin_lock_irq(cmd->device->host->host_lock);
4819	rc = __ipr_eh_dev_reset(cmd);
4820	spin_unlock_irq(cmd->device->host->host_lock);
4821
4822	return rc;
4823}
4824
4825/**
4826 * ipr_bus_reset_done - Op done function for bus reset.
4827 * @ipr_cmd:	ipr command struct
4828 *
4829 * This function is the op done function for a bus reset
4830 *
4831 * Return value:
4832 * 	none
4833 **/
4834static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4835{
4836	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4837	struct ipr_resource_entry *res;
4838
4839	ENTER;
4840	if (!ioa_cfg->sis64)
4841		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4842			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4843				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4844				break;
4845			}
4846		}
4847
4848	/*
4849	 * If abort has not completed, indicate the reset has, else call the
4850	 * abort's done function to wake the sleeping eh thread
4851	 */
4852	if (ipr_cmd->sibling->sibling)
4853		ipr_cmd->sibling->sibling = NULL;
4854	else
4855		ipr_cmd->sibling->done(ipr_cmd->sibling);
4856
4857	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4858	LEAVE;
4859}
4860
4861/**
4862 * ipr_abort_timeout - An abort task has timed out
4863 * @ipr_cmd:	ipr command struct
4864 *
4865 * This function handles when an abort task times out. If this
4866 * happens we issue a bus reset since we have resources tied
4867 * up that must be freed before returning to the midlayer.
4868 *
4869 * Return value:
4870 *	none
4871 **/
4872static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4873{
4874	struct ipr_cmnd *reset_cmd;
4875	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4876	struct ipr_cmd_pkt *cmd_pkt;
4877	unsigned long lock_flags = 0;
4878
4879	ENTER;
4880	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4881	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4882		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4883		return;
4884	}
4885
4886	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4887	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4888	ipr_cmd->sibling = reset_cmd;
4889	reset_cmd->sibling = ipr_cmd;
4890	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4891	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4892	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4893	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4894	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4895
4896	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4897	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4898	LEAVE;
4899}
4900
4901/**
4902 * ipr_cancel_op - Cancel specified op
4903 * @scsi_cmd:	scsi command struct
4904 *
4905 * This function cancels specified op.
4906 *
4907 * Return value:
4908 *	SUCCESS / FAILED
4909 **/
4910static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4911{
4912	struct ipr_cmnd *ipr_cmd;
4913	struct ipr_ioa_cfg *ioa_cfg;
4914	struct ipr_resource_entry *res;
4915	struct ipr_cmd_pkt *cmd_pkt;
4916	u32 ioasc, int_reg;
4917	int op_found = 0;
4918
4919	ENTER;
4920	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4921	res = scsi_cmd->device->hostdata;
4922
4923	/* If we are currently going through reset/reload, return failed.
4924	 * This will force the mid-layer to call ipr_eh_host_reset,
4925	 * which will then go to sleep and wait for the reset to complete
4926	 */
4927	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4928		return FAILED;
4929	if (!res)
4930		return FAILED;
4931
4932	/*
4933	 * If we are aborting a timed out op, chances are that the timeout was caused
4934	 * by an as-yet-undetected EEH error. In such cases, reading a register will
4935	 * trigger the EEH recovery infrastructure.
4936	 */
4937	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4938
4939	if (!ipr_is_gscsi(res))
4940		return FAILED;
4941
4942	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4943		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4944			ipr_cmd->done = ipr_scsi_eh_done;
4945			op_found = 1;
4946			break;
4947		}
4948	}
4949
4950	if (!op_found)
4951		return SUCCESS;
4952
4953	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4954	ipr_cmd->ioarcb.res_handle = res->res_handle;
4955	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4956	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4957	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4958	ipr_cmd->u.sdev = scsi_cmd->device;
4959
4960	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4961		    scsi_cmd->cmnd[0]);
4962	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4963	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4964
4965	/*
4966	 * If the abort task timed out and we sent a bus reset, we will get
4967	 * one of the following responses to the abort
4968	 */
4969	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4970		ioasc = 0;
4971		ipr_trace;
4972	}
4973
4974	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4975	if (!ipr_is_naca_model(res))
4976		res->needs_sync_complete = 1;
4977
4978	LEAVE;
4979	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4980}
4981
4982/**
4983 * ipr_eh_abort - Abort a single op
4984 * @scsi_cmd:	scsi command struct
4985 *
4986 * Return value:
4987 * 	SUCCESS / FAILED
4988 **/
4989static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4990{
4991	unsigned long flags;
4992	int rc;
4993
4994	ENTER;
4995
4996	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4997	rc = ipr_cancel_op(scsi_cmd);
4998	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4999
5000	LEAVE;
5001	return rc;
5002}
5003
5004/**
5005 * ipr_handle_other_interrupt - Handle "other" interrupts
5006 * @ioa_cfg:	ioa config struct
5007 * @int_reg:	interrupt register
5008 *
5009 * Return value:
5010 * 	IRQ_NONE / IRQ_HANDLED
5011 **/
5012static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5013					      u32 int_reg)
5014{
5015	irqreturn_t rc = IRQ_HANDLED;
5016	u32 int_mask_reg;
5017
5018	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5019	int_reg &= ~int_mask_reg;
5020
5021	/* If no operational interrupt is pending from the adapter, ignore it,
5022	 * unless this is SIS64, in which case check for a stage change interrupt.
5023	 */
5024	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5025		if (ioa_cfg->sis64) {
5026			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5027			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5028			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5029
5030				/* clear stage change */
5031				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5032				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5033				list_del(&ioa_cfg->reset_cmd->queue);
5034				del_timer(&ioa_cfg->reset_cmd->timer);
5035				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5036				return IRQ_HANDLED;
5037			}
5038		}
5039
5040		return IRQ_NONE;
5041	}
5042
5043	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5044		/* Mask the interrupt */
5045		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5046
5047		/* Clear the interrupt */
5048		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5049		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5050
5051		list_del(&ioa_cfg->reset_cmd->queue);
5052		del_timer(&ioa_cfg->reset_cmd->timer);
5053		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5054	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5055		if (ioa_cfg->clear_isr) {
5056			if (ipr_debug && printk_ratelimit())
5057				dev_err(&ioa_cfg->pdev->dev,
5058					"Spurious interrupt detected. 0x%08X\n", int_reg);
5059			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5060			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5061			return IRQ_NONE;
5062		}
5063	} else {
5064		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5065			ioa_cfg->ioa_unit_checked = 1;
5066		else
5067			dev_err(&ioa_cfg->pdev->dev,
5068				"Permanent IOA failure. 0x%08X\n", int_reg);
5069
5070		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5071			ioa_cfg->sdt_state = GET_DUMP;
5072
5073		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5074		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5075	}
5076
5077	return rc;
5078}
5079
5080/**
5081 * ipr_isr_eh - Interrupt service routine error handler
5082 * @ioa_cfg:	ioa config struct
5083 * @msg:	message to log
5084 *
5085 * Return value:
5086 * 	none
5087 **/
5088static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5089{
5090	ioa_cfg->errors_logged++;
5091	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5092
5093	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5094		ioa_cfg->sdt_state = GET_DUMP;
5095
5096	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5097}
5098
5099/**
5100 * ipr_isr - Interrupt service routine
5101 * @irq:	irq number
5102 * @devp:	pointer to ioa config struct
5103 *
5104 * Return value:
5105 * 	IRQ_NONE / IRQ_HANDLED
5106 **/
5107static irqreturn_t ipr_isr(int irq, void *devp)
5108{
5109	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5110	unsigned long lock_flags = 0;
5111	u32 int_reg = 0;
5112	u32 ioasc;
5113	u16 cmd_index;
5114	int num_hrrq = 0;
5115	int irq_none = 0;
5116	struct ipr_cmnd *ipr_cmd;
5117	irqreturn_t rc = IRQ_NONE;
5118
5119	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5120
5121	/* If interrupts are disabled, ignore the interrupt */
5122	if (!ioa_cfg->allow_interrupts) {
5123		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5124		return IRQ_NONE;
5125	}
5126
5127	while (1) {
5128		ipr_cmd = NULL;
5129
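		/*
		 * Pull each new entry off the host RRQ. The toggle bit flips
		 * each time the queue wraps, distinguishing new entries from
		 * those already processed.
		 */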
5130		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5131		       ioa_cfg->toggle_bit) {
5132
5133			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5134				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5135
5136			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5137				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5138				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5139				return IRQ_HANDLED;
5140			}
5141
5142			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5143
5144			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5145
5146			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5147
5148			list_del(&ipr_cmd->queue);
5149			del_timer(&ipr_cmd->timer);
5150			ipr_cmd->done(ipr_cmd);
5151
5152			rc = IRQ_HANDLED;
5153
5154			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5155				ioa_cfg->hrrq_curr++;
5156			} else {
5157				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5158				ioa_cfg->toggle_bit ^= 1u;
5159			}
5160		}
5161
5162		if (ipr_cmd && !ioa_cfg->clear_isr)
5163			break;
5164
5165		if (ipr_cmd != NULL) {
5166			/* Clear the PCI interrupt */
5167			num_hrrq = 0;
5168			do {
5169				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5170				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5171			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5172					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5173
5174		} else if (rc == IRQ_NONE && irq_none == 0) {
5175			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5176			irq_none++;
5177		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5178			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5179			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5180			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5181			return IRQ_HANDLED;
5182		} else
5183			break;
5184	}
5185
5186	if (unlikely(rc == IRQ_NONE))
5187		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5188
5189	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5190	return rc;
5191}
5192
5193/**
5194 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5195 * @ioa_cfg:	ioa config struct
5196 * @ipr_cmd:	ipr command struct
5197 *
5198 * Return value:
5199 * 	0 on success / -1 on failure
5200 **/
5201static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5202			     struct ipr_cmnd *ipr_cmd)
5203{
5204	int i, nseg;
5205	struct scatterlist *sg;
5206	u32 length;
5207	u32 ioadl_flags = 0;
5208	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5209	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5210	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5211
5212	length = scsi_bufflen(scsi_cmd);
5213	if (!length)
5214		return 0;
5215
5216	nseg = scsi_dma_map(scsi_cmd);
5217	if (nseg < 0) {
5218		if (printk_ratelimit())
5219			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5220		return -1;
5221	}
5222
5223	ipr_cmd->dma_use_sg = nseg;
5224
5225	ioarcb->data_transfer_length = cpu_to_be32(length);
5226	ioarcb->ioadl_len =
5227		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5228
5229	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5230		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5231		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5232	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5233		ioadl_flags = IPR_IOADL_FLAGS_READ;
5234
5235	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5236		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5237		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5238		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5239	}
5240
5241	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5242	return 0;
5243}
5244
5245/**
5246 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5247 * @ioa_cfg:	ioa config struct
5248 * @ipr_cmd:	ipr command struct
5249 *
5250 * Return value:
5251 * 	0 on success / -1 on failure
5252 **/
5253static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5254			   struct ipr_cmnd *ipr_cmd)
5255{
5256	int i, nseg;
5257	struct scatterlist *sg;
5258	u32 length;
5259	u32 ioadl_flags = 0;
5260	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5261	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5262	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5263
5264	length = scsi_bufflen(scsi_cmd);
5265	if (!length)
5266		return 0;
5267
5268	nseg = scsi_dma_map(scsi_cmd);
5269	if (nseg < 0) {
5270		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5271		return -1;
5272	}
5273
5274	ipr_cmd->dma_use_sg = nseg;
5275
5276	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5277		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5278		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5279		ioarcb->data_transfer_length = cpu_to_be32(length);
5280		ioarcb->ioadl_len =
5281			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5282	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5283		ioadl_flags = IPR_IOADL_FLAGS_READ;
5284		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5285		ioarcb->read_ioadl_len =
5286			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5287	}
5288
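	/* If the S/G list is short enough, embed the IOADL in the IOARCB itself so the adapter can skip a separate DMA fetch of the descriptors */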
5289	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5290		ioadl = ioarcb->u.add_data.u.ioadl;
5291		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5292				    offsetof(struct ipr_ioarcb, u.add_data));
5293		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5294	}
5295
5296	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5297		ioadl[i].flags_and_data_len =
5298			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5299		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5300	}
5301
5302	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5303	return 0;
5304}
5305
5306/**
5307 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5308 * @scsi_cmd:	scsi command struct
5309 *
5310 * Return value:
5311 * 	task attributes
5312 **/
5313static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5314{
5315	u8 tag[2];
5316	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5317
5318	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5319		switch (tag[0]) {
5320		case MSG_SIMPLE_TAG:
5321			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5322			break;
5323		case MSG_HEAD_TAG:
5324			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5325			break;
5326		case MSG_ORDERED_TAG:
5327			rc = IPR_FLAGS_LO_ORDERED_TASK;
5328			break;
5329		}
5330	}
5331
5332	return rc;
5333}
5334
5335/**
5336 * ipr_erp_done - Process completion of ERP for a device
5337 * @ipr_cmd:		ipr command struct
5338 *
5339 * This function copies the sense buffer into the scsi_cmd
5340 * struct and invokes the scsi_done function.
5341 *
5342 * Return value:
5343 * 	nothing
5344 **/
5345static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5346{
5347	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5348	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5349	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5350	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5351
5352	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5353		scsi_cmd->result |= (DID_ERROR << 16);
5354		scmd_printk(KERN_ERR, scsi_cmd,
5355			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5356	} else {
5357		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5358		       SCSI_SENSE_BUFFERSIZE);
5359	}
5360
5361	if (res) {
5362		if (!ipr_is_naca_model(res))
5363			res->needs_sync_complete = 1;
5364		res->in_erp = 0;
5365	}
5366	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5367	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5368	scsi_cmd->scsi_done(scsi_cmd);
5369}
5370
5371/**
5372 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5373 * @ipr_cmd:	ipr command struct
5374 *
5375 * Return value:
5376 * 	none
5377 **/
5378static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5379{
5380	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5381	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5382	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5383
5384	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5385	ioarcb->data_transfer_length = 0;
5386	ioarcb->read_data_transfer_length = 0;
5387	ioarcb->ioadl_len = 0;
5388	ioarcb->read_ioadl_len = 0;
5389	ioasa->hdr.ioasc = 0;
5390	ioasa->hdr.residual_data_len = 0;
5391
5392	if (ipr_cmd->ioa_cfg->sis64)
5393		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5394			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5395	else {
5396		ioarcb->write_ioadl_addr =
5397			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5398		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5399	}
5400}
5401
5402/**
5403 * ipr_erp_request_sense - Send request sense to a device
5404 * @ipr_cmd:	ipr command struct
5405 *
5406 * This function sends a request sense to a device as a result
5407 * of a check condition.
5408 *
5409 * Return value:
5410 * 	nothing
5411 **/
5412static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5413{
5414	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5415	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5416
5417	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5418		ipr_erp_done(ipr_cmd);
5419		return;
5420	}
5421
5422	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5423
5424	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5425	cmd_pkt->cdb[0] = REQUEST_SENSE;
5426	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5427	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5428	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5429	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5430
5431	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5432		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5433
5434	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5435		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5436}
5437
5438/**
5439 * ipr_erp_cancel_all - Send cancel all to a device
5440 * @ipr_cmd:	ipr command struct
5441 *
5442 * This function sends a cancel all to a device to clear the
5443 * queue. If we are running TCQ on the device, QERR is set to 1,
5444 * which means all outstanding ops have been dropped on the floor.
5445 * Cancel all will return them to us.
5446 *
5447 * Return value:
5448 * 	nothing
5449 **/
5450static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5451{
5452	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5453	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5454	struct ipr_cmd_pkt *cmd_pkt;
5455
5456	res->in_erp = 1;
5457
5458	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5459
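	/* Untagged devices have no queued commands to cancel, so go straight to Request Sense */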
5460	if (!scsi_get_tag_type(scsi_cmd->device)) {
5461		ipr_erp_request_sense(ipr_cmd);
5462		return;
5463	}
5464
5465	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5466	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5467	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5468
5469	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5470		   IPR_CANCEL_ALL_TIMEOUT);
5471}
5472
5473/**
5474 * ipr_dump_ioasa - Dump contents of IOASA
5475 * @ioa_cfg:	ioa config struct
5476 * @ipr_cmd:	ipr command struct
5477 * @res:		resource entry struct
5478 *
5479 * This function is invoked by the interrupt handler when ops
5480 * fail. It will log the IOASA if appropriate. Only called
5481 * for GPDD ops.
5482 *
5483 * Return value:
5484 * 	none
5485 **/
5486static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5487			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5488{
5489	int i;
5490	u16 data_len;
5491	u32 ioasc, fd_ioasc;
5492	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5493	__be32 *ioasa_data = (__be32 *)ioasa;
5494	int error_index;
5495
5496	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5497	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5498
5499	if (0 == ioasc)
5500		return;
5501
5502	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5503		return;
5504
5505	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5506		error_index = ipr_get_error(fd_ioasc);
5507	else
5508		error_index = ipr_get_error(ioasc);
5509
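	/* At the default log level, only dump errors the adapter did not already log itself, and only for generic SCSI devices whose error table entry asks for it */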
5510	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5511		/* Don't log an error if the IOA already logged one */
5512		if (ioasa->hdr.ilid != 0)
5513			return;
5514
5515		if (!ipr_is_gscsi(res))
5516			return;
5517
5518		if (ipr_error_table[error_index].log_ioasa == 0)
5519			return;
5520	}
5521
5522	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5523
5524	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5525	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5526		data_len = sizeof(struct ipr_ioasa64);
5527	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5528		data_len = sizeof(struct ipr_ioasa);
5529
5530	ipr_err("IOASA Dump:\n");
5531
5532	for (i = 0; i < data_len / 4; i += 4) {
5533		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5534			be32_to_cpu(ioasa_data[i]),
5535			be32_to_cpu(ioasa_data[i+1]),
5536			be32_to_cpu(ioasa_data[i+2]),
5537			be32_to_cpu(ioasa_data[i+3]));
5538	}
5539}
5540
5541/**
5542 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5543 * @ipr_cmd:	ipr command struct
5545 *
5546 * Return value:
5547 * 	none
5548 **/
5549static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5550{
5551	u32 failing_lba;
5552	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5553	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5554	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5555	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5556
5557	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5558
5559	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5560		return;
5561
5562	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5563
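	/* Volume set errors with a failing LBA above 32 bits require descriptor-format (0x72) sense; everything else gets fixed-format (0x70) sense */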
5564	if (ipr_is_vset_device(res) &&
5565	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5566	    ioasa->u.vset.failing_lba_hi != 0) {
5567		sense_buf[0] = 0x72;
5568		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5569		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5570		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5571
5572		sense_buf[7] = 12;
5573		sense_buf[8] = 0;
5574		sense_buf[9] = 0x0A;
5575		sense_buf[10] = 0x80;
5576
5577		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5578
5579		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5580		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5581		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5582		sense_buf[15] = failing_lba & 0x000000ff;
5583
5584		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5585
5586		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5587		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5588		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5589		sense_buf[19] = failing_lba & 0x000000ff;
5590	} else {
5591		sense_buf[0] = 0x70;
5592		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5593		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5594		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5595
5596		/* Illegal request */
5597		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5598		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5599			sense_buf[7] = 10;	/* additional length */
5600
5601			/* IOARCB was in error */
5602			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5603				sense_buf[15] = 0xC0;
5604			else	/* Parameter data was invalid */
5605				sense_buf[15] = 0x80;
5606
5607			sense_buf[16] =
5608			    ((IPR_FIELD_POINTER_MASK &
5609			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5610			sense_buf[17] =
5611			    (IPR_FIELD_POINTER_MASK &
5612			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5613		} else {
5614			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5615				if (ipr_is_vset_device(res))
5616					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5617				else
5618					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5619
5620				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5621				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5622				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5623				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5624				sense_buf[6] = failing_lba & 0x000000ff;
5625			}
5626
5627			sense_buf[7] = 6;	/* additional length */
5628		}
5629	}
5630}
5631
5632/**
5633 * ipr_get_autosense - Copy autosense data to sense buffer
5634 * @ipr_cmd:	ipr command struct
5635 *
5636 * This function copies the autosense buffer to the buffer
5637 * in the scsi_cmd, if there is autosense available.
5638 *
5639 * Return value:
5640 *	1 if autosense was available / 0 if not
5641 **/
5642static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5643{
5644	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5645	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5646
5647	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5648		return 0;
5649
5650	if (ipr_cmd->ioa_cfg->sis64)
5651		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5652		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5653			   SCSI_SENSE_BUFFERSIZE));
5654	else
5655		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5656		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5657			   SCSI_SENSE_BUFFERSIZE));
5658	return 1;
5659}
5660
5661/**
5662 * ipr_erp_start - Process an error response for a SCSI op
5663 * @ioa_cfg:	ioa config struct
5664 * @ipr_cmd:	ipr command struct
5665 *
5666 * This function determines whether or not to initiate ERP
5667 * on the affected device.
5668 *
5669 * Return value:
5670 * 	nothing
5671 **/
5672static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5673			      struct ipr_cmnd *ipr_cmd)
5674{
5675	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5676	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5677	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5678	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5679
5680	if (!res) {
5681		ipr_scsi_eh_done(ipr_cmd);
5682		return;
5683	}
5684
5685	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5686		ipr_gen_sense(ipr_cmd);
5687
5688	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5689
5690	switch (masked_ioasc) {
5691	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5692		if (ipr_is_naca_model(res))
5693			scsi_cmd->result |= (DID_ABORT << 16);
5694		else
5695			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5696		break;
5697	case IPR_IOASC_IR_RESOURCE_HANDLE:
5698	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5699		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5700		break;
5701	case IPR_IOASC_HW_SEL_TIMEOUT:
5702		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5703		if (!ipr_is_naca_model(res))
5704			res->needs_sync_complete = 1;
5705		break;
5706	case IPR_IOASC_SYNC_REQUIRED:
5707		if (!res->in_erp)
5708			res->needs_sync_complete = 1;
5709		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5710		break;
5711	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5712	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5713		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5714		break;
5715	case IPR_IOASC_BUS_WAS_RESET:
5716	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5717		/*
5718		 * Report the bus reset and ask for a retry. The device
5719		 * will give CC/UA on the next command.
5720		 */
5721		if (!res->resetting_device)
5722			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5723		scsi_cmd->result |= (DID_ERROR << 16);
5724		if (!ipr_is_naca_model(res))
5725			res->needs_sync_complete = 1;
5726		break;
5727	case IPR_IOASC_HW_DEV_BUS_STATUS:
5728		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5729		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5730			if (!ipr_get_autosense(ipr_cmd)) {
5731				if (!ipr_is_naca_model(res)) {
5732					ipr_erp_cancel_all(ipr_cmd);
5733					return;
5734				}
5735			}
5736		}
5737		if (!ipr_is_naca_model(res))
5738			res->needs_sync_complete = 1;
5739		break;
5740	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5741		break;
5742	default:
5743		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5744			scsi_cmd->result |= (DID_ERROR << 16);
5745		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5746			res->needs_sync_complete = 1;
5747		break;
5748	}
5749
5750	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5751	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5752	scsi_cmd->scsi_done(scsi_cmd);
5753}
5754
5755/**
5756 * ipr_scsi_done - mid-layer done function
5757 * @ipr_cmd:	ipr command struct
5758 *
5759 * This function is invoked by the interrupt handler for
5760 * ops generated by the SCSI mid-layer
5761 *
5762 * Return value:
5763 * 	none
5764 **/
5765static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5766{
5767	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5768	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5769	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5770
5771	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5772
5773	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5774		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5775		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5776		scsi_cmd->scsi_done(scsi_cmd);
5777	} else
5778		ipr_erp_start(ioa_cfg, ipr_cmd);
5779}
5780
5781/**
5782 * ipr_queuecommand - Queue a mid-layer request
5783 * @scsi_cmd:	scsi command struct
5784 * @done:		done function
5785 *
5786 * This function queues a request generated by the mid-layer.
5787 *
5788 * Return value:
5789 *	0 on success
5790 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5791 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5792 **/
5793static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5794			    void (*done) (struct scsi_cmnd *))
5795{
5796	struct ipr_ioa_cfg *ioa_cfg;
5797	struct ipr_resource_entry *res;
5798	struct ipr_ioarcb *ioarcb;
5799	struct ipr_cmnd *ipr_cmd;
5800	int rc = 0;
5801
5802	scsi_cmd->scsi_done = done;
5803	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5804	res = scsi_cmd->device->hostdata;
5805	scsi_cmd->result = (DID_OK << 16);
5806
5807	/*
5808	 * We are currently blocking all devices due to a host reset
5809	 * We have told the host to stop giving us new requests, but
5810	 * ERP ops don't count. FIXME
5811	 */
5812	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5813		return SCSI_MLQUEUE_HOST_BUSY;
5814
5815	/*
5816	 * FIXME - Create scsi_set_host_offline interface
5817	 *  and the ioa_is_dead check can be removed
5818	 */
5819	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5820		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5821		scsi_cmd->result = (DID_NO_CONNECT << 16);
5822		scsi_cmd->scsi_done(scsi_cmd);
5823		return 0;
5824	}
5825
5826	if (ipr_is_gata(res) && res->sata_port)
5827		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5828
5829	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5830	ioarcb = &ipr_cmd->ioarcb;
5831	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5832
5833	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5834	ipr_cmd->scsi_cmd = scsi_cmd;
5835	ioarcb->res_handle = res->res_handle;
5836	ipr_cmd->done = ipr_scsi_done;
5837	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5838
5839	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5840		if (scsi_cmd->underflow == 0)
5841			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5842
5843		if (res->needs_sync_complete) {
5844			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5845			res->needs_sync_complete = 0;
5846		}
5847
5848		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5849		if (ipr_is_gscsi(res))
5850			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5851		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5852		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5853	}
5854
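	/* Vendor-specific CDBs (opcode 0xC0 and above) for non-SCSI resources, and Query Resource State, are serviced by the IOA itself rather than passed through */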
5855	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5856	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5857		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5858
5859	if (likely(rc == 0)) {
5860		if (ioa_cfg->sis64)
5861			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5862		else
5863			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5864	}
5865
5866	if (likely(rc == 0)) {
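		/* Make sure the IOARCB and IOADL are visible in memory before telling the adapter to fetch the command */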
5867		mb();
5868		ipr_send_command(ipr_cmd);
5869	} else {
5870		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5871		return SCSI_MLQUEUE_HOST_BUSY;
5872	}
5873
5874	return 0;
5875}
5876
5877static DEF_SCSI_QCMD(ipr_queuecommand)
5878
5879/**
5880 * ipr_ioctl - IOCTL handler
5881 * @sdev:	scsi device struct
5882 * @cmd:	IOCTL cmd
5883 * @arg:	IOCTL arg
5884 *
5885 * Return value:
5886 * 	0 on success / other on failure
5887 **/
5888static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5889{
5890	struct ipr_resource_entry *res;
5891
5892	res = (struct ipr_resource_entry *)sdev->hostdata;
5893	if (res && ipr_is_gata(res)) {
5894		if (cmd == HDIO_GET_IDENTITY)
5895			return -ENOTTY;
5896		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5897	}
5898
5899	return -EINVAL;
5900}
5901
5902/**
5903 * ipr_ioa_info - Get information about the card/driver
5904 * @host:	scsi host struct
5905 *
5906 * Return value:
5907 * 	pointer to buffer with description string
5908 **/
5909static const char *ipr_ioa_info(struct Scsi_Host *host)
5910{
5911	static char buffer[512];
5912	struct ipr_ioa_cfg *ioa_cfg;
5913	unsigned long lock_flags = 0;
5914
5915	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5916
5917	spin_lock_irqsave(host->host_lock, lock_flags);
5918	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5919	spin_unlock_irqrestore(host->host_lock, lock_flags);
5920
5921	return buffer;
5922}
5923
5924static struct scsi_host_template driver_template = {
5925	.module = THIS_MODULE,
5926	.name = "IPR",
5927	.info = ipr_ioa_info,
5928	.ioctl = ipr_ioctl,
5929	.queuecommand = ipr_queuecommand,
5930	.eh_abort_handler = ipr_eh_abort,
5931	.eh_device_reset_handler = ipr_eh_dev_reset,
5932	.eh_host_reset_handler = ipr_eh_host_reset,
5933	.slave_alloc = ipr_slave_alloc,
5934	.slave_configure = ipr_slave_configure,
5935	.slave_destroy = ipr_slave_destroy,
5936	.target_alloc = ipr_target_alloc,
5937	.target_destroy = ipr_target_destroy,
5938	.change_queue_depth = ipr_change_queue_depth,
5939	.change_queue_type = ipr_change_queue_type,
5940	.bios_param = ipr_biosparam,
5941	.can_queue = IPR_MAX_COMMANDS,
5942	.this_id = -1,
5943	.sg_tablesize = IPR_MAX_SGLIST,
5944	.max_sectors = IPR_IOA_MAX_SECTORS,
5945	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5946	.use_clustering = ENABLE_CLUSTERING,
5947	.shost_attrs = ipr_ioa_attrs,
5948	.sdev_attrs = ipr_dev_attrs,
5949	.proc_name = IPR_NAME
5950};
5951
5952/**
5953 * ipr_ata_phy_reset - libata phy_reset handler
5954 * @ap:		ata port to reset
5955 *
5956 **/
5957static void ipr_ata_phy_reset(struct ata_port *ap)
5958{
5959	unsigned long flags;
5960	struct ipr_sata_port *sata_port = ap->private_data;
5961	struct ipr_resource_entry *res = sata_port->res;
5962	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5963	int rc;
5964
5965	ENTER;
5966	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
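	/* Wait out any in-progress adapter reset/reload, dropping the host lock while we sleep */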
5967	while (ioa_cfg->in_reset_reload) {
5968		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5969		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5970		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5971	}
5972
5973	if (!ioa_cfg->allow_cmds)
5974		goto out_unlock;
5975
5976	rc = ipr_device_reset(ioa_cfg, res);
5977
5978	if (rc) {
5979		ap->link.device[0].class = ATA_DEV_NONE;
5980		goto out_unlock;
5981	}
5982
5983	ap->link.device[0].class = res->ata_class;
5984	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5985		ap->link.device[0].class = ATA_DEV_NONE;
5986
5987out_unlock:
5988	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5989	LEAVE;
5990}
5991
5992/**
5993 * ipr_ata_post_internal - Cleanup after an internal command
5994 * @qc:	ATA queued command
5995 *
5996 * Return value:
5997 * 	none
5998 **/
5999static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6000{
6001	struct ipr_sata_port *sata_port = qc->ap->private_data;
6002	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6003	struct ipr_cmnd *ipr_cmd;
6004	unsigned long flags;
6005
6006	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6007	while (ioa_cfg->in_reset_reload) {
6008		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6009		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6010		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6011	}
6012
6013	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
6014		if (ipr_cmd->qc == qc) {
6015			ipr_device_reset(ioa_cfg, sata_port->res);
6016			break;
6017		}
6018	}
6019	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6020}
6021
6022/**
6023 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6024 * @regs:	destination
6025 * @tf:	source ATA taskfile
6026 *
6027 * Return value:
6028 * 	none
6029 **/
6030static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6031			     struct ata_taskfile *tf)
6032{
6033	regs->feature = tf->feature;
6034	regs->nsect = tf->nsect;
6035	regs->lbal = tf->lbal;
6036	regs->lbam = tf->lbam;
6037	regs->lbah = tf->lbah;
6038	regs->device = tf->device;
6039	regs->command = tf->command;
6040	regs->hob_feature = tf->hob_feature;
6041	regs->hob_nsect = tf->hob_nsect;
6042	regs->hob_lbal = tf->hob_lbal;
6043	regs->hob_lbam = tf->hob_lbam;
6044	regs->hob_lbah = tf->hob_lbah;
6045	regs->ctl = tf->ctl;
6046}
6047
6048/**
6049 * ipr_sata_done - done function for SATA commands
6050 * @ipr_cmd:	ipr command struct
6051 *
6052 * This function is invoked by the interrupt handler for
6053 * ops generated by the SCSI mid-layer to SATA devices
6054 *
6055 * Return value:
6056 * 	none
6057 **/
6058static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6059{
6060	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6061	struct ata_queued_cmd *qc = ipr_cmd->qc;
6062	struct ipr_sata_port *sata_port = qc->ap->private_data;
6063	struct ipr_resource_entry *res = sata_port->res;
6064	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6065
6066	if (ipr_cmd->ioa_cfg->sis64)
6067		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6068		       sizeof(struct ipr_ioasa_gata));
6069	else
6070		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6071		       sizeof(struct ipr_ioasa_gata));
6072	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6073
6074	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6075		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6076
6077	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6078		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6079	else
6080		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6081	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6082	ata_qc_complete(qc);
6083}
6084
6085/**
6086 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6087 * @ipr_cmd:	ipr command struct
6088 * @qc:		ATA queued command
6089 *
6090 **/
6091static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6092				  struct ata_queued_cmd *qc)
6093{
6094	u32 ioadl_flags = 0;
6095	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6096	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6097	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6098	int len = qc->nbytes;
6099	struct scatterlist *sg;
6100	unsigned int si;
6101	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6102
6103	if (len == 0)
6104		return;
6105
6106	if (qc->dma_dir == DMA_TO_DEVICE) {
6107		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6108		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6109	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6110		ioadl_flags = IPR_IOADL_FLAGS_READ;
6111
6112	ioarcb->data_transfer_length = cpu_to_be32(len);
6113	ioarcb->ioadl_len =
6114		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6115	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6116		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6117
6118	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6119		ioadl64->flags = cpu_to_be32(ioadl_flags);
6120		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6121		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6122
6123		last_ioadl64 = ioadl64;
6124		ioadl64++;
6125	}
6126
6127	if (likely(last_ioadl64))
6128		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6129}
6130
6131/**
6132 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6133 * @ipr_cmd:	ipr command struct
6134 * @qc:		ATA queued command
6135 *
6136 **/
6137static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6138				struct ata_queued_cmd *qc)
6139{
6140	u32 ioadl_flags = 0;
6141	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6142	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6143	struct ipr_ioadl_desc *last_ioadl = NULL;
6144	int len = qc->nbytes;
6145	struct scatterlist *sg;
6146	unsigned int si;
6147
6148	if (len == 0)
6149		return;
6150
6151	if (qc->dma_dir == DMA_TO_DEVICE) {
6152		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6153		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6154		ioarcb->data_transfer_length = cpu_to_be32(len);
6155		ioarcb->ioadl_len =
6156			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6157	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6158		ioadl_flags = IPR_IOADL_FLAGS_READ;
6159		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6160		ioarcb->read_ioadl_len =
6161			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6162	}
6163
6164	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6165		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6166		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6167
6168		last_ioadl = ioadl;
6169		ioadl++;
6170	}
6171
6172	if (likely(last_ioadl))
6173		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6174}
6175
6176/**
6177 * ipr_qc_issue - Issue a SATA qc to a device
6178 * @qc:	queued command
6179 *
6180 * Return value:
6181 * 	0 if success / other on failure
6182 **/
6183static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6184{
6185	struct ata_port *ap = qc->ap;
6186	struct ipr_sata_port *sata_port = ap->private_data;
6187	struct ipr_resource_entry *res = sata_port->res;
6188	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6189	struct ipr_cmnd *ipr_cmd;
6190	struct ipr_ioarcb *ioarcb;
6191	struct ipr_ioarcb_ata_regs *regs;
6192
6193	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6194		return AC_ERR_SYSTEM;
6195
6196	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6197	ioarcb = &ipr_cmd->ioarcb;
6198
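	/* SIS-64 adapters take the ATA register block appended after the IOARCB; SIS-32 adapters use the add_data area inside the IOARCB */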
6199	if (ioa_cfg->sis64) {
6200		regs = &ipr_cmd->i.ata_ioadl.regs;
6201		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6202	} else
6203		regs = &ioarcb->u.add_data.u.regs;
6204
6205	memset(regs, 0, sizeof(*regs));
6206	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6207
6208	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6209	ipr_cmd->qc = qc;
6210	ipr_cmd->done = ipr_sata_done;
6211	ipr_cmd->ioarcb.res_handle = res->res_handle;
6212	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6213	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6214	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6215	ipr_cmd->dma_use_sg = qc->n_elem;
6216
6217	if (ioa_cfg->sis64)
6218		ipr_build_ata_ioadl64(ipr_cmd, qc);
6219	else
6220		ipr_build_ata_ioadl(ipr_cmd, qc);
6221
6222	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6223	ipr_copy_sata_tf(regs, &qc->tf);
6224	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6225	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6226
6227	switch (qc->tf.protocol) {
6228	case ATA_PROT_NODATA:
6229	case ATA_PROT_PIO:
6230		break;
6231
6232	case ATA_PROT_DMA:
6233		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6234		break;
6235
6236	case ATAPI_PROT_PIO:
6237	case ATAPI_PROT_NODATA:
6238		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6239		break;
6240
6241	case ATAPI_PROT_DMA:
6242		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6243		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6244		break;
6245
6246	default:
6247		WARN_ON(1);
6248		return AC_ERR_INVALID;
6249	}
6250
6251	mb();
6252
6253	ipr_send_command(ipr_cmd);
6254
6255	return 0;
6256}
6257
6258/**
6259 * ipr_qc_fill_rtf - Read result TF
6260 * @qc: ATA queued command
6261 *
6262 * Return value:
6263 * 	true
6264 **/
6265static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6266{
6267	struct ipr_sata_port *sata_port = qc->ap->private_data;
6268	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6269	struct ata_taskfile *tf = &qc->result_tf;
6270
6271	tf->feature = g->error;
6272	tf->nsect = g->nsect;
6273	tf->lbal = g->lbal;
6274	tf->lbam = g->lbam;
6275	tf->lbah = g->lbah;
6276	tf->device = g->device;
6277	tf->command = g->status;
6278	tf->hob_nsect = g->hob_nsect;
6279	tf->hob_lbal = g->hob_lbal;
6280	tf->hob_lbam = g->hob_lbam;
6281	tf->hob_lbah = g->hob_lbah;
6282	tf->ctl = g->alt_status;
6283
6284	return true;
6285}
6286
6287static struct ata_port_operations ipr_sata_ops = {
6288	.phy_reset = ipr_ata_phy_reset,
6289	.hardreset = ipr_sata_reset,
6290	.post_internal_cmd = ipr_ata_post_internal,
6291	.qc_prep = ata_noop_qc_prep,
6292	.qc_issue = ipr_qc_issue,
6293	.qc_fill_rtf = ipr_qc_fill_rtf,
6294	.port_start = ata_sas_port_start,
6295	.port_stop = ata_sas_port_stop
6296};
6297
6298static struct ata_port_info sata_port_info = {
6299	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6300	.pio_mask	= ATA_PIO4_ONLY,
6301	.mwdma_mask	= ATA_MWDMA2,
6302	.udma_mask	= ATA_UDMA6,
6303	.port_ops	= &ipr_sata_ops
6304};
6305
6306#ifdef CONFIG_PPC_PSERIES
6307static const u16 ipr_blocked_processors[] = {
6308	PV_NORTHSTAR,
6309	PV_PULSAR,
6310	PV_POWER4,
6311	PV_ICESTAR,
6312	PV_SSTAR,
6313	PV_POWER4p,
6314	PV_630,
6315	PV_630p
6316};
6317
6318/**
6319 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6320 * @ioa_cfg:	ioa cfg struct
6321 *
6322 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6323 * certain pSeries hardware. This function determines if the given
6324 * adapter is in one of these configurations or not.
6325 *
6326 * Return value:
6327 * 	1 if adapter is not supported / 0 if adapter is supported
6328 **/
6329static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6330{
6331	int i;
6332
6333	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6334		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6335			if (__is_processor(ipr_blocked_processors[i]))
6336				return 1;
6337		}
6338	}
6339	return 0;
6340}
6341#else
6342#define ipr_invalid_adapter(ioa_cfg) 0
6343#endif
6344
6345/**
6346 * ipr_ioa_bringdown_done - IOA bring down completion.
6347 * @ipr_cmd:	ipr command struct
6348 *
6349 * This function processes the completion of an adapter bring down.
6350 * It wakes any reset sleepers.
6351 *
6352 * Return value:
6353 * 	IPR_RC_JOB_RETURN
6354 **/
6355static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6356{
6357	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6358
6359	ENTER;
6360	ioa_cfg->in_reset_reload = 0;
6361	ioa_cfg->reset_retries = 0;
6362	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6363	wake_up_all(&ioa_cfg->reset_wait_q);
6364
6365	spin_unlock_irq(ioa_cfg->host->host_lock);
6366	scsi_unblock_requests(ioa_cfg->host);
6367	spin_lock_irq(ioa_cfg->host->host_lock);
6368	LEAVE;
6369
6370	return IPR_RC_JOB_RETURN;
6371}
6372
6373/**
6374 * ipr_ioa_reset_done - IOA reset completion.
6375 * @ipr_cmd:	ipr command struct
6376 *
6377 * This function processes the completion of an adapter reset.
6378 * It schedules any necessary mid-layer add/removes and
6379 * wakes any reset sleepers.
6380 *
6381 * Return value:
6382 * 	IPR_RC_JOB_RETURN
6383 **/
6384static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6385{
6386	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6387	struct ipr_resource_entry *res;
6388	struct ipr_hostrcb *hostrcb, *temp;
6389	int i = 0;
6390
6391	ENTER;
6392	ioa_cfg->in_reset_reload = 0;
6393	ioa_cfg->allow_cmds = 1;
6394	ioa_cfg->reset_cmd = NULL;
6395	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6396
6397	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6398		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6399			ipr_trace;
6400			break;
6401		}
6402	}
6403	schedule_work(&ioa_cfg->work_q);
6404
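	/* Re-post all free HCAM buffers: the first IPR_NUM_LOG_HCAMS for error log data, the remainder for configuration change notifications */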
6405	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6406		list_del(&hostrcb->queue);
6407		if (i++ < IPR_NUM_LOG_HCAMS)
6408			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6409		else
6410			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6411	}
6412
6413	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6414	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6415
6416	ioa_cfg->reset_retries = 0;
6417	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6418	wake_up_all(&ioa_cfg->reset_wait_q);
6419
6420	spin_unlock(ioa_cfg->host->host_lock);
6421	scsi_unblock_requests(ioa_cfg->host);
6422	spin_lock(ioa_cfg->host->host_lock);
6423
6424	if (!ioa_cfg->allow_cmds)
6425		scsi_block_requests(ioa_cfg->host);
6426
6427	LEAVE;
6428	return IPR_RC_JOB_RETURN;
6429}
6430
6431/**
6432 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6433 * @supported_dev:	supported device struct
6434 * @vpids:			vendor product id struct
6435 *
6436 * Return value:
6437 * 	none
6438 **/
6439static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6440				 struct ipr_std_inq_vpids *vpids)
6441{
6442	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6443	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6444	supported_dev->num_records = 1;
6445	supported_dev->data_length =
6446		cpu_to_be16(sizeof(struct ipr_supported_device));
6447	supported_dev->reserved = 0;
6448}
6449
6450/**
6451 * ipr_set_supported_devs - Send Set Supported Devices for a device
6452 * @ipr_cmd:	ipr command struct
6453 *
6454 * This function sends a Set Supported Devices to the adapter
6455 *
6456 * Return value:
6457 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6458 **/
6459static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6460{
6461	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6462	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6463	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6464	struct ipr_resource_entry *res = ipr_cmd->u.res;
6465
6466	ipr_cmd->job_step = ipr_ioa_reset_done;
6467
6468	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6469		if (!ipr_is_scsi_disk(res))
6470			continue;
6471
6472		ipr_cmd->u.res = res;
6473		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6474
6475		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6476		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6477		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6478
6479		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6480		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6481		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6482		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6483
6484		ipr_init_ioadl(ipr_cmd,
6485			       ioa_cfg->vpd_cbs_dma +
6486				 offsetof(struct ipr_misc_cbs, supp_dev),
6487			       sizeof(struct ipr_supported_device),
6488			       IPR_IOADL_FLAGS_WRITE_LAST);
6489
6490		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6491			   IPR_SET_SUP_DEVICE_TIMEOUT);
6492
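		/* Non-SIS64 adapters need a Set Supported Devices per disk, so schedule this step again; SIS64 adapters proceed straight to ipr_ioa_reset_done */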
6493		if (!ioa_cfg->sis64)
6494			ipr_cmd->job_step = ipr_set_supported_devs;
6495		return IPR_RC_JOB_RETURN;
6496	}
6497
6498	return IPR_RC_JOB_CONTINUE;
6499}
6500
6501/**
6502 * ipr_get_mode_page - Locate specified mode page
6503 * @mode_pages:	mode page buffer
6504 * @page_code:	page code to find
6505 * @len:		minimum required length for mode page
6506 *
6507 * Return value:
6508 * 	pointer to mode page / NULL on failure
6509 **/
6510static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6511			       u32 page_code, u32 len)
6512{
6513	struct ipr_mode_page_hdr *mode_hdr;
6514	u32 page_length;
6515	u32 length;
6516
6517	if (!mode_pages || (mode_pages->hdr.length == 0))
6518		return NULL;
6519
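	/* hdr.length excludes itself, so total data is length + 1; subtract the 4-byte parameter header and any block descriptors to get the length of the pages */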
6520	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6521	mode_hdr = (struct ipr_mode_page_hdr *)
6522		(mode_pages->data + mode_pages->hdr.block_desc_len);
6523
6524	while (length) {
6525		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6526			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6527				return mode_hdr;
6528			break;
6529		} else {
6530			page_length = (sizeof(struct ipr_mode_page_hdr) +
6531				       mode_hdr->page_length);
6532			length -= page_length;
6533			mode_hdr = (struct ipr_mode_page_hdr *)
6534				((unsigned long)mode_hdr + page_length);
6535		}
6536	}
6537	return NULL;
6538}
6539
6540/**
6541 * ipr_check_term_power - Check for term power errors
6542 * @ioa_cfg:	ioa config struct
6543 * @mode_pages:	IOAFP mode pages buffer
6544 *
6545 * Check the IOAFP's mode page 28 for term power errors
6546 *
6547 * Return value:
6548 * 	nothing
6549 **/
6550static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6551				 struct ipr_mode_pages *mode_pages)
6552{
6553	int i;
6554	int entry_length;
6555	struct ipr_dev_bus_entry *bus;
6556	struct ipr_mode_page28 *mode_page;
6557
6558	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6559				      sizeof(struct ipr_mode_page28));
6560
6561	entry_length = mode_page->entry_length;
6562
6563	bus = mode_page->bus;
6564
6565	for (i = 0; i < mode_page->num_entries; i++) {
6566		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6567			dev_err(&ioa_cfg->pdev->dev,
6568				"Term power is absent on scsi bus %d\n",
6569				bus->res_addr.bus);
6570		}
6571
6572		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6573	}
6574}
6575
6576/**
6577 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6578 * @ioa_cfg:	ioa config struct
6579 *
6580 * Looks through the config table checking for SES devices. If
6581 * the SES device is in the SES table indicating a maximum SCSI
6582 * bus speed, the speed is limited for the bus.
6583 *
6584 * Return value:
6585 * 	none
6586 **/
6587static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6588{
6589	u32 max_xfer_rate;
6590	int i;
6591
6592	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6593		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6594						       ioa_cfg->bus_attr[i].bus_width);
6595
6596		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6597			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6598	}
6599}
6600
6601/**
6602 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6603 * @ioa_cfg:	ioa config struct
6604 * @mode_pages:	mode page 28 buffer
6605 *
6606 * Updates mode page 28 based on driver configuration
6607 *
6608 * Return value:
6609 * 	none
6610 **/
6611static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6612					  	struct ipr_mode_pages *mode_pages)
6613{
6614	int i, entry_length;
6615	struct ipr_dev_bus_entry *bus;
6616	struct ipr_bus_attributes *bus_attr;
6617	struct ipr_mode_page28 *mode_page;
6618
6619	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6620				      sizeof(struct ipr_mode_page28));
6621
6622	entry_length = mode_page->entry_length;
6623
6624	/* Loop for each device bus entry */
6625	for (i = 0, bus = mode_page->bus;
6626	     i < mode_page->num_entries;
6627	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6628		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6629			dev_err(&ioa_cfg->pdev->dev,
6630				"Invalid resource address reported: 0x%08X\n",
6631				IPR_GET_PHYS_LOC(bus->res_addr));
6632			continue;
6633		}
6634
6635		bus_attr = &ioa_cfg->bus_attr[i];
6636		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6637		bus->bus_width = bus_attr->bus_width;
6638		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6639		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6640		if (bus_attr->qas_enabled)
6641			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6642		else
6643			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6644	}
6645}
6646
6647/**
6648 * ipr_build_mode_select - Build a mode select command
6649 * @ipr_cmd:	ipr command struct
6650 * @res_handle:	resource handle to send command to
6651 * @parm:		byte 1 of the Mode Select CDB (PF/SP flags)
6652 * @dma_addr:	DMA buffer address
6653 * @xfer_len:	data transfer length
6654 *
6655 * Return value:
6656 * 	none
6657 **/
6658static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6659				  __be32 res_handle, u8 parm,
6660				  dma_addr_t dma_addr, u8 xfer_len)
6661{
6662	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6663
6664	ioarcb->res_handle = res_handle;
6665	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6666	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6667	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6668	ioarcb->cmd_pkt.cdb[1] = parm;
6669	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6670
6671	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6672}
6673
6674/**
6675 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6676 * @ipr_cmd:	ipr command struct
6677 *
6678 * This function sets up the SCSI bus attributes and sends
6679 * a Mode Select for Page 28 to activate them.
6680 *
6681 * Return value:
6682 * 	IPR_RC_JOB_RETURN
6683 **/
6684static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6685{
6686	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6687	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6688	int length;
6689
6690	ENTER;
6691	ipr_scsi_bus_speed_limit(ioa_cfg);
6692	ipr_check_term_power(ioa_cfg, mode_pages);
6693	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6694	length = mode_pages->hdr.length + 1;
6695	mode_pages->hdr.length = 0;
6696
6697	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6698			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6699			      length);
6700
6701	ipr_cmd->job_step = ipr_set_supported_devs;
6702	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6703				    struct ipr_resource_entry, queue);
6704	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6705
6706	LEAVE;
6707	return IPR_RC_JOB_RETURN;
6708}
6709
6710/**
6711 * ipr_build_mode_sense - Builds a mode sense command
6712 * @ipr_cmd:	ipr command struct
6713 * @res_handle:	resource handle to send command to
6714 * @parm:		Byte 2 of mode sense command
6715 * @dma_addr:	DMA address of mode sense buffer
6716 * @xfer_len:	Size of DMA buffer
6717 *
6718 * Return value:
6719 * 	none
6720 **/
6721static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6722				 __be32 res_handle,
6723				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6724{
6725	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6726
6727	ioarcb->res_handle = res_handle;
6728	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6729	ioarcb->cmd_pkt.cdb[2] = parm;
6730	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6731	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6732
6733	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6734}
6735
6736/**
6737 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6738 * @ipr_cmd:	ipr command struct
6739 *
6740 * This function handles the failure of an IOA bringup command.
6741 *
6742 * Return value:
6743 * 	IPR_RC_JOB_RETURN
6744 **/
6745static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6746{
6747	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6748	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6749
6750	dev_err(&ioa_cfg->pdev->dev,
6751		"0x%02X failed with IOASC: 0x%08X\n",
6752		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6753
6754	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6755	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6756	return IPR_RC_JOB_RETURN;
6757}
6758
6759/**
6760 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6761 * @ipr_cmd:	ipr command struct
6762 *
6763 * This function handles the failure of a Mode Sense to the IOAFP.
6764 * Some adapters do not handle all mode pages.
6765 *
6766 * Return value:
6767 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6768 **/
6769static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6770{
6771	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6772	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6773
6774	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6775		ipr_cmd->job_step = ipr_set_supported_devs;
6776		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6777					    struct ipr_resource_entry, queue);
6778		return IPR_RC_JOB_CONTINUE;
6779	}
6780
6781	return ipr_reset_cmd_failed(ipr_cmd);
6782}
6783
6784/**
6785 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6786 * @ipr_cmd:	ipr command struct
6787 *
6788 * This function sends a Page 28 mode sense to the IOA to
6789 * retrieve SCSI bus attributes.
6790 *
6791 * Return value:
6792 * 	IPR_RC_JOB_RETURN
6793 **/
6794static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6795{
6796	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6797
6798	ENTER;
6799	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6800			     0x28, ioa_cfg->vpd_cbs_dma +
6801			     offsetof(struct ipr_misc_cbs, mode_pages),
6802			     sizeof(struct ipr_mode_pages));
6803
6804	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6805	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6806
6807	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6808
6809	LEAVE;
6810	return IPR_RC_JOB_RETURN;
6811}
6812
6813/**
6814 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6815 * @ipr_cmd:	ipr command struct
6816 *
6817 * This function enables dual IOA RAID support if possible.
6818 *
6819 * Return value:
6820 * 	IPR_RC_JOB_RETURN
6821 **/
6822static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6823{
6824	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6825	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6826	struct ipr_mode_page24 *mode_page;
6827	int length;
6828
6829	ENTER;
6830	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6831				      sizeof(struct ipr_mode_page24));
6832
6833	if (mode_page)
6834		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6835
6836	length = mode_pages->hdr.length + 1;
6837	mode_pages->hdr.length = 0;
6838
6839	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6840			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6841			      length);
6842
6843	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6844	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6845
6846	LEAVE;
6847	return IPR_RC_JOB_RETURN;
6848}
6849
6850/**
6851 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6852 * @ipr_cmd:	ipr command struct
6853 *
6854 * This function handles the failure of a Mode Sense to the IOAFP.
6855 * Some adapters do not handle all mode pages.
6856 *
6857 * Return value:
6858 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6859 **/
6860static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6861{
6862	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6863
6864	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6865		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6866		return IPR_RC_JOB_CONTINUE;
6867	}
6868
6869	return ipr_reset_cmd_failed(ipr_cmd);
6870}
6871
6872/**
6873 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6874 * @ipr_cmd:	ipr command struct
6875 *
6876 * This function sends a mode sense to the IOA to retrieve
6877 * the IOA Advanced Function Control mode page.
6878 *
6879 * Return value:
6880 * 	IPR_RC_JOB_RETURN
6881 **/
6882static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6883{
6884	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6885
6886	ENTER;
6887	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6888			     0x24, ioa_cfg->vpd_cbs_dma +
6889			     offsetof(struct ipr_misc_cbs, mode_pages),
6890			     sizeof(struct ipr_mode_pages));
6891
6892	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6893	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6894
6895	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6896
6897	LEAVE;
6898	return IPR_RC_JOB_RETURN;
6899}
6900
6901/**
6902 * ipr_init_res_table - Initialize the resource table
6903 * @ipr_cmd:	ipr command struct
6904 *
6905 * This function looks through the existing resource table, comparing
6906 * it with the config table. This function will take care of old/new
6907 * devices and schedule adding/removing them from the mid-layer
6908 * as appropriate.
6909 *
6910 * Return value:
6911 * 	IPR_RC_JOB_CONTINUE
6912 **/
6913static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6914{
6915	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6916	struct ipr_resource_entry *res, *temp;
6917	struct ipr_config_table_entry_wrapper cfgtew;
6918	int entries, found, flag, i;
6919	LIST_HEAD(old_res);
6920
6921	ENTER;
6922	if (ioa_cfg->sis64)
6923		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6924	else
6925		flag = ioa_cfg->u.cfg_table->hdr.flags;
6926
6927	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6928		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6929
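	/* Park every known resource on old_res, then match each entry of the newly read config table back against it below */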
6930	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6931		list_move_tail(&res->queue, &old_res);
6932
6933	if (ioa_cfg->sis64)
6934		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6935	else
6936		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6937
6938	for (i = 0; i < entries; i++) {
6939		if (ioa_cfg->sis64)
6940			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6941		else
6942			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6943		found = 0;
6944
6945		list_for_each_entry_safe(res, temp, &old_res, queue) {
6946			if (ipr_is_same_device(res, &cfgtew)) {
6947				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6948				found = 1;
6949				break;
6950			}
6951		}
6952
6953		if (!found) {
6954			if (list_empty(&ioa_cfg->free_res_q)) {
6955				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6956				break;
6957			}
6958
6959			found = 1;
6960			res = list_entry(ioa_cfg->free_res_q.next,
6961					 struct ipr_resource_entry, queue);
6962			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6963			ipr_init_res_entry(res, &cfgtew);
6964			res->add_to_ml = 1;
6965		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6966			res->sdev->allow_restart = 1;
6967
6968		if (found)
6969			ipr_update_res_entry(res, &cfgtew);
6970	}
6971
6972	list_for_each_entry_safe(res, temp, &old_res, queue) {
6973		if (res->sdev) {
6974			res->del_from_ml = 1;
6975			res->res_handle = IPR_INVALID_RES_HANDLE;
6976			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6977		}
6978	}
6979
6980	list_for_each_entry_safe(res, temp, &old_res, queue) {
6981		ipr_clear_res_target(res);
6982		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6983	}
6984
6985	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6986		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6987	else
6988		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6989
6990	LEAVE;
6991	return IPR_RC_JOB_CONTINUE;
6992}
6993
6994/**
6995 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6996 * @ipr_cmd:	ipr command struct
6997 *
6998 * This function sends a Query IOA Configuration command
6999 * to the adapter to retrieve the IOA configuration table.
7000 *
7001 * Return value:
7002 * 	IPR_RC_JOB_RETURN
7003 **/
7004static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7005{
7006	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7007	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7008	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7009	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7010
7011	ENTER;
7012	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7013		ioa_cfg->dual_raid = 1;
7014	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7015		 ucode_vpd->major_release, ucode_vpd->card_type,
7016		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7017	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7018	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7019
7020	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7021	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7022	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7023	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7024
7025	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7026		       IPR_IOADL_FLAGS_READ_LAST);
7027
7028	ipr_cmd->job_step = ipr_init_res_table;
7029
7030	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7031
7032	LEAVE;
7033	return IPR_RC_JOB_RETURN;
7034}
7035
7036/**
7037 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7038 * @ipr_cmd:	ipr command struct
7039 *
7040 * This utility function sends an inquiry to the adapter.
7041 *
7042 * Return value:
7043 * 	none
7044 **/
7045static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7046			      dma_addr_t dma_addr, u8 xfer_len)
7047{
7048	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7049
7050	ENTER;
7051	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7052	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7053
7054	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7055	ioarcb->cmd_pkt.cdb[1] = flags;
7056	ioarcb->cmd_pkt.cdb[2] = page;
7057	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7058
7059	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7060
7061	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7062	LEAVE;
7063}
7064
7065/**
7066 * ipr_inquiry_page_supported - Is the given inquiry page supported
7067 * @page0:		inquiry page 0 buffer
7068 * @page:		page code.
7069 *
7070 * This function determines if the specified inquiry page is supported.
7071 *
7072 * Return value:
7073 *	1 if page is supported / 0 if not
7074 **/
7075static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7076{
7077	int i;
7078
7079	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7080		if (page0->page[i] == page)
7081			return 1;
7082
7083	return 0;
7084}
7085
7086/**
7087 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7088 * @ipr_cmd:	ipr command struct
7089 *
7090 * This function sends a Page 0xD0 inquiry to the adapter
7091 * to retrieve adapter capabilities.
7092 *
7093 * Return value:
7094 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7095 **/
7096static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7097{
7098	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7099	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7100	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7101
7102	ENTER;
7103	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7104	memset(cap, 0, sizeof(*cap));
7105
7106	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7107		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7108				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7109				  sizeof(struct ipr_inquiry_cap));
7110		return IPR_RC_JOB_RETURN;
7111	}
7112
7113	LEAVE;
7114	return IPR_RC_JOB_CONTINUE;
7115}
7116
7117/**
7118 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7119 * @ipr_cmd:	ipr command struct
7120 *
7121 * This function sends a Page 3 inquiry to the adapter
7122 * to retrieve software VPD information.
7123 *
7124 * Return value:
7125 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7126 **/
7127static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7128{
7129	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7130
7131	ENTER;
7132
7133	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7134
7135	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7136			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7137			  sizeof(struct ipr_inquiry_page3));
7138
7139	LEAVE;
7140	return IPR_RC_JOB_RETURN;
7141}
7142
7143/**
7144 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7145 * @ipr_cmd:	ipr command struct
7146 *
7147 * This function sends a Page 0 inquiry to the adapter
7148 * to retrieve supported inquiry pages.
7149 *
7150 * Return value:
7151 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7152 **/
7153static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7154{
7155	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7156	char type[5];
7157
7158	ENTER;
7159
7160	/* Grab the type out of the VPD and store it away */
7161	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7162	type[4] = '\0';
7163	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7164
7165	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7166
7167	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7168			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7169			  sizeof(struct ipr_inquiry_page0));
7170
7171	LEAVE;
7172	return IPR_RC_JOB_RETURN;
7173}
7174
7175/**
7176 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7177 * @ipr_cmd:	ipr command struct
7178 *
7179 * This function sends a standard inquiry to the adapter.
7180 *
7181 * Return value:
7182 * 	IPR_RC_JOB_RETURN
7183 **/
7184static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7185{
7186	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7187
7188	ENTER;
7189	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7190
7191	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7192			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7193			  sizeof(struct ipr_ioa_vpd));
7194
7195	LEAVE;
7196	return IPR_RC_JOB_RETURN;
7197}
7198
7199/**
7200 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7201 * @ipr_cmd:	ipr command struct
7202 *
7203 * This function sends an Identify Host Request Response Queue
7204 * command to establish the HRRQ with the adapter.
7205 *
7206 * Return value:
7207 * 	IPR_RC_JOB_RETURN
7208 **/
7209static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7210{
7211	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7212	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7213
7214	ENTER;
7215	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7216
7217	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7218	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7219
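	/*
	 * CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA address
	 * (MSB first) and bytes 7-8 the queue size in bytes; on SIS-64
	 * adapters bytes 10-13 add the upper 32 bits of the address.
	 */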
7220	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7221	if (ioa_cfg->sis64)
7222		ioarcb->cmd_pkt.cdb[1] = 0x1;
7223	ioarcb->cmd_pkt.cdb[2] =
7224		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7225	ioarcb->cmd_pkt.cdb[3] =
7226		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7227	ioarcb->cmd_pkt.cdb[4] =
7228		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7229	ioarcb->cmd_pkt.cdb[5] =
7230		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7231	ioarcb->cmd_pkt.cdb[7] =
7232		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7233	ioarcb->cmd_pkt.cdb[8] =
7234		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7235
7236	if (ioa_cfg->sis64) {
7237		ioarcb->cmd_pkt.cdb[10] =
7238			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7239		ioarcb->cmd_pkt.cdb[11] =
7240			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7241		ioarcb->cmd_pkt.cdb[12] =
7242			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7243		ioarcb->cmd_pkt.cdb[13] =
7244			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7245	}
7246
7247	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7248
7249	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7250
7251	LEAVE;
7252	return IPR_RC_JOB_RETURN;
7253}
7254
7255/**
7256 * ipr_reset_timer_done - Adapter reset timer function
7257 * @ipr_cmd:	ipr command struct
7258 *
7259 * Description: This function is used in adapter reset processing
7260 * for timing events. If the reset_cmd pointer in the IOA
7261 * config struct does not point to this command, we are doing
7262 * nested resets and fail_all_ops will take care of freeing
7263 * the command block.
7264 *
7265 * Return value:
7266 * 	none
7267 **/
7268static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7269{
7270	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7271	unsigned long lock_flags = 0;
7272
7273	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7274
7275	if (ioa_cfg->reset_cmd == ipr_cmd) {
7276		list_del(&ipr_cmd->queue);
7277		ipr_cmd->done(ipr_cmd);
7278	}
7279
7280	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7281}
7282
7283/**
7284 * ipr_reset_start_timer - Start a timer for adapter reset job
7285 * @ipr_cmd:	ipr command struct
7286 * @timeout:	timeout value
7287 *
7288 * Description: This function is used in adapter reset processing
7289 * for timing events. If the reset_cmd pointer in the IOA
7290 * config struct does not point to this command, we are doing
7291 * nested resets and fail_all_ops will take care of freeing
7292 * the command block.
7293 *
7294 * Return value:
7295 * 	none
7296 **/
7297static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7298				  unsigned long timeout)
7299{
7300	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7301	ipr_cmd->done = ipr_reset_ioa_job;
7302
7303	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7304	ipr_cmd->timer.expires = jiffies + timeout;
7305	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7306	add_timer(&ipr_cmd->timer);
7307}
7308
7309/**
7310 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7311 * @ioa_cfg:	ioa cfg struct
7312 *
7313 * Return value:
7314 * 	nothing
7315 **/
7316static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7317{
7318	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7319
7320	/* Initialize Host RRQ pointers */
7321	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7322	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7323	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
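	/*
	 * The toggle bit flips each time the queue wraps so the interrupt
	 * handler can tell new responses from stale entries.
	 */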
7324	ioa_cfg->toggle_bit = 1;
7325
7326	/* Zero out config table */
7327	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7328}
7329
7330/**
7331 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7332 * @ipr_cmd:	ipr command struct
7333 *
7334 * Return value:
7335 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7336 **/
7337static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7338{
7339	unsigned long stage, stage_time;
7340	u32 feedback;
7341	volatile u32 int_reg;
7342	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7343	u64 maskval = 0;
7344
7345	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7346	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7347	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7348
7349	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7350
7351	/* sanity check the stage_time value */
7352	if (stage_time == 0)
7353		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7354	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7355		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7356	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7357		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7358
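	/*
	 * For stage UNKNOWN, mask further stage-change interrupts and fall
	 * back to the configured transop timeout before identifying the HRRQ.
	 * For stage TRANSOP, continue the job at once if the adapter has
	 * already signalled the transition to operational.
	 */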
7359	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7360		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7361		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7362		stage_time = ioa_cfg->transop_timeout;
7363		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7364	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7365		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7366		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7367			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7368			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7369			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7370			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7371			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7372			return IPR_RC_JOB_CONTINUE;
7373		}
7374	}
7375
7376	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7377	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7378	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7379	ipr_cmd->done = ipr_reset_ioa_job;
7380	add_timer(&ipr_cmd->timer);
7381	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7382
7383	return IPR_RC_JOB_RETURN;
7384}
7385
7386/**
7387 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7388 * @ipr_cmd:	ipr command struct
7389 *
7390 * This function reinitializes some control blocks and
7391 * enables destructive diagnostics on the adapter.
7392 *
7393 * Return value:
7394 * 	IPR_RC_JOB_RETURN
7395 **/
7396static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7397{
7398	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7399	volatile u32 int_reg;
7400	volatile u64 maskval;
7401
7402	ENTER;
7403	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7404	ipr_init_ioa_mem(ioa_cfg);
7405
7406	ioa_cfg->allow_interrupts = 1;
7407	if (ioa_cfg->sis64) {
7408		/* Set the adapter to the correct endian mode. */
7409		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7410		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7411	}
7412
7413	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7414
7415	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7416		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7417		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7418		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7419		return IPR_RC_JOB_CONTINUE;
7420	}
7421
7422	/* Enable destructive diagnostics on IOA */
7423	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7424
7425	if (ioa_cfg->sis64) {
7426		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7427		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7428		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7429	} else
7430		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7431
7432	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7433
7434	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7435
7436	if (ioa_cfg->sis64) {
7437		ipr_cmd->job_step = ipr_reset_next_stage;
7438		return IPR_RC_JOB_CONTINUE;
7439	}
7440
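	/*
	 * Non-SIS-64 adapters provide no staged init feedback; arm the
	 * operational-transition timeout and let the interrupt handler
	 * resume the reset job when the adapter becomes operational.
	 */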
7441	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7442	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7443	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7444	ipr_cmd->done = ipr_reset_ioa_job;
7445	add_timer(&ipr_cmd->timer);
7446	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7447
7448	LEAVE;
7449	return IPR_RC_JOB_RETURN;
7450}
7451
7452/**
7453 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7454 * @ipr_cmd:	ipr command struct
7455 *
7456 * This function is invoked when an adapter dump has run out
7457 * of processing time.
7458 *
7459 * Return value:
7460 * 	IPR_RC_JOB_CONTINUE
7461 **/
7462static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7463{
7464	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7465
7466	if (ioa_cfg->sdt_state == GET_DUMP)
7467		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7468	else if (ioa_cfg->sdt_state == READ_DUMP)
7469		ioa_cfg->sdt_state = ABORT_DUMP;
7470
7471	ioa_cfg->dump_timeout = 1;
7472	ipr_cmd->job_step = ipr_reset_alert;
7473
7474	return IPR_RC_JOB_CONTINUE;
7475}
7476
7477/**
7478 * ipr_unit_check_no_data - Log a unit check/no data error log
7479 * @ioa_cfg:		ioa config struct
7480 *
7481 * Logs an error indicating the adapter unit checked, but for some
7482 * reason, we were unable to fetch the unit check buffer.
7483 *
7484 * Return value:
7485 * 	nothing
7486 **/
7487static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7488{
7489	ioa_cfg->errors_logged++;
7490	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7491}
7492
7493/**
7494 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7495 * @ioa_cfg:		ioa config struct
7496 *
7497 * Fetches the unit check buffer from the adapter by clocking the data
7498 * through the mailbox register.
7499 *
7500 * Return value:
7501 * 	nothing
7502 **/
7503static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7504{
7505	unsigned long mailbox;
7506	struct ipr_hostrcb *hostrcb;
7507	struct ipr_uc_sdt sdt;
7508	int rc, length;
7509	u32 ioasc;
7510
7511	mailbox = readl(ioa_cfg->ioa_mailbox);
7512
7513	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7514		ipr_unit_check_no_data(ioa_cfg);
7515		return;
7516	}
7517
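	/*
	 * Pull the SDT header through the mailbox; its first entry
	 * describes the unit check buffer itself.
	 */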
7518	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7519	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7520					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7521
7522	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7523	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7524	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7525		ipr_unit_check_no_data(ioa_cfg);
7526		return;
7527	}
7528
7529	/* Find length of the first sdt entry (UC buffer) */
7530	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7531		length = be32_to_cpu(sdt.entry[0].end_token);
7532	else
7533		length = (be32_to_cpu(sdt.entry[0].end_token) -
7534			  be32_to_cpu(sdt.entry[0].start_token)) &
7535			  IPR_FMT2_MBX_ADDR_MASK;
7536
7537	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7538			     struct ipr_hostrcb, queue);
7539	list_del(&hostrcb->queue);
7540	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7541
7542	rc = ipr_get_ldump_data_section(ioa_cfg,
7543					be32_to_cpu(sdt.entry[0].start_token),
7544					(__be32 *)&hostrcb->hcam,
7545					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7546
7547	if (!rc) {
7548		ipr_handle_log_data(ioa_cfg, hostrcb);
7549		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7550		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7551		    ioa_cfg->sdt_state == GET_DUMP)
7552			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7553	} else
7554		ipr_unit_check_no_data(ioa_cfg);
7555
7556	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7557}
7558
7559/**
7560 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7561 * @ipr_cmd:	ipr command struct
7562 *
7563 * Description: This function fetches the unit check buffer from the adapter.
7564 *
7565 * Return value:
7566 *	IPR_RC_JOB_RETURN
7567 **/
7568static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7569{
7570	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7571
7572	ENTER;
7573	ioa_cfg->ioa_unit_checked = 0;
7574	ipr_get_unit_check_buffer(ioa_cfg);
7575	ipr_cmd->job_step = ipr_reset_alert;
7576	ipr_reset_start_timer(ipr_cmd, 0);
7577
7578	LEAVE;
7579	return IPR_RC_JOB_RETURN;
7580}
7581
7582/**
7583 * ipr_reset_restore_cfg_space - Restore PCI config space.
7584 * @ipr_cmd:	ipr command struct
7585 *
7586 * Description: This function restores the saved PCI config space of
7587 * the adapter, fails all outstanding ops back to the callers, and
7588 * fetches the dump/unit check if applicable to this reset.
7589 *
7590 * Return value:
7591 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7592 **/
7593static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7594{
7595	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7596	u32 int_reg;
7597
7598	ENTER;
7599	ioa_cfg->pdev->state_saved = true;
7600	pci_restore_state(ioa_cfg->pdev);
7601
7602	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7603		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7604		return IPR_RC_JOB_CONTINUE;
7605	}
7606
7607	ipr_fail_all_ops(ioa_cfg);
7608
7609	if (ioa_cfg->sis64) {
7610		/* Set the adapter to the correct endian mode. */
7611		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7612		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7613	}
7614
7615	if (ioa_cfg->ioa_unit_checked) {
7616		if (ioa_cfg->sis64) {
7617			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7618			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7619			return IPR_RC_JOB_RETURN;
7620		} else {
7621			ioa_cfg->ioa_unit_checked = 0;
7622			ipr_get_unit_check_buffer(ioa_cfg);
7623			ipr_cmd->job_step = ipr_reset_alert;
7624			ipr_reset_start_timer(ipr_cmd, 0);
7625			return IPR_RC_JOB_RETURN;
7626		}
7627	}
7628
7629	if (ioa_cfg->in_ioa_bringdown) {
7630		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7631	} else {
7632		ipr_cmd->job_step = ipr_reset_enable_ioa;
7633
7634		if (GET_DUMP == ioa_cfg->sdt_state) {
7635			ioa_cfg->sdt_state = READ_DUMP;
7636			ioa_cfg->dump_timeout = 0;
7637			if (ioa_cfg->sis64)
7638				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7639			else
7640				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7641			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7642			schedule_work(&ioa_cfg->work_q);
7643			return IPR_RC_JOB_RETURN;
7644		}
7645	}
7646
7647	LEAVE;
7648	return IPR_RC_JOB_CONTINUE;
7649}
7650
7651/**
7652 * ipr_reset_bist_done - BIST has completed on the adapter.
7653 * @ipr_cmd:	ipr command struct
7654 *
7655 * Description: Unblock config space and resume the reset process.
7656 *
7657 * Return value:
7658 * 	IPR_RC_JOB_CONTINUE
7659 **/
7660static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7661{
7662	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7663
7664	ENTER;
7665	if (ioa_cfg->cfg_locked)
7666		pci_cfg_access_unlock(ioa_cfg->pdev);
7667	ioa_cfg->cfg_locked = 0;
7668	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7669	LEAVE;
7670	return IPR_RC_JOB_CONTINUE;
7671}
7672
7673/**
7674 * ipr_reset_start_bist - Run BIST on the adapter.
7675 * @ipr_cmd:	ipr command struct
7676 *
7677 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7678 *
7679 * Return value:
7680 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7681 **/
7682static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7683{
7684	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7685	int rc = PCIBIOS_SUCCESSFUL;
7686
7687	ENTER;
7688	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7689		writel(IPR_UPROCI_SIS64_START_BIST,
7690		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7691	else
7692		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7693
7694	if (rc == PCIBIOS_SUCCESSFUL) {
7695		ipr_cmd->job_step = ipr_reset_bist_done;
7696		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7697		rc = IPR_RC_JOB_RETURN;
7698	} else {
7699		if (ioa_cfg->cfg_locked)
7700			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7701		ioa_cfg->cfg_locked = 0;
7702		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7703		rc = IPR_RC_JOB_CONTINUE;
7704	}
7705
7706	LEAVE;
7707	return rc;
7708}
7709
7710/**
7711 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7712 * @ipr_cmd:	ipr command struct
7713 *
7714 * Description: This clears PCI reset to the adapter and delays two seconds.
7715 *
7716 * Return value:
7717 * 	IPR_RC_JOB_RETURN
7718 **/
7719static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7720{
7721	ENTER;
7722	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7723	ipr_cmd->job_step = ipr_reset_bist_done;
7724	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7725	LEAVE;
7726	return IPR_RC_JOB_RETURN;
7727}
7728
7729/**
7730 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7731 * @ipr_cmd:	ipr command struct
7732 *
7733 * Description: This asserts PCI reset to the adapter.
7734 *
7735 * Return value:
7736 * 	IPR_RC_JOB_RETURN
7737 **/
7738static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7739{
7740	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7741	struct pci_dev *pdev = ioa_cfg->pdev;
7742
7743	ENTER;
7744	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7745	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7746	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7747	LEAVE;
7748	return IPR_RC_JOB_RETURN;
7749}
7750
7751/**
7752 * ipr_reset_block_config_access_wait - Wait for permission to block config access
7753 * @ipr_cmd:	ipr command struct
7754 *
7755 * Description: This attempts to block config access to the IOA.
7756 *
7757 * Return value:
7758 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7759 **/
7760static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
7761{
7762	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7763	int rc = IPR_RC_JOB_CONTINUE;
7764
7765	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7766		ioa_cfg->cfg_locked = 1;
7767		ipr_cmd->job_step = ioa_cfg->reset;
7768	} else {
7769		if (ipr_cmd->u.time_left) {
7770			rc = IPR_RC_JOB_RETURN;
7771			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7772			ipr_reset_start_timer(ipr_cmd,
7773					      IPR_CHECK_FOR_RESET_TIMEOUT);
7774		} else {
7775			ipr_cmd->job_step = ioa_cfg->reset;
7776			dev_err(&ioa_cfg->pdev->dev,
7777				"Timed out waiting to lock config access. Resetting anyway.\n");
7778		}
7779	}
7780
7781	return rc;
7782}
7783
7784/**
7785 * ipr_reset_block_config_access - Block config access to the IOA
7786 * @ipr_cmd:	ipr command struct
7787 *
7788 * Description: This attempts to block config access to the IOA
7789 *
7790 * Return value:
7791 * 	IPR_RC_JOB_CONTINUE
7792 **/
7793static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
7794{
7795	ipr_cmd->ioa_cfg->cfg_locked = 0;
7796	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
7797	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7798	return IPR_RC_JOB_CONTINUE;
7799}
7800
7801/**
7802 * ipr_reset_allowed - Query whether or not IOA can be reset
7803 * @ioa_cfg:	ioa config struct
7804 *
7805 * Return value:
7806 * 	0 if reset not allowed / non-zero if reset is allowed
7807 **/
7808static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7809{
7810	volatile u32 temp_reg;
7811
7812	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7813	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7814}
7815
7816/**
7817 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7818 * @ipr_cmd:	ipr command struct
7819 *
7820 * Description: This function waits for adapter permission to run BIST,
7821 * then runs BIST. If the adapter does not give permission after a
7822 * reasonable time, we will reset the adapter anyway. Resetting the
7823 * adapter without warning it risks losing the persistent error log:
7824 * if the adapter is reset while it is writing to its flash, the
7825 * flash segment will have bad ECC and be zeroed.
7827 *
7828 * Return value:
7829 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7830 **/
7831static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7832{
7833	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7834	int rc = IPR_RC_JOB_RETURN;
7835
7836	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7837		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7838		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7839	} else {
7840		ipr_cmd->job_step = ipr_reset_block_config_access;
7841		rc = IPR_RC_JOB_CONTINUE;
7842	}
7843
7844	return rc;
7845}
7846
7847/**
7848 * ipr_reset_alert - Alert the adapter of a pending reset
7849 * @ipr_cmd:	ipr command struct
7850 *
7851 * Description: This function alerts the adapter that it will be reset.
7852 * If memory space is not currently enabled, proceed directly
7853 * to running BIST on the adapter. The timer must always be started
7854 * so we guarantee we do not run BIST from ipr_isr.
7855 *
7856 * Return value:
7857 * 	IPR_RC_JOB_RETURN
7858 **/
7859static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7860{
7861	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7862	u16 cmd_reg;
7863	int rc;
7864
7865	ENTER;
7866	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7867
7868	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7869		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7870		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7871		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7872	} else {
7873		ipr_cmd->job_step = ipr_reset_block_config_access;
7874	}
7875
7876	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7877	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7878
7879	LEAVE;
7880	return IPR_RC_JOB_RETURN;
7881}
7882
7883/**
7884 * ipr_reset_ucode_download_done - Microcode download completion
7885 * @ipr_cmd:	ipr command struct
7886 *
7887 * Description: This function unmaps the microcode download buffer.
7888 *
7889 * Return value:
7890 * 	IPR_RC_JOB_CONTINUE
7891 **/
7892static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7893{
7894	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7895	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7896
7897	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7898		     sglist->num_sg, DMA_TO_DEVICE);
7899
7900	ipr_cmd->job_step = ipr_reset_alert;
7901	return IPR_RC_JOB_CONTINUE;
7902}
7903
7904/**
7905 * ipr_reset_ucode_download - Download microcode to the adapter
7906 * @ipr_cmd:	ipr command struct
7907 *
7908 * Description: This function checks to see if there is microcode
7909 * to download to the adapter. If there is, a download is performed.
7910 *
7911 * Return value:
7912 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7913 **/
7914static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7915{
7916	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7917	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7918
7919	ENTER;
7920	ipr_cmd->job_step = ipr_reset_alert;
7921
7922	if (!sglist)
7923		return IPR_RC_JOB_CONTINUE;
7924
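	/*
	 * Build a SCSI WRITE BUFFER (download microcode and save) command
	 * with the image length in CDB bytes 6-8, MSB first.
	 */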
7925	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7926	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7927	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7928	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7929	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7930	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7931	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7932
7933	if (ioa_cfg->sis64)
7934		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7935	else
7936		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7937	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7938
7939	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7940		   IPR_WRITE_BUFFER_TIMEOUT);
7941
7942	LEAVE;
7943	return IPR_RC_JOB_RETURN;
7944}
7945
7946/**
7947 * ipr_reset_shutdown_ioa - Shutdown the adapter
7948 * @ipr_cmd:	ipr command struct
7949 *
7950 * Description: This function issues an adapter shutdown of the
7951 * specified type to the specified adapter as part of the
7952 * adapter reset job.
7953 *
7954 * Return value:
7955 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7956 **/
7957static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7958{
7959	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7960	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7961	unsigned long timeout;
7962	int rc = IPR_RC_JOB_CONTINUE;
7963
7964	ENTER;
7965	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7966		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7967		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7968		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7969		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7970
7971		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7972			timeout = IPR_SHUTDOWN_TIMEOUT;
7973		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7974			timeout = IPR_INTERNAL_TIMEOUT;
7975		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7976			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7977		else
7978			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7979
7980		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7981
7982		rc = IPR_RC_JOB_RETURN;
7983		ipr_cmd->job_step = ipr_reset_ucode_download;
7984	} else
7985		ipr_cmd->job_step = ipr_reset_alert;
7986
7987	LEAVE;
7988	return rc;
7989}
7990
7991/**
7992 * ipr_reset_ioa_job - Adapter reset job
7993 * @ipr_cmd:	ipr command struct
7994 *
7995 * Description: This function is the job router for the adapter reset job.
7996 *
7997 * Return value:
7998 * 	none
7999 **/
8000static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8001{
8002	u32 rc, ioasc;
8003	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8004
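	/*
	 * Each job step either runs the next step synchronously by
	 * returning IPR_RC_JOB_CONTINUE or, having queued asynchronous work
	 * that will re-enter this routine, returns IPR_RC_JOB_RETURN.
	 */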
8005	do {
8006		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8007
8008		if (ioa_cfg->reset_cmd != ipr_cmd) {
8009			/*
8010			 * We are doing nested adapter resets and this is
8011			 * not the current reset job.
8012			 */
8013			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8014			return;
8015		}
8016
8017		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8018			rc = ipr_cmd->job_step_failed(ipr_cmd);
8019			if (rc == IPR_RC_JOB_RETURN)
8020				return;
8021		}
8022
8023		ipr_reinit_ipr_cmnd(ipr_cmd);
8024		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8025		rc = ipr_cmd->job_step(ipr_cmd);
8026	} while(rc == IPR_RC_JOB_CONTINUE);
8027}
8028
8029/**
8030 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8031 * @ioa_cfg:		ioa config struct
8032 * @job_step:		first job step of reset job
8033 * @shutdown_type:	shutdown type
8034 *
8035 * Description: This function will initiate the reset of the given adapter
8036 * starting at the selected job step.
8037 * If the caller needs to wait on the completion of the reset,
8038 * the caller must sleep on the reset_wait_q.
8039 *
8040 * Return value:
8041 * 	none
8042 **/
8043static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8044				    int (*job_step) (struct ipr_cmnd *),
8045				    enum ipr_shutdown_type shutdown_type)
8046{
8047	struct ipr_cmnd *ipr_cmd;
8048
8049	ioa_cfg->in_reset_reload = 1;
8050	ioa_cfg->allow_cmds = 0;
8051	scsi_block_requests(ioa_cfg->host);
8052
8053	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8054	ioa_cfg->reset_cmd = ipr_cmd;
8055	ipr_cmd->job_step = job_step;
8056	ipr_cmd->u.shutdown_type = shutdown_type;
8057
8058	ipr_reset_ioa_job(ipr_cmd);
8059}
8060
8061/**
8062 * ipr_initiate_ioa_reset - Initiate an adapter reset
8063 * @ioa_cfg:		ioa config struct
8064 * @shutdown_type:	shutdown type
8065 *
8066 * Description: This function will initiate the reset of the given adapter.
8067 * If the caller needs to wait on the completion of the reset,
8068 * the caller must sleep on the reset_wait_q.
8069 *
8070 * Return value:
8071 * 	none
8072 **/
8073static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8074				   enum ipr_shutdown_type shutdown_type)
8075{
8076	if (ioa_cfg->ioa_is_dead)
8077		return;
8078
8079	if (ioa_cfg->in_reset_reload) {
8080		if (ioa_cfg->sdt_state == GET_DUMP)
8081			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8082		else if (ioa_cfg->sdt_state == READ_DUMP)
8083			ioa_cfg->sdt_state = ABORT_DUMP;
8084	}
8085
8086	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8087		dev_err(&ioa_cfg->pdev->dev,
8088			"IOA taken offline - error recovery failed\n");
8089
8090		ioa_cfg->reset_retries = 0;
8091		ioa_cfg->ioa_is_dead = 1;
8092
8093		if (ioa_cfg->in_ioa_bringdown) {
8094			ioa_cfg->reset_cmd = NULL;
8095			ioa_cfg->in_reset_reload = 0;
8096			ipr_fail_all_ops(ioa_cfg);
8097			wake_up_all(&ioa_cfg->reset_wait_q);
8098
8099			spin_unlock_irq(ioa_cfg->host->host_lock);
8100			scsi_unblock_requests(ioa_cfg->host);
8101			spin_lock_irq(ioa_cfg->host->host_lock);
8102			return;
8103		} else {
8104			ioa_cfg->in_ioa_bringdown = 1;
8105			shutdown_type = IPR_SHUTDOWN_NONE;
8106		}
8107	}
8108
8109	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8110				shutdown_type);
8111}
8112
8113/**
8114 * ipr_reset_freeze - Hold off all I/O activity
8115 * @ipr_cmd:	ipr command struct
8116 *
8117 * Description: If the PCI slot is frozen, hold off all I/O
8118 * activity; then, as soon as the slot is available again,
8119 * initiate an adapter reset.
8120 */
8121static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8122{
8123	/* Disallow new interrupts, avoid loop */
8124	ipr_cmd->ioa_cfg->allow_interrupts = 0;
8125	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
8126	ipr_cmd->done = ipr_reset_ioa_job;
8127	return IPR_RC_JOB_RETURN;
8128}
8129
8130/**
8131 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8132 * @pdev:	PCI device struct
8133 *
8134 * Description: This routine is called to tell us that the PCI bus
8135 * is down. Can't do anything here, except put the device driver
8136 * into a holding pattern, waiting for the PCI bus to come back.
8137 */
8138static void ipr_pci_frozen(struct pci_dev *pdev)
8139{
8140	unsigned long flags = 0;
8141	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8142
8143	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8144	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8145	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8146}
8147
8148/**
8149 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8150 * @pdev:	PCI device struct
8151 *
8152 * Description: This routine is called by the pci error recovery
8153 * code after the PCI slot has been reset, just before we
8154 * should resume normal operations.
8155 */
8156static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8157{
8158	unsigned long flags = 0;
8159	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8160
8161	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8162	if (ioa_cfg->needs_warm_reset)
8163		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8164	else
8165		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8166					IPR_SHUTDOWN_NONE);
8167	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8168	return PCI_ERS_RESULT_RECOVERED;
8169}
8170
8171/**
8172 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8173 * @pdev:	PCI device struct
8174 *
8175 * Description: This routine is called when the PCI bus has
8176 * permanently failed.
8177 */
8178static void ipr_pci_perm_failure(struct pci_dev *pdev)
8179{
8180	unsigned long flags = 0;
8181	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8182
8183	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8184	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8185		ioa_cfg->sdt_state = ABORT_DUMP;
8186	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8187	ioa_cfg->in_ioa_bringdown = 1;
8188	ioa_cfg->allow_cmds = 0;
8189	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8190	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8191}
8192
8193/**
8194 * ipr_pci_error_detected - Called when a PCI error is detected.
8195 * @pdev:	PCI device struct
8196 * @state:	PCI channel state
8197 *
8198 * Description: Called when a PCI error is detected.
8199 *
8200 * Return value:
8201 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8202 */
8203static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8204					       pci_channel_state_t state)
8205{
8206	switch (state) {
8207	case pci_channel_io_frozen:
8208		ipr_pci_frozen(pdev);
8209		return PCI_ERS_RESULT_NEED_RESET;
8210	case pci_channel_io_perm_failure:
8211		ipr_pci_perm_failure(pdev);
8212		return PCI_ERS_RESULT_DISCONNECT;
8214	default:
8215		break;
8216	}
8217	return PCI_ERS_RESULT_NEED_RESET;
8218}
8219
8220/**
8221 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8222 * @ioa_cfg:	ioa cfg struct
8223 *
8224 * Description: This is the second phase of adapter initialization.
8225 * This function takes care of initializing the adapter to the point
8226 * where it can accept new commands.
8227 *
8228 * Return value:
8229 * 	0 on success / -EIO on failure
8230 **/
8231static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8232{
8233	int rc = 0;
8234	unsigned long host_lock_flags = 0;
8235
8236	ENTER;
8237	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8238	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8239	if (ioa_cfg->needs_hard_reset) {
8240		ioa_cfg->needs_hard_reset = 0;
8241		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8242	} else
8243		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8244					IPR_SHUTDOWN_NONE);
8245
8246	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8247	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8248	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8249
8250	if (ioa_cfg->ioa_is_dead) {
8251		rc = -EIO;
8252	} else if (ipr_invalid_adapter(ioa_cfg)) {
8253		if (!ipr_testmode)
8254			rc = -EIO;
8255
8256		dev_err(&ioa_cfg->pdev->dev,
8257			"Adapter not supported in this hardware configuration.\n");
8258	}
8259
8260	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8261
8262	LEAVE;
8263	return rc;
8264}
8265
8266/**
8267 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8268 * @ioa_cfg:	ioa config struct
8269 *
8270 * Return value:
8271 * 	none
8272 **/
8273static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8274{
8275	int i;
8276
8277	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8278		if (ioa_cfg->ipr_cmnd_list[i])
8279			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8280				      ioa_cfg->ipr_cmnd_list[i],
8281				      ioa_cfg->ipr_cmnd_list_dma[i]);
8282
8283		ioa_cfg->ipr_cmnd_list[i] = NULL;
8284	}
8285
8286	if (ioa_cfg->ipr_cmd_pool)
8287		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8288
8289	ioa_cfg->ipr_cmd_pool = NULL;
8290}
8291
8292/**
8293 * ipr_free_mem - Frees memory allocated for an adapter
8294 * @ioa_cfg:	ioa cfg struct
8295 *
8296 * Return value:
8297 * 	nothing
8298 **/
8299static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8300{
8301	int i;
8302
8303	kfree(ioa_cfg->res_entries);
8304	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8305			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8306	ipr_free_cmd_blks(ioa_cfg);
8307	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8308			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8309	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8310			    ioa_cfg->u.cfg_table,
8311			    ioa_cfg->cfg_table_dma);
8312
8313	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8314		pci_free_consistent(ioa_cfg->pdev,
8315				    sizeof(struct ipr_hostrcb),
8316				    ioa_cfg->hostrcb[i],
8317				    ioa_cfg->hostrcb_dma[i]);
8318	}
8319
8320	ipr_free_dump(ioa_cfg);
8321	kfree(ioa_cfg->trace);
8322}
8323
8324/**
8325 * ipr_free_all_resources - Free all allocated resources for an adapter.
8326 * @ipr_cmd:	ipr command struct
8327 *
8328 * This function frees all allocated resources for the
8329 * specified adapter.
8330 *
8331 * Return value:
8332 * 	none
8333 **/
8334static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8335{
8336	struct pci_dev *pdev = ioa_cfg->pdev;
8337
8338	ENTER;
8339	free_irq(pdev->irq, ioa_cfg);
8340	pci_disable_msi(pdev);
8341	iounmap(ioa_cfg->hdw_dma_regs);
8342	pci_release_regions(pdev);
8343	ipr_free_mem(ioa_cfg);
8344	scsi_host_put(ioa_cfg->host);
8345	pci_disable_device(pdev);
8346	LEAVE;
8347}
8348
8349/**
8350 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8351 * @ioa_cfg:	ioa config struct
8352 *
8353 * Return value:
8354 * 	0 on success / -ENOMEM on allocation failure
8355 **/
8356static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8357{
8358	struct ipr_cmnd *ipr_cmd;
8359	struct ipr_ioarcb *ioarcb;
8360	dma_addr_t dma_addr;
8361	int i;
8362
8363	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8364						 sizeof(struct ipr_cmnd), 16, 0);
8365
8366	if (!ioa_cfg->ipr_cmd_pool)
8367		return -ENOMEM;
8368
8369	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8370		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8371
8372		if (!ipr_cmd) {
8373			ipr_free_cmd_blks(ioa_cfg);
8374			return -ENOMEM;
8375		}
8376
8377		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8378		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8379		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8380
8381		ioarcb = &ipr_cmd->ioarcb;
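		/*
		 * Record the block's bus address and point the IOARCB at the
		 * IOADL and IOASA areas embedded in the same block so the
		 * adapter can DMA to them directly.
		 */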
8382		ipr_cmd->dma_addr = dma_addr;
8383		if (ioa_cfg->sis64)
8384			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8385		else
8386			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8387
8388		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8389		if (ioa_cfg->sis64) {
8390			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8391				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8392			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8393				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8394		} else {
8395			ioarcb->write_ioadl_addr =
8396				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8397			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8398			ioarcb->ioasa_host_pci_addr =
8399				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8400		}
8401		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8402		ipr_cmd->cmd_index = i;
8403		ipr_cmd->ioa_cfg = ioa_cfg;
8404		ipr_cmd->sense_buffer_dma = dma_addr +
8405			offsetof(struct ipr_cmnd, sense_buffer);
8406
8407		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8408	}
8409
8410	return 0;
8411}
8412
8413/**
8414 * ipr_alloc_mem - Allocate memory for an adapter
8415 * @ioa_cfg:	ioa config struct
8416 *
8417 * Return value:
8418 * 	0 on success / non-zero for error
8419 **/
8420static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8421{
8422	struct pci_dev *pdev = ioa_cfg->pdev;
8423	int i, rc = -ENOMEM;
8424
8425	ENTER;
8426	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8427				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8428
8429	if (!ioa_cfg->res_entries)
8430		goto out;
8431
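	/*
	 * SIS-64 adapters track target, array, and vset IDs in bitmaps
	 * sized by the number of supported devices.
	 */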
8432	if (ioa_cfg->sis64) {
8433		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8434					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8435		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8436					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8437		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8438					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8439	}
8440
8441	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8442		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8443		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8444	}
8445
8446	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8447						sizeof(struct ipr_misc_cbs),
8448						&ioa_cfg->vpd_cbs_dma);
8449
8450	if (!ioa_cfg->vpd_cbs)
8451		goto out_free_res_entries;
8452
8453	if (ipr_alloc_cmd_blks(ioa_cfg))
8454		goto out_free_vpd_cbs;
8455
8456	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8457						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8458						 &ioa_cfg->host_rrq_dma);
8459
8460	if (!ioa_cfg->host_rrq)
8461		goto out_ipr_free_cmd_blocks;
8462
8463	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8464						    ioa_cfg->cfg_table_size,
8465						    &ioa_cfg->cfg_table_dma);
8466
8467	if (!ioa_cfg->u.cfg_table)
8468		goto out_free_host_rrq;
8469
8470	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8471		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8472							   sizeof(struct ipr_hostrcb),
8473							   &ioa_cfg->hostrcb_dma[i]);
8474
8475		if (!ioa_cfg->hostrcb[i])
8476			goto out_free_hostrcb_dma;
8477
8478		ioa_cfg->hostrcb[i]->hostrcb_dma =
8479			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8480		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8481		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8482	}
8483
8484	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8485				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8486
8487	if (!ioa_cfg->trace)
8488		goto out_free_hostrcb_dma;
8489
8490	rc = 0;
8491out:
8492	LEAVE;
8493	return rc;
8494
8495out_free_hostrcb_dma:
8496	while (i-- > 0) {
8497		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8498				    ioa_cfg->hostrcb[i],
8499				    ioa_cfg->hostrcb_dma[i]);
8500	}
8501	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8502			    ioa_cfg->u.cfg_table,
8503			    ioa_cfg->cfg_table_dma);
8504out_free_host_rrq:
8505	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8506			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8507out_ipr_free_cmd_blocks:
8508	ipr_free_cmd_blks(ioa_cfg);
8509out_free_vpd_cbs:
8510	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8511			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8512out_free_res_entries:
8513	kfree(ioa_cfg->res_entries);
8514	goto out;
8515}
8516
8517/**
8518 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8519 * @ioa_cfg:	ioa config struct
8520 *
8521 * Return value:
8522 * 	none
8523 **/
8524static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8525{
8526	int i;
8527
8528	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8529		ioa_cfg->bus_attr[i].bus = i;
8530		ioa_cfg->bus_attr[i].qas_enabled = 0;
8531		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8532		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8533			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8534		else
8535			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8536	}
8537}
8538
8539/**
8540 * ipr_init_ioa_cfg - Initialize IOA config struct
8541 * @ioa_cfg:	ioa config struct
8542 * @host:		scsi host struct
8543 * @pdev:		PCI dev struct
8544 *
8545 * Return value:
8546 * 	none
8547 **/
8548static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8549				       struct Scsi_Host *host, struct pci_dev *pdev)
8550{
8551	const struct ipr_interrupt_offsets *p;
8552	struct ipr_interrupts *t;
8553	void __iomem *base;
8554
8555	ioa_cfg->host = host;
8556	ioa_cfg->pdev = pdev;
8557	ioa_cfg->log_level = ipr_log_level;
8558	ioa_cfg->doorbell = IPR_DOORBELL;
8559	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8560	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8561	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8562	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8563	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8564	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8565	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8566	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8567
8568	INIT_LIST_HEAD(&ioa_cfg->free_q);
8569	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8570	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8571	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8572	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8573	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8574	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8575	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8576	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8577	ioa_cfg->sdt_state = INACTIVE;
8578
8579	ipr_initialize_bus_attr(ioa_cfg);
8580	ioa_cfg->max_devs_supported = ipr_max_devs;
8581
8582	if (ioa_cfg->sis64) {
8583		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8584		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8585		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8586			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8587	} else {
8588		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8589		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8590		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8591			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8592	}
8593	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8594	host->unique_id = host->host_no;
8595	host->max_cmd_len = IPR_MAX_CDB_LEN;
8596	pci_set_drvdata(pdev, ioa_cfg);
8597
8598	p = &ioa_cfg->chip_cfg->regs;
8599	t = &ioa_cfg->regs;
8600	base = ioa_cfg->hdw_dma_regs;
8601
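	/* Translate the chip's register offsets into ioremapped addresses */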
8602	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8603	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8604	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8605	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8606	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8607	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8608	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8609	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8610	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8611	t->ioarrin_reg = base + p->ioarrin_reg;
8612	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8613	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8614	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8615	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8616	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8617	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8618
8619	if (ioa_cfg->sis64) {
8620		t->init_feedback_reg = base + p->init_feedback_reg;
8621		t->dump_addr_reg = base + p->dump_addr_reg;
8622		t->dump_data_reg = base + p->dump_data_reg;
8623		t->endian_swap_reg = base + p->endian_swap_reg;
8624	}
8625}
8626
8627/**
8628 * ipr_get_chip_info - Find adapter chip information
8629 * @dev_id:		PCI device id struct
8630 *
8631 * Return value:
8632 * 	ptr to chip information on success / NULL on failure
8633 **/
8634static const struct ipr_chip_t * __devinit
8635ipr_get_chip_info(const struct pci_device_id *dev_id)
8636{
8637	int i;
8638
8639	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8640		if (ipr_chip[i].vendor == dev_id->vendor &&
8641		    ipr_chip[i].device == dev_id->device)
8642			return &ipr_chip[i];
8643	return NULL;
8644}
8645
8646/**
8647 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8648 * @irq:		interrupt number
 * @devp:		pointer to the ioa config struct
8649 *
8650 * Description: Simply set the msi_received flag to 1 indicating that
8651 * Message Signaled Interrupts are supported.
8652 *
8653 * Return value:
8654 * 	IRQ_HANDLED
8655 **/
8656static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8657{
8658	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8659	unsigned long lock_flags = 0;
8660	irqreturn_t rc = IRQ_HANDLED;
8661
8662	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8663
8664	ioa_cfg->msi_received = 1;
8665	wake_up(&ioa_cfg->msi_wait_q);
8666
8667	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8668	return rc;
8669}
8670
8671/**
8672 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8673 * @pdev:		PCI device struct
8674 *
8675 * Description: The return value from pci_enable_msi() can not always be
8676 * trusted.  This routine sets up and initiates a test interrupt to determine
8677 * if the interrupt is received via the ipr_test_intr() service routine.
8678 * If the tests fails, the driver will fall back to LSI.
8679 *
8680 * Return value:
8681 * 	0 on success / non-zero on failure
8682 **/
8683static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8684				  struct pci_dev *pdev)
8685{
8686	int rc;
8687	volatile u32 int_reg;
8688	unsigned long lock_flags = 0;
8689
8690	ENTER;
8691
8692	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8693	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8694	ioa_cfg->msi_received = 0;
8695	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8696	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8697	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8698	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8699
8700	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8701	if (rc) {
8702		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8703		return rc;
8704	} else if (ipr_debug)
8705		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8706
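	/*
	 * Fire a test interrupt by writing the IO debug acknowledge bit,
	 * then wait up to a second for ipr_test_intr() to see it.
	 */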
8707	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8708	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8709	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8710	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8711
8712	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8713	if (!ioa_cfg->msi_received) {
8714		/* MSI test failed */
8715		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8716		rc = -EOPNOTSUPP;
8717	} else if (ipr_debug)
8718		dev_info(&pdev->dev, "MSI test succeeded.\n");
8719
8720	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8721
8722	free_irq(pdev->irq, ioa_cfg);
8723
8724	LEAVE;
8725
8726	return rc;
8727}
8728
8729/**
8730 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8731 * @pdev:		PCI device struct
8732 * @dev_id:		PCI device id struct
8733 *
8734 * Return value:
8735 * 	0 on success / non-zero on failure
8736 **/
8737static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8738				   const struct pci_device_id *dev_id)
8739{
8740	struct ipr_ioa_cfg *ioa_cfg;
8741	struct Scsi_Host *host;
8742	unsigned long ipr_regs_pci;
8743	void __iomem *ipr_regs;
8744	int rc = PCIBIOS_SUCCESSFUL;
8745	volatile u32 mask, uproc, interrupts;
8746
8747	ENTER;
8748
8749	if ((rc = pci_enable_device(pdev))) {
8750		dev_err(&pdev->dev, "Cannot enable adapter\n");
8751		goto out;
8752	}
8753
8754	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8755
8756	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8757
8758	if (!host) {
8759		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8760		rc = -ENOMEM;
8761		goto out_disable;
8762	}
8763
8764	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8765	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8766	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8767		      sata_port_info.flags, &ipr_sata_ops);
8768
8769	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8770
8771	if (!ioa_cfg->ipr_chip) {
8772		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8773			dev_id->vendor, dev_id->device);
8774		goto out_scsi_host_put;
8775	}
8776
8777	/* set SIS 32 or SIS 64 */
8778	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8779	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8780	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
8781
8782	if (ipr_transop_timeout)
8783		ioa_cfg->transop_timeout = ipr_transop_timeout;
8784	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8785		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8786	else
8787		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8788
8789	ioa_cfg->revid = pdev->revision;
8790
8791	ipr_regs_pci = pci_resource_start(pdev, 0);
8792
8793	rc = pci_request_regions(pdev, IPR_NAME);
8794	if (rc < 0) {
8795		dev_err(&pdev->dev,
8796			"Couldn't register memory range of registers\n");
8797		goto out_scsi_host_put;
8798	}
8799
8800	ipr_regs = pci_ioremap_bar(pdev, 0);
8801
8802	if (!ipr_regs) {
8803		dev_err(&pdev->dev,
8804			"Couldn't map memory range of registers\n");
8805		rc = -ENOMEM;
8806		goto out_release_regions;
8807	}
8808
8809	ioa_cfg->hdw_dma_regs = ipr_regs;
8810	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8811	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8812
8813	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8814
8815	pci_set_master(pdev);
8816
8817	if (ioa_cfg->sis64) {
8818		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8819		if (rc < 0) {
8820			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8821			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8822		}
8823
8824	} else
8825		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8826
8827	if (rc < 0) {
8828		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8829		goto cleanup_nomem;
8830	}
8831
8832	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8833				   ioa_cfg->chip_cfg->cache_line_size);
8834
8835	if (rc != PCIBIOS_SUCCESSFUL) {
8836		dev_err(&pdev->dev, "Write of cache line size failed\n");
8837		rc = -EIO;
8838		goto cleanup_nomem;
8839	}
8840
8841	/* Enable MSI style interrupts if they are supported. */
8842	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8843		rc = ipr_test_msi(ioa_cfg, pdev);
8844		if (rc == -EOPNOTSUPP)
8845			pci_disable_msi(pdev);
8846		else if (rc)
8847			goto out_msi_disable;
8848		else
8849			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8850	} else if (ipr_debug)
8851		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8852
8853	/* Save away PCI config space for use following IOA reset */
8854	rc = pci_save_state(pdev);
8855
8856	if (rc != PCIBIOS_SUCCESSFUL) {
8857		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8858		rc = -EIO;
8859		goto out_msi_disable;
8860	}
8861
8862	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8863		goto out_msi_disable;
8864
8865	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8866		goto out_msi_disable;
8867
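	/*
	 * Size the config table for the SIS-64 or SIS-32 layout, with room
	 * for every device the adapter is allowed to report.
	 */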
8868	if (ioa_cfg->sis64)
8869		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8870				+ ((sizeof(struct ipr_config_table_entry64)
8871				* ioa_cfg->max_devs_supported)));
8872	else
8873		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8874				+ ((sizeof(struct ipr_config_table_entry)
8875				* ioa_cfg->max_devs_supported)));
8876
8877	rc = ipr_alloc_mem(ioa_cfg);
8878	if (rc < 0) {
8879		dev_err(&pdev->dev,
8880			"Couldn't allocate enough memory for device driver!\n");
8881		goto out_msi_disable;
8882	}
8883
8884	/*
8885	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8886	 * the card is in an unknown state and needs a hard reset
8887	 */
8888	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8889	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8890	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8891	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8892		ioa_cfg->needs_hard_reset = 1;
8893	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
8894		ioa_cfg->needs_hard_reset = 1;
8895	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8896		ioa_cfg->ioa_unit_checked = 1;
8897
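	/*
	 * Mask and clear adapter interrupts, then register the interrupt
	 * handler; the IRQ is only flagged as shared when MSI is not in use.
	 */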
8898	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8899	rc = request_irq(pdev->irq, ipr_isr,
8900			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8901			 IPR_NAME, ioa_cfg);
8902
8903	if (rc) {
8904		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8905			pdev->irq, rc);
8906		goto cleanup_nolog;
8907	}
8908
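	/*
	 * Adapters flagged with IPR_USE_PCI_WARM_RESET, and Obsidian-E cards
	 * at chip revision 0, are reset via a PCI slot reset rather than BIST.
	 */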
8909	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8910	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8911		ioa_cfg->needs_warm_reset = 1;
8912		ioa_cfg->reset = ipr_reset_slot_reset;
8913	} else
8914		ioa_cfg->reset = ipr_reset_start_bist;
8915
8916	spin_lock(&ipr_driver_lock);
8917	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8918	spin_unlock(&ipr_driver_lock);
8919
8920	LEAVE;
8921out:
8922	return rc;
8923
8924cleanup_nolog:
8925	ipr_free_mem(ioa_cfg);
8926out_msi_disable:
8927	pci_disable_msi(pdev);
8928cleanup_nomem:
8929	iounmap(ipr_regs);
8930out_release_regions:
8931	pci_release_regions(pdev);
8932out_scsi_host_put:
8933	scsi_host_put(host);
8934out_disable:
8935	pci_disable_device(pdev);
8936	goto out;
8937}
8938
8939/**
8940 * ipr_scan_vsets - Scans for VSET devices
8941 * @ioa_cfg:	ioa config struct
8942 *
8943 * Description: VSET resources do not follow SAM: LUNs may be sparse and
8944 * LUN 0 may not exist, so we have to scan for these devices ourselves.
8945 *
8946 * Return value:
8947 * 	none
8948 **/
8949static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8950{
8951	int target, lun;
8952
8953	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8954		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
8955			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8956}
8957
8958/**
8959 * ipr_initiate_ioa_bringdown - Bring down an adapter
8960 * @ioa_cfg:		ioa config struct
8961 * @shutdown_type:	shutdown type
8962 *
8963 * Description: This function will initiate bringing down the adapter.
8964 * This consists of issuing an IOA shutdown to the adapter
8965 * to flush the cache, and running BIST.
8966 * If the caller needs to wait on the completion of the reset,
8967 * the caller must sleep on the reset_wait_q.
8968 *
8969 * Return value:
8970 * 	none
8971 **/
8972static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8973				       enum ipr_shutdown_type shutdown_type)
8974{
8975	ENTER;
8976	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8977		ioa_cfg->sdt_state = ABORT_DUMP;
8978	ioa_cfg->reset_retries = 0;
8979	ioa_cfg->in_ioa_bringdown = 1;
8980	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8981	LEAVE;
8982}
8983
8984/**
8985 * __ipr_remove - Remove a single adapter
8986 * @pdev:	pci device struct
8987 *
8988 * Adapter hot plug remove entry point.
8989 *
8990 * Adapter teardown used by the hot plug remove and probe error paths.
8991 * 	none
8992 **/
8993static void __ipr_remove(struct pci_dev *pdev)
8994{
8995	unsigned long host_lock_flags = 0;
8996	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8997	ENTER;
8998
8999	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
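	/* Wait for any reset/reload already in progress to finish */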
9000	while (ioa_cfg->in_reset_reload) {
9001		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9002		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9003		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9004	}
9005
9006	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9007
9008	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9009	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9010	flush_work_sync(&ioa_cfg->work_q);
9011	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9012
9013	spin_lock(&ipr_driver_lock);
9014	list_del(&ioa_cfg->queue);
9015	spin_unlock(&ipr_driver_lock);
9016
9017	if (ioa_cfg->sdt_state == ABORT_DUMP)
9018		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9019	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9020
9021	ipr_free_all_resources(ioa_cfg);
9022
9023	LEAVE;
9024}
9025
9026/**
9027 * ipr_remove - IOA hot plug remove entry point
9028 * @pdev:	pci device struct
9029 *
9030 * Adapter hot plug remove entry point.
9031 *
9032 * Return value:
9033 * 	none
9034 **/
9035static void __devexit ipr_remove(struct pci_dev *pdev)
9036{
9037	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9038
9039	ENTER;
9040
9041	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9042			      &ipr_trace_attr);
9043	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9044			     &ipr_dump_attr);
9045	scsi_remove_host(ioa_cfg->host);
9046
9047	__ipr_remove(pdev);
9048
9049	LEAVE;
9050}
9051
9052/**
9053 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
9054 *
9055 * Return value:
9056 * 	0 on success / non-zero on failure
9057 **/
9058static int __devinit ipr_probe(struct pci_dev *pdev,
9059			       const struct pci_device_id *dev_id)
9060{
9061	struct ipr_ioa_cfg *ioa_cfg;
9062	int rc;
9063
9064	rc = ipr_probe_ioa(pdev, dev_id);
9065
9066	if (rc)
9067		return rc;
9068
9069	ioa_cfg = pci_get_drvdata(pdev);
9070	rc = ipr_probe_ioa_part2(ioa_cfg);
9071
9072	if (rc) {
9073		__ipr_remove(pdev);
9074		return rc;
9075	}
9076
9077	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9078
9079	if (rc) {
9080		__ipr_remove(pdev);
9081		return rc;
9082	}
9083
9084	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9085				   &ipr_trace_attr);
9086
9087	if (rc) {
9088		scsi_remove_host(ioa_cfg->host);
9089		__ipr_remove(pdev);
9090		return rc;
9091	}
9092
9093	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9094				   &ipr_dump_attr);
9095
9096	if (rc) {
9097		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9098				      &ipr_trace_attr);
9099		scsi_remove_host(ioa_cfg->host);
9100		__ipr_remove(pdev);
9101		return rc;
9102	}
9103
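	/*
	 * Bring devices online: let the midlayer scan the host, scan the
	 * (possibly sparse) VSET LUNs explicitly, and add the IOA's own
	 * resource as a device.
	 */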
9104	scsi_scan_host(ioa_cfg->host);
9105	ipr_scan_vsets(ioa_cfg);
9106	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9107	ioa_cfg->allow_ml_add_del = 1;
9108	ioa_cfg->host->max_channel = IPR_VSET_BUS;
9109	schedule_work(&ioa_cfg->work_q);
9110	return 0;
9111}
9112
9113/**
9114 * ipr_shutdown - Shutdown handler.
9115 * @pdev:	pci device struct
9116 *
9117 * This function is invoked upon system shutdown/reboot. It issues
9118 * a shutdown to the adapter to flush the write cache.
9119 *
9120 * Return value:
9121 * 	none
9122 **/
9123static void ipr_shutdown(struct pci_dev *pdev)
9124{
9125	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9126	unsigned long lock_flags = 0;
9127
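	/*
	 * Wait out any reset in progress, then flush the write cache by
	 * issuing a normal shutdown to the adapter.
	 */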
9128	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9129	while (ioa_cfg->in_reset_reload) {
9130		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9131		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9132		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9133	}
9134
9135	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9136	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9137	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9138}
9139
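/*
 * PCI IDs claimed by this driver. The driver_data field carries per-chip
 * quirk flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */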
9140static struct pci_device_id ipr_pci_table[] __devinitdata = {
9141	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9142		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9143	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9144		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9145	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9146		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9147	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9148		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9149	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9150		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9151	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9152		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9153	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9154		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9155	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9156		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9157		IPR_USE_LONG_TRANSOP_TIMEOUT },
9158	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9159	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9160	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9161	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9162	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9163	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9164	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9165	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9166	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9167	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9168	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9169	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9170	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9171	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9172	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9173	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9174	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9175	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9176	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9177	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9178	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9179	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9180	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9181	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9182	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9183	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9184	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9185		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9186	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9187		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9188	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9189		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9190		IPR_USE_LONG_TRANSOP_TIMEOUT },
9191	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9192		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9193		IPR_USE_LONG_TRANSOP_TIMEOUT },
9194	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9195		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9196	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9197		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9198	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9199		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9200	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9201		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9202	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9203		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9204	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9205		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9206	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9207		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9208	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9209		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9210	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9211		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9212	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9213		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9214	{ }
9215};
9216MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9217
9218static struct pci_error_handlers ipr_err_handler = {
9219	.error_detected = ipr_pci_error_detected,
9220	.slot_reset = ipr_pci_slot_reset,
9221};
9222
9223static struct pci_driver ipr_driver = {
9224	.name = IPR_NAME,
9225	.id_table = ipr_pci_table,
9226	.probe = ipr_probe,
9227	.remove = __devexit_p(ipr_remove),
9228	.shutdown = ipr_shutdown,
9229	.err_handler = &ipr_err_handler,
9230};
9231
9232/**
9233 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
9234 *
9235 * Return value:
9236 * 	none
9237 **/
9238static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9239{
9240	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9241
9242	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9243}
9244
9245/**
9246 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:	notifier block
 * @event:	reboot notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	notifier data (unused)
9247 *
9248 * Return value:
9249 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
9250 **/
9251static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9252{
9253	struct ipr_cmnd *ipr_cmd;
9254	struct ipr_ioa_cfg *ioa_cfg;
9255	unsigned long flags = 0;
9256
9257	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9258		return NOTIFY_DONE;
9259
9260	spin_lock(&ipr_driver_lock);
9261
9262	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9263		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9264		if (!ioa_cfg->allow_cmds) {
9265			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9266			continue;
9267		}
9268
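		/*
		 * Build a shutdown-prepare command addressed to the IOA
		 * itself and issue it with a device reset timeout.
		 */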
9269		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9270		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9271		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9272		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9273		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9274
9275		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9276		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9277	}
9278	spin_unlock(&ipr_driver_lock);
9279
9280	return NOTIFY_OK;
9281}
9282
9283static struct notifier_block ipr_notifier = {
9284	.notifier_call = ipr_halt,
9285};
9286
9287/**
9288 * ipr_init - Module entry point
9289 *
9290 * Return value:
9291 * 	0 on success / negative value on failure
9292 **/
9293static int __init ipr_init(void)
9294{
9295	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9296		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9297
9298	register_reboot_notifier(&ipr_notifier);
9299	return pci_register_driver(&ipr_driver);
9300}
9301
9302/**
9303 * ipr_exit - Module unload
9304 *
9305 * Module unload entry point.
9306 *
9307 * Return value:
9308 * 	none
9309 **/
9310static void __exit ipr_exit(void)
9311{
9312	unregister_reboot_notifier(&ipr_notifier);
9313	pci_unregister_driver(&ipr_driver);
9314}
9315
9316module_init(ipr_init);
9317module_exit(ipr_exit);
9318