mpt2sas_base.c revision 11e1b961ab067ee3acaf723531da4d3f23e1d6f7
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2010  LSI Corporation
7 *  (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42 * USA.
43 */
44
45#include <linux/version.h>
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/sort.h>
59#include <linux/io.h>
60#include <linux/time.h>
61#include <linux/aer.h>
62
63#include "mpt2sas_base.h"
64
65static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
66
67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
68
69static int max_queue_depth = -1;
70module_param(max_queue_depth, int, 0);
71MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
72
73static int max_sgl_entries = -1;
74module_param(max_sgl_entries, int, 0);
75MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
76
77static int msix_disable = -1;
78module_param(msix_disable, int, 0);
79MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
80
81static int missing_delay[2] = {-1, -1};
82module_param_array(missing_delay, int, NULL, 0);
83MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
84
85/* diag_buffer_enable is bitwise
86 * bit 0 set = TRACE
87 * bit 1 set = SNAPSHOT
88 * bit 2 set = EXTENDED
89 *
90 * Any combination of bits can be set
91 */
92static int diag_buffer_enable;
93module_param(diag_buffer_enable, int, 0);
94MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
95    "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
96
97int mpt2sas_fwfault_debug;
98MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
99    "and halt firmware - (default=0)");
100
101static int disable_discovery = -1;
102module_param(disable_discovery, int, 0);
103MODULE_PARM_DESC(disable_discovery, " disable discovery ");
104
105/**
106 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
107 *
108 */
109static int
110_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
111{
112	int ret = param_set_int(val, kp);
113	struct MPT2SAS_ADAPTER *ioc;
114
115	if (ret)
116		return ret;
117
118	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
119	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
120		ioc->fwfault_debug = mpt2sas_fwfault_debug;
121	return 0;
122}
123module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
124    param_get_int, &mpt2sas_fwfault_debug, 0644);
125
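/*
 * Usage note (illustrative; relies on standard module_param_call
 * behaviour rather than anything stated in this file): the 0644
 * permission above makes the parameter writable at runtime, e.g. through
 * /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug, and each write
 * runs _scsih_set_fwfault_debug() to push the new value to every adapter
 * on mpt2sas_ioc_list.
 */
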
126/**
127 * _base_fault_reset_work - workq handling ioc fault conditions
128 * @work: input argument, used to derive ioc
129 * Context: sleep.
130 *
131 * Return nothing.
132 */
133static void
134_base_fault_reset_work(struct work_struct *work)
135{
136	struct MPT2SAS_ADAPTER *ioc =
137	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
138	unsigned long	 flags;
139	u32 doorbell;
140	int rc;
141
142	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
143	if (ioc->shost_recovery)
144		goto rearm_timer;
145	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
146
147	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
148	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
149		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
150		    FORCE_BIG_HAMMER);
151		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
152		    __func__, (rc == 0) ? "success" : "failed");
153		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
154		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
155			mpt2sas_base_fault_info(ioc, doorbell &
156			    MPI2_DOORBELL_DATA_MASK);
157	}
158
159	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
160 rearm_timer:
161	if (ioc->fault_reset_work_q)
162		queue_delayed_work(ioc->fault_reset_work_q,
163		    &ioc->fault_reset_work,
164		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
165	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
166}
167
168/**
169 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
170 * @ioc: per adapter object
171 * Context: sleep.
172 *
173 * Return nothing.
174 */
175void
176mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
177{
178	unsigned long	 flags;
179
180	if (ioc->fault_reset_work_q)
181		return;
182
183	/* initialize fault polling */
184	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
185	snprintf(ioc->fault_reset_work_q_name,
186	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
187	ioc->fault_reset_work_q =
188		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
189	if (!ioc->fault_reset_work_q) {
190		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
191		    ioc->name, __func__, __LINE__);
192			return;
193	}
194	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
195	if (ioc->fault_reset_work_q)
196		queue_delayed_work(ioc->fault_reset_work_q,
197		    &ioc->fault_reset_work,
198		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
199	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
200}
201
202/**
203 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
204 * @ioc: per adapter object
205 * Context: sleep.
206 *
207 * Return nothing.
208 */
209void
210mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
211{
212	unsigned long	 flags;
213	struct workqueue_struct *wq;
214
215	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
216	wq = ioc->fault_reset_work_q;
217	ioc->fault_reset_work_q = NULL;
218	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
219	if (wq) {
220		if (!cancel_delayed_work(&ioc->fault_reset_work))
221			flush_workqueue(wq);
222		destroy_workqueue(wq);
223	}
224}
225
226/**
227 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
228 * @ioc: per adapter object
229 * @fault_code: fault code
230 *
231 * Return nothing.
232 */
233void
234mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc, u16 fault_code)
235{
236	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
237	    ioc->name, fault_code);
238}
239
240/**
241 * mpt2sas_halt_firmware - halts MPT controller firmware
242 * @ioc: per adapter object
243 *
244 * For debugging timeout related issues.  Writing 0xC0FFEE00
245 * to the doorbell register will halt controller firmware.  The
246 * intent is to stop both driver and firmware so the end user can
247 * obtain a ring buffer from the controller UART.
248 */
249void
250mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
251{
252	u32 doorbell;
253
254	if (!ioc->fwfault_debug)
255		return;
256
257	dump_stack();
258
259	doorbell = readl(&ioc->chip->Doorbell);
260	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
261		mpt2sas_base_fault_info(ioc, doorbell);
262	else {
263		writel(0xC0FFEE00, &ioc->chip->Doorbell);
264		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
265		    "timeout\n", ioc->name);
266	}
267
268	panic("panic in %s\n", __func__);
269}
270
271#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
272/**
273 * _base_sas_ioc_info - verbose translation of the ioc status
274 * @ioc: per adapter object
275 * @mpi_reply: reply mf payload returned from firmware
276 * @request_hdr: request mf
277 *
278 * Return nothing.
279 */
280static void
281_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
282     MPI2RequestHeader_t *request_hdr)
283{
284	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
285	    MPI2_IOCSTATUS_MASK;
286	char *desc = NULL;
287	u16 frame_sz;
288	char *func_str = NULL;
289
290	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
291	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
292	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
293	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
294		return;
295
296	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
297		return;
298
299	switch (ioc_status) {
300
301/****************************************************************************
302*  Common IOCStatus values for all replies
303****************************************************************************/
304
305	case MPI2_IOCSTATUS_INVALID_FUNCTION:
306		desc = "invalid function";
307		break;
308	case MPI2_IOCSTATUS_BUSY:
309		desc = "busy";
310		break;
311	case MPI2_IOCSTATUS_INVALID_SGL:
312		desc = "invalid sgl";
313		break;
314	case MPI2_IOCSTATUS_INTERNAL_ERROR:
315		desc = "internal error";
316		break;
317	case MPI2_IOCSTATUS_INVALID_VPID:
318		desc = "invalid vpid";
319		break;
320	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
321		desc = "insufficient resources";
322		break;
323	case MPI2_IOCSTATUS_INVALID_FIELD:
324		desc = "invalid field";
325		break;
326	case MPI2_IOCSTATUS_INVALID_STATE:
327		desc = "invalid state";
328		break;
329	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
330		desc = "op state not supported";
331		break;
332
333/****************************************************************************
334*  Config IOCStatus values
335****************************************************************************/
336
337	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
338		desc = "config invalid action";
339		break;
340	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
341		desc = "config invalid type";
342		break;
343	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
344		desc = "config invalid page";
345		break;
346	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
347		desc = "config invalid data";
348		break;
349	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
350		desc = "config no defaults";
351		break;
352	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
353		desc = "config cant commit";
354		break;
355
356/****************************************************************************
357*  SCSI IO Reply
358****************************************************************************/
359
360	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
361	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
362	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
363	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
364	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
365	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
366	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
367	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
368	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
369	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
370	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
371	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
372		break;
373
374/****************************************************************************
375*  For use by SCSI Initiator and SCSI Target end-to-end data protection
376****************************************************************************/
377
378	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
379		desc = "eedp guard error";
380		break;
381	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
382		desc = "eedp ref tag error";
383		break;
384	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
385		desc = "eedp app tag error";
386		break;
387
388/****************************************************************************
389*  SCSI Target values
390****************************************************************************/
391
392	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
393		desc = "target invalid io index";
394		break;
395	case MPI2_IOCSTATUS_TARGET_ABORTED:
396		desc = "target aborted";
397		break;
398	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
399		desc = "target no conn retryable";
400		break;
401	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
402		desc = "target no connection";
403		break;
404	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
405		desc = "target xfer count mismatch";
406		break;
407	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
408		desc = "target data offset error";
409		break;
410	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
411		desc = "target too much write data";
412		break;
413	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
414		desc = "target iu too short";
415		break;
416	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
417		desc = "target ack nak timeout";
418		break;
419	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
420		desc = "target nak received";
421		break;
422
423/****************************************************************************
424*  Serial Attached SCSI values
425****************************************************************************/
426
427	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
428		desc = "smp request failed";
429		break;
430	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
431		desc = "smp data overrun";
432		break;
433
434/****************************************************************************
435*  Diagnostic Buffer Post / Diagnostic Release values
436****************************************************************************/
437
438	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
439		desc = "diagnostic released";
440		break;
441	default:
442		break;
443	}
444
445	if (!desc)
446		return;
447
448	switch (request_hdr->Function) {
449	case MPI2_FUNCTION_CONFIG:
450		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
451		func_str = "config_page";
452		break;
453	case MPI2_FUNCTION_SCSI_TASK_MGMT:
454		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
455		func_str = "task_mgmt";
456		break;
457	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
458		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
459		func_str = "sas_iounit_ctl";
460		break;
461	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
462		frame_sz = sizeof(Mpi2SepRequest_t);
463		func_str = "enclosure";
464		break;
465	case MPI2_FUNCTION_IOC_INIT:
466		frame_sz = sizeof(Mpi2IOCInitRequest_t);
467		func_str = "ioc_init";
468		break;
469	case MPI2_FUNCTION_PORT_ENABLE:
470		frame_sz = sizeof(Mpi2PortEnableRequest_t);
471		func_str = "port_enable";
472		break;
473	case MPI2_FUNCTION_SMP_PASSTHROUGH:
474		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
475		func_str = "smp_passthru";
476		break;
477	default:
478		frame_sz = 32;
479		func_str = "unknown";
480		break;
481	}
482
483	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
484	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
485
486	_debug_dump_mf(request_hdr, frame_sz/4);
487}
488
489/**
490 * _base_display_event_data - verbose translation of firmware async events
491 * @ioc: per adapter object
492 * @mpi_reply: reply mf payload returned from firmware
493 *
494 * Return nothing.
495 */
496static void
497_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
498    Mpi2EventNotificationReply_t *mpi_reply)
499{
500	char *desc = NULL;
501	u16 event;
502
503	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
504		return;
505
506	event = le16_to_cpu(mpi_reply->Event);
507
508	switch (event) {
509	case MPI2_EVENT_LOG_DATA:
510		desc = "Log Data";
511		break;
512	case MPI2_EVENT_STATE_CHANGE:
513		desc = "Status Change";
514		break;
515	case MPI2_EVENT_HARD_RESET_RECEIVED:
516		desc = "Hard Reset Received";
517		break;
518	case MPI2_EVENT_EVENT_CHANGE:
519		desc = "Event Change";
520		break;
521	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
522		desc = "Device Status Change";
523		break;
524	case MPI2_EVENT_IR_OPERATION_STATUS:
525		desc = "IR Operation Status";
526		break;
527	case MPI2_EVENT_SAS_DISCOVERY:
528	{
529		Mpi2EventDataSasDiscovery_t *event_data =
530		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
531		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
532		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
533		    "start" : "stop");
534		if (event_data->DiscoveryStatus)
535			printk("discovery_status(0x%08x)",
536			    le32_to_cpu(event_data->DiscoveryStatus));
537		printk("\n");
538		return;
539	}
540	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
541		desc = "SAS Broadcast Primitive";
542		break;
543	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
544		desc = "SAS Init Device Status Change";
545		break;
546	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
547		desc = "SAS Init Table Overflow";
548		break;
549	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
550		desc = "SAS Topology Change List";
551		break;
552	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
553		desc = "SAS Enclosure Device Status Change";
554		break;
555	case MPI2_EVENT_IR_VOLUME:
556		desc = "IR Volume";
557		break;
558	case MPI2_EVENT_IR_PHYSICAL_DISK:
559		desc = "IR Physical Disk";
560		break;
561	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
562		desc = "IR Configuration Change List";
563		break;
564	case MPI2_EVENT_LOG_ENTRY_ADDED:
565		desc = "Log Entry Added";
566		break;
567	}
568
569	if (!desc)
570		return;
571
572	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
573}
574#endif
575
576/**
577 * _base_sas_log_info - verbose translation of firmware log info
578 * @ioc: per adapter object
579 * @log_info: log info
580 *
581 * Return nothing.
582 */
583static void
584_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc, u32 log_info)
585{
586	union loginfo_type {
587		u32	loginfo;
588		struct {
589			u32	subcode:16;
590			u32	code:8;
591			u32	originator:4;
592			u32	bus_type:4;
593		} dw;
594	};
595	union loginfo_type sas_loginfo;
596	char *originator_str = NULL;
597
598	sas_loginfo.loginfo = log_info;
599	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
600		return;
601
602	/* each nexus loss loginfo */
603	if (log_info == 0x31170000)
604		return;
605
606	/* eat the loginfos associated with task aborts */
607	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
608	    0x31140000 || log_info == 0x31130000))
609		return;
610
611	switch (sas_loginfo.dw.originator) {
612	case 0:
613		originator_str = "IOP";
614		break;
615	case 1:
616		originator_str = "PL";
617		break;
618	case 2:
619		originator_str = "IR";
620		break;
621	}
622
623	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
624	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
625	     originator_str, sas_loginfo.dw.code,
626	     sas_loginfo.dw.subcode);
627}
628
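/*
 * Worked example of the decode above (illustrative only): log_info
 * 0x31170000 splits into bus_type 0x3 (SAS), originator 0x1 ("PL"),
 * code 0x17 and sub_code 0x0000, which would print as:
 *
 *	log_info(0x31170000): originator(PL), code(0x17), sub_code(0x0000)
 *
 * That particular value is the nexus loss loginfo that is filtered out
 * near the top of the function, so it never actually reaches the printk.
 */
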
629/**
630 * _base_display_reply_info - verbose translation of a reply's IOCStatus/LogInfo
631 * @ioc: per adapter object
632 * @smid: system request message index
633 * @msix_index: MSIX table index supplied by the OS
634 * @reply: reply message frame(lower 32bit addr)
635 *
636 * Return nothing.
637 */
638static void
639_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
640    u32 reply)
641{
642	MPI2DefaultReply_t *mpi_reply;
643	u16 ioc_status;
644
645	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
646	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
647#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
648	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
649	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
650		_base_sas_ioc_info(ioc, mpi_reply,
651		   mpt2sas_base_get_msg_frame(ioc, smid));
652	}
653#endif
654	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
655		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
656}
657
658/**
659 * mpt2sas_base_done - base internal command completion routine
660 * @ioc: per adapter object
661 * @smid: system request message index
662 * @msix_index: MSIX table index supplied by the OS
663 * @reply: reply message frame(lower 32bit addr)
664 *
665 * Return 1 meaning mf should be freed from _base_interrupt
666 *        0 means the mf is freed from this function.
667 */
668u8
669mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
670    u32 reply)
671{
672	MPI2DefaultReply_t *mpi_reply;
673
674	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
675	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
676		return 1;
677
678	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
679		return 1;
680
681	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
682	if (mpi_reply) {
683		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
684		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
685	}
686	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
687	complete(&ioc->base_cmds.done);
688	return 1;
689}
690
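/*
 * Minimal sketch of the issuing side that pairs with mpt2sas_base_done()
 * (illustrative only; simplified, with locking, error and timeout handling
 * omitted).  It assumes a caller building an internal request through the
 * base_cb_idx callback slot:
 *
 *	u16 smid;
 *	Mpi2EventNotificationRequest_t *mpi_request;
 *
 *	ioc->base_cmds.status = MPT2_CMD_PENDING;
 *	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
 *	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
 *	memset(mpi_request, 0, ioc->request_sz);
 *	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
 *	init_completion(&ioc->base_cmds.done);
 *	mpt2sas_base_put_smid_default(ioc, smid);
 *	wait_for_completion_timeout(&ioc->base_cmds.done, 30 * HZ);
 *	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
 *		... inspect the copy stored in ioc->base_cmds.reply ...
 *	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
 */
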
691/**
692 * _base_async_event - main callback handler for firmware async events
693 * @ioc: per adapter object
694 * @msix_index: MSIX table index supplied by the OS
695 * @reply: reply message frame(lower 32bit addr)
696 *
697 * Return 1 meaning mf should be freed from _base_interrupt
698 *        0 means the mf is freed from this function.
699 */
700static u8
701_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
702{
703	Mpi2EventNotificationReply_t *mpi_reply;
704	Mpi2EventAckRequest_t *ack_request;
705	u16 smid;
706
707	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
708	if (!mpi_reply)
709		return 1;
710	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
711		return 1;
712#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
713	_base_display_event_data(ioc, mpi_reply);
714#endif
715	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
716		goto out;
717	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
718	if (!smid) {
719		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
720		    ioc->name, __func__);
721		goto out;
722	}
723
724	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
725	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
726	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
727	ack_request->Event = mpi_reply->Event;
728	ack_request->EventContext = mpi_reply->EventContext;
729	ack_request->VF_ID = 0;  /* TODO */
730	ack_request->VP_ID = 0;
731	mpt2sas_base_put_smid_default(ioc, smid);
732
733 out:
734
735	/* scsih callback handler */
736	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
737
738	/* ctl callback handler */
739	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
740
741	return 1;
742}
743
744/**
745 * _base_get_cb_idx - obtain the callback index
746 * @ioc: per adapter object
747 * @smid: system request message index
748 *
749 * Return callback index.
750 */
751static u8
752_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
753{
754	int i;
755	u8 cb_idx = 0xFF;
756
757	if (smid >= ioc->hi_priority_smid) {
758		if (smid < ioc->internal_smid) {
759			i = smid - ioc->hi_priority_smid;
760			cb_idx = ioc->hpr_lookup[i].cb_idx;
761		} else if (smid <= ioc->hba_queue_depth)  {
762			i = smid - ioc->internal_smid;
763			cb_idx = ioc->internal_lookup[i].cb_idx;
764		}
765	} else {
766		i = smid - 1;
767		cb_idx = ioc->scsi_lookup[i].cb_idx;
768	}
769	return cb_idx;
770}
771
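/*
 * Illustration of the smid regions used above (the boundary values are
 * made up for the example; the real ones are computed at init time):
 * with hi_priority_smid = 1001 and internal_smid = 1009, smids 1..1000
 * map to scsi_lookup[smid - 1], smids 1001..1008 map to
 * hpr_lookup[smid - hi_priority_smid], and smids 1009..hba_queue_depth
 * map to internal_lookup[smid - internal_smid].
 */
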
772/**
773 * _base_mask_interrupts - disable interrupts
774 * @ioc: per adapter object
775 *
776 * Disabling ResetIRQ, Reply and Doorbell Interrupts
777 *
778 * Return nothing.
779 */
780static void
781_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
782{
783	u32 him_register;
784
785	ioc->mask_interrupts = 1;
786	him_register = readl(&ioc->chip->HostInterruptMask);
787	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
788	writel(him_register, &ioc->chip->HostInterruptMask);
789	readl(&ioc->chip->HostInterruptMask);
790}
791
792/**
793 * _base_unmask_interrupts - enable interrupts
794 * @ioc: per adapter object
795 *
796 * Enabling only Reply Interrupts
797 *
798 * Return nothing.
799 */
800static void
801_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
802{
803	u32 him_register;
804
805	him_register = readl(&ioc->chip->HostInterruptMask);
806	him_register &= ~MPI2_HIM_RIM;
807	writel(him_register, &ioc->chip->HostInterruptMask);
808	ioc->mask_interrupts = 0;
809}
810
811union reply_descriptor {
812	u64 word;
813	struct {
814		u32 low;
815		u32 high;
816	} u;
817};
818
819/**
820 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
821 * @irq: irq number (not used)
822 * @bus_id: bus identifier cookie == pointer to MPT2SAS_ADAPTER structure
823 *
824 * Return IRQ_HANDLED if replies were processed, else IRQ_NONE (the
825 * interrupt was not for this adapter).
826 */
827static irqreturn_t
828_base_interrupt(int irq, void *bus_id)
829{
830	union reply_descriptor rd;
831	u32 completed_cmds;
832	u8 request_desript_type;
833	u16 smid;
834	u8 cb_idx;
835	u32 reply;
836	u8 msix_index;
837	struct MPT2SAS_ADAPTER *ioc = bus_id;
838	Mpi2ReplyDescriptorsUnion_t *rpf;
839	u8 rc;
840
841	if (ioc->mask_interrupts)
842		return IRQ_NONE;
843
844	rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
845	request_desript_type = rpf->Default.ReplyFlags
846	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
847	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
848		return IRQ_NONE;
849
850	completed_cmds = 0;
851	cb_idx = 0xFF;
852	do {
853		rd.word = rpf->Words;
854		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
855			goto out;
856		reply = 0;
857		cb_idx = 0xFF;
858		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
859		msix_index = rpf->Default.MSIxIndex;
860		if (request_desript_type ==
861		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
862			reply = le32_to_cpu
863				(rpf->AddressReply.ReplyFrameAddress);
864			if (reply > ioc->reply_dma_max_address ||
865			    reply < ioc->reply_dma_min_address)
866				reply = 0;
867		} else if (request_desript_type ==
868		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
869			goto next;
870		else if (request_desript_type ==
871		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
872			goto next;
873		if (smid)
874			cb_idx = _base_get_cb_idx(ioc, smid);
875		if (smid && cb_idx != 0xFF) {
876			rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
877			    reply);
878			if (reply)
879				_base_display_reply_info(ioc, smid, msix_index,
880				    reply);
881			if (rc)
882				mpt2sas_base_free_smid(ioc, smid);
883		}
884		if (!smid)
885			_base_async_event(ioc, msix_index, reply);
886
887		/* reply free queue handling */
888		if (reply) {
889			ioc->reply_free_host_index =
890			    (ioc->reply_free_host_index ==
891			    (ioc->reply_free_queue_depth - 1)) ?
892			    0 : ioc->reply_free_host_index + 1;
893			ioc->reply_free[ioc->reply_free_host_index] =
894			    cpu_to_le32(reply);
895			wmb();
896			writel(ioc->reply_free_host_index,
897			    &ioc->chip->ReplyFreeHostIndex);
898		}
899
900 next:
901
902		rpf->Words = ULLONG_MAX;
903		ioc->reply_post_host_index = (ioc->reply_post_host_index ==
904		    (ioc->reply_post_queue_depth - 1)) ? 0 :
905		    ioc->reply_post_host_index + 1;
906		request_desript_type =
907		    ioc->reply_post_free[ioc->reply_post_host_index].Default.
908		    ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
909		completed_cmds++;
910		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
911			goto out;
912		if (!ioc->reply_post_host_index)
913			rpf = ioc->reply_post_free;
914		else
915			rpf++;
916	} while (1);
917
918 out:
919
920	if (!completed_cmds)
921		return IRQ_NONE;
922
923	wmb();
924	writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
925	return IRQ_HANDLED;
926}
927
928/**
929 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
930 * @cb_idx: callback index
931 *
932 * Return nothing.
933 */
934void
935mpt2sas_base_release_callback_handler(u8 cb_idx)
936{
937	mpt_callbacks[cb_idx] = NULL;
938}
939
940/**
941 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
942 * @cb_func: callback function
943 *
944 * Returns the assigned callback index (cb_idx).
945 */
946u8
947mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
948{
949	u8 cb_idx;
950
951	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
952		if (mpt_callbacks[cb_idx] == NULL)
953			break;
954
955	mpt_callbacks[cb_idx] = cb_func;
956	return cb_idx;
957}
958
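/*
 * Usage sketch for the callback registry (illustrative only; the names
 * "example_done" and "example_cb_idx" are hypothetical and not part of
 * this driver):
 *
 *	static u8 example_cb_idx;
 *
 *	static u8
 *	example_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 *	    u32 reply)
 *	{
 *		return 1;
 *	}
 *
 *	load:	example_cb_idx =
 *		    mpt2sas_base_register_callback_handler(example_done);
 *	unload:	mpt2sas_base_release_callback_handler(example_cb_idx);
 *
 * Returning 1 from the callback tells _base_interrupt() to free the smid
 * once the callback has run.  Requests queued with example_cb_idx via the
 * mpt2sas_base_get_smid* routines are then completed through
 * mpt_callbacks[example_cb_idx] from _base_interrupt().
 */
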
959/**
960 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
961 *
962 * Return nothing.
963 */
964void
965mpt2sas_base_initialize_callback_handler(void)
966{
967	u8 cb_idx;
968
969	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
970		mpt2sas_base_release_callback_handler(cb_idx);
971}
972
973/**
974 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
975 * @ioc: per adapter object
976 * @paddr: virtual address for SGE
977 *
978 * Create a zero length scatter gather entry to ensure the IOC's hardware has
979 * something to use if the target device goes brain dead and tries
980 * to send data even when none is asked for.
981 *
982 * Return nothing.
983 */
984void
985mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
986{
987	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
988	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
989	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
990	    MPI2_SGE_FLAGS_SHIFT);
991	ioc->base_add_sg_single(paddr, flags_length, -1);
992}
993
994/**
995 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
996 * @paddr: virtual address for SGE
997 * @flags_length: SGE flags and data transfer length
998 * @dma_addr: Physical address
999 *
1000 * Return nothing.
1001 */
1002static void
1003_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1004{
1005	Mpi2SGESimple32_t *sgel = paddr;
1006
1007	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1008	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1009	sgel->FlagsLength = cpu_to_le32(flags_length);
1010	sgel->Address = cpu_to_le32(dma_addr);
1011}
1012
1013
1014/**
1015 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1016 * @paddr: virtual address for SGE
1017 * @flags_length: SGE flags and data transfer length
1018 * @dma_addr: Physical address
1019 *
1020 * Return nothing.
1021 */
1022static void
1023_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1024{
1025	Mpi2SGESimple64_t *sgel = paddr;
1026
1027	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1028	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1029	sgel->FlagsLength = cpu_to_le32(flags_length);
1030	sgel->Address = cpu_to_le64(dma_addr);
1031}
1032
1033#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1034
1035/**
1036 * _base_config_dma_addressing - set dma addressing
1037 * @ioc: per adapter object
1038 * @pdev: PCI device struct
1039 *
1040 * Returns 0 for success, non-zero for failure.
1041 */
1042static int
1043_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1044{
1045	struct sysinfo s;
1046	char *desc = NULL;
1047
1048	if (sizeof(dma_addr_t) > 4) {
1049		const uint64_t required_mask =
1050		    dma_get_required_mask(&pdev->dev);
1051		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1052		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1053		    DMA_BIT_MASK(64))) {
1054			ioc->base_add_sg_single = &_base_add_sg_single_64;
1055			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1056			desc = "64";
1057			goto out;
1058		}
1059	}
1060
1061	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1062	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1063		ioc->base_add_sg_single = &_base_add_sg_single_32;
1064		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1065		desc = "32";
1066	} else
1067		return -ENODEV;
1068
1069 out:
1070	si_meminfo(&s);
1071	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1072	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1073
1074	return 0;
1075}
1076
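/*
 * Outcome in practice (illustrative note): with 64 bit DMA the adapter
 * object ends up with base_add_sg_single = _base_add_sg_single_64 and
 * ioc->sge_size = sizeof(Mpi2SGESimple64_t); otherwise the 32 bit
 * variants are used.  ioc->sge_size then feeds the request frame size
 * math elsewhere in this file (e.g. _base_sas_ioc_info()).
 */
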
1077/**
1078 * _base_save_msix_table - backup msix vector table
1079 * @ioc: per adapter object
1080 *
1081 * This addresses an erratum where a diag reset clears out the table
1082 */
1083static void
1084_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
1085{
1086	int i;
1087
1088	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
1089		return;
1090
1091	for (i = 0; i < ioc->msix_vector_count; i++)
1092		ioc->msix_table_backup[i] = ioc->msix_table[i];
1093}
1094
1095/**
1096 * _base_restore_msix_table - this restores the msix vector table
1097 * @ioc: per adapter object
1098 *
1099 */
1100static void
1101_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
1102{
1103	int i;
1104
1105	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
1106		return;
1107
1108	for (i = 0; i < ioc->msix_vector_count; i++)
1109		ioc->msix_table[i] = ioc->msix_table_backup[i];
1110}
1111
1112/**
1113 * _base_check_enable_msix - checks MSIX capability.
1114 * @ioc: per adapter object
1115 *
1116 * Check to see if the card is capable of MSIX, and set the number
1117 * of available msix vectors.
1118 */
1119static int
1120_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1121{
1122	int base;
1123	u16 message_control;
1124	u32 msix_table_offset;
1125
1126	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1127	if (!base) {
1128		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1129		    "supported\n", ioc->name));
1130		return -EINVAL;
1131	}
1132
1133	/* get msix vector count */
1134	pci_read_config_word(ioc->pdev, base + 2, &message_control);
1135	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1136
1137	/* get msix table  */
1138	pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
1139	msix_table_offset &= 0xFFFFFFF8;
1140	ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
1141
1142	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1143	    "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
1144	    ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
1145	return 0;
1146}
1147
1148/**
1149 * _base_disable_msix - disables msix
1150 * @ioc: per adapter object
1151 *
1152 */
1153static void
1154_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1155{
1156	if (ioc->msix_enable) {
1157		pci_disable_msix(ioc->pdev);
1158		kfree(ioc->msix_table_backup);
1159		ioc->msix_table_backup = NULL;
1160		ioc->msix_enable = 0;
1161	}
1162}
1163
1164/**
1165 * _base_enable_msix - enables msix, falls back to io_apic
1166 * @ioc: per adapter object
1167 *
1168 */
1169static int
1170_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1171{
1172	struct msix_entry entries;
1173	int r;
1174	u8 try_msix = 0;
1175
1176	if (msix_disable == -1 || msix_disable == 0)
1177		try_msix = 1;
1178
1179	if (!try_msix)
1180		goto try_ioapic;
1181
1182	if (_base_check_enable_msix(ioc) != 0)
1183		goto try_ioapic;
1184
1185	ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
1186	    sizeof(u32), GFP_KERNEL);
1187	if (!ioc->msix_table_backup) {
1188		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
1189		    "msix_table_backup failed!!!\n", ioc->name));
1190		goto try_ioapic;
1191	}
1192
1193	memset(&entries, 0, sizeof(struct msix_entry));
1194	r = pci_enable_msix(ioc->pdev, &entries, 1);
1195	if (r) {
1196		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1197		    "failed (r=%d) !!!\n", ioc->name, r));
1198		goto try_ioapic;
1199	}
1200
1201	r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
1202	    ioc->name, ioc);
1203	if (r) {
1204		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
1205		    "interrupt %d !!!\n", ioc->name, entries.vector));
1206		pci_disable_msix(ioc->pdev);
1207		goto try_ioapic;
1208	}
1209
1210	ioc->pci_irq = entries.vector;
1211	ioc->msix_enable = 1;
1212	return 0;
1213
1214/* fall back to io_apic interrupt routing */
1215 try_ioapic:
1216
1217	r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
1218	    ioc->name, ioc);
1219	if (r) {
1220		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1221		    ioc->name, ioc->pdev->irq);
1222		r = -EBUSY;
1223		goto out_fail;
1224	}
1225
1226	ioc->pci_irq = ioc->pdev->irq;
1227	return 0;
1228
1229 out_fail:
1230	return r;
1231}
1232
1233/**
1234 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1235 * @ioc: per adapter object
1236 *
1237 * Returns 0 for success, non-zero for failure.
1238 */
1239int
1240mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1241{
1242	struct pci_dev *pdev = ioc->pdev;
1243	u32 memap_sz;
1244	u32 pio_sz;
1245	int i, r = 0;
1246	u64 pio_chip = 0;
1247	u64 chip_phys = 0;
1248
1249	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1250	    ioc->name, __func__));
1251
1252	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1253	if (pci_enable_device_mem(pdev)) {
1254		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1255		    "failed\n", ioc->name);
1256		return -ENODEV;
1257	}
1258
1259
1260	if (pci_request_selected_regions(pdev, ioc->bars,
1261	    MPT2SAS_DRIVER_NAME)) {
1262		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1263		    "failed\n", ioc->name);
1264		r = -ENODEV;
1265		goto out_fail;
1266	}
1267
1268	/* AER (Advanced Error Reporting) hooks */
1269	pci_enable_pcie_error_reporting(pdev);
1270
1271	pci_set_master(pdev);
1272
1273	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1274		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1275		    ioc->name, pci_name(pdev));
1276		r = -ENODEV;
1277		goto out_fail;
1278	}
1279
1280	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1281		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1282			if (pio_sz)
1283				continue;
1284			pio_chip = (u64)pci_resource_start(pdev, i);
1285			pio_sz = pci_resource_len(pdev, i);
1286		} else {
1287			if (memap_sz)
1288				continue;
1289			/* verify memory resource is valid before using */
1290			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1291				ioc->chip_phys = pci_resource_start(pdev, i);
1292				chip_phys = (u64)ioc->chip_phys;
1293				memap_sz = pci_resource_len(pdev, i);
1294				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1295				if (ioc->chip == NULL) {
1296					printk(MPT2SAS_ERR_FMT "unable to map "
1297					    "adapter memory!\n", ioc->name);
1298					r = -EINVAL;
1299					goto out_fail;
1300				}
1301			}
1302		}
1303	}
1304
1305	_base_mask_interrupts(ioc);
1306	r = _base_enable_msix(ioc);
1307	if (r)
1308		goto out_fail;
1309
1310	printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1311	    ioc->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1312	    "IO-APIC enabled"), ioc->pci_irq);
1313	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1314	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1315	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1316	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1317
1318	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1319	pci_save_state(pdev);
1320
1321	return 0;
1322
1323 out_fail:
1324	if (ioc->chip_phys)
1325		iounmap(ioc->chip);
1326	ioc->chip_phys = 0;
1327	ioc->pci_irq = -1;
1328	pci_release_selected_regions(ioc->pdev, ioc->bars);
1329	pci_disable_pcie_error_reporting(pdev);
1330	pci_disable_device(pdev);
1331	return r;
1332}
1333
1334/**
1335 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1336 * @ioc: per adapter object
1337 * @smid: system request message index (smid zero is invalid)
1338 *
1339 * Returns virt pointer to message frame.
1340 */
1341void *
1342mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1343{
1344	return (void *)(ioc->request + (smid * ioc->request_sz));
1345}
1346
1347/**
1348 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1349 * @ioc: per adapter object
1350 * @smid: system request message index
1351 *
1352 * Returns virt pointer to sense buffer.
1353 */
1354void *
1355mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1356{
1357	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1358}
1359
1360/**
1361 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1362 * @ioc: per adapter object
1363 * @smid: system request message index
1364 *
1365 * Returns phys pointer to the low 32bit address of the sense buffer.
1366 */
1367__le32
1368mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1369{
1370	return cpu_to_le32(ioc->sense_dma +
1371			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1372}
1373
1374/**
1375 * mpt2sas_base_get_reply_virt_addr - obtain reply frame's virt address
1376 * @ioc: per adapter object
1377 * @phys_addr: lower 32 physical addr of the reply
1378 *
1379 * Converts 32bit lower physical addr into a virt address.
1380 */
1381void *
1382mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1383{
1384	if (!phys_addr)
1385		return NULL;
1386	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1387}
1388
1389/**
1390 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1391 * @ioc: per adapter object
1392 * @cb_idx: callback index
1393 *
1394 * Returns smid (zero is invalid)
1395 */
1396u16
1397mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1398{
1399	unsigned long flags;
1400	struct request_tracker *request;
1401	u16 smid;
1402
1403	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1404	if (list_empty(&ioc->internal_free_list)) {
1405		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1406		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1407		    ioc->name, __func__);
1408		return 0;
1409	}
1410
1411	request = list_entry(ioc->internal_free_list.next,
1412	    struct request_tracker, tracker_list);
1413	request->cb_idx = cb_idx;
1414	smid = request->smid;
1415	list_del(&request->tracker_list);
1416	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1417	return smid;
1418}
1419
1420/**
1421 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1422 * @ioc: per adapter object
1423 * @cb_idx: callback index
1424 * @scmd: pointer to scsi command object
1425 *
1426 * Returns smid (zero is invalid)
1427 */
1428u16
1429mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1430    struct scsi_cmnd *scmd)
1431{
1432	unsigned long flags;
1433	struct request_tracker *request;
1434	u16 smid;
1435
1436	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1437	if (list_empty(&ioc->free_list)) {
1438		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1439		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1440		    ioc->name, __func__);
1441		return 0;
1442	}
1443
1444	request = list_entry(ioc->free_list.next,
1445	    struct request_tracker, tracker_list);
1446	request->scmd = scmd;
1447	request->cb_idx = cb_idx;
1448	smid = request->smid;
1449	list_del(&request->tracker_list);
1450	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1451	return smid;
1452}
1453
1454/**
1455 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1456 * @ioc: per adapter object
1457 * @cb_idx: callback index
1458 *
1459 * Returns smid (zero is invalid)
1460 */
1461u16
1462mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1463{
1464	unsigned long flags;
1465	struct request_tracker *request;
1466	u16 smid;
1467
1468	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1469	if (list_empty(&ioc->hpr_free_list)) {
1470		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1471		return 0;
1472	}
1473
1474	request = list_entry(ioc->hpr_free_list.next,
1475	    struct request_tracker, tracker_list);
1476	request->cb_idx = cb_idx;
1477	smid = request->smid;
1478	list_del(&request->tracker_list);
1479	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1480	return smid;
1481}
1482
1483
1484/**
1485 * mpt2sas_base_free_smid - put smid back on free_list
1486 * @ioc: per adapter object
1487 * @smid: system request message index
1488 *
1489 * Return nothing.
1490 */
1491void
1492mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1493{
1494	unsigned long flags;
1495	int i;
1496	struct chain_tracker *chain_req, *next;
1497
1498	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1499	if (smid >= ioc->hi_priority_smid) {
1500		if (smid < ioc->internal_smid) {
1501			/* hi-priority */
1502			i = smid - ioc->hi_priority_smid;
1503			ioc->hpr_lookup[i].cb_idx = 0xFF;
1504			list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1505			    &ioc->hpr_free_list);
1506		} else {
1507			/* internal queue */
1508			i = smid - ioc->internal_smid;
1509			ioc->internal_lookup[i].cb_idx = 0xFF;
1510			list_add_tail(&ioc->internal_lookup[i].tracker_list,
1511			    &ioc->internal_free_list);
1512		}
1513		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1514		return;
1515	}
1516
1517	/* scsiio queue */
1518	i = smid - 1;
1519	if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1520		list_for_each_entry_safe(chain_req, next,
1521		    &ioc->scsi_lookup[i].chain_list, tracker_list) {
1522			list_del_init(&chain_req->tracker_list);
1523			list_add_tail(&chain_req->tracker_list,
1524			    &ioc->free_chain_list);
1525		}
1526	}
1527	ioc->scsi_lookup[i].cb_idx = 0xFF;
1528	ioc->scsi_lookup[i].scmd = NULL;
1529	list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1530	    &ioc->free_list);
1531	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1532
1533	/*
1534	 * See _wait_for_commands_to_complete() call with regards to this code.
1535	 */
1536	if (ioc->shost_recovery && ioc->pending_io_count) {
1537		if (ioc->pending_io_count == 1)
1538			wake_up(&ioc->reset_wq);
1539		ioc->pending_io_count--;
1540	}
1541}
1542
1543/**
1544 * _base_writeq - 64 bit write to MMIO
1545 * @ioc: per adapter object
1546 * @b: data payload
1547 * @addr: address in MMIO space
1548 * @writeq_lock: spin lock
1549 *
1550 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
1551 * care of 32 bit environments where it's not guaranteed that the entire
1552 * word is sent in one transfer.
1553 */
1554#ifndef writeq
1555static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1556    spinlock_t *writeq_lock)
1557{
1558	unsigned long flags;
1559	__u64 data_out = cpu_to_le64(b);
1560
1561	spin_lock_irqsave(writeq_lock, flags);
1562	writel((u32)(data_out), addr);
1563	writel((u32)(data_out >> 32), (addr + 4));
1564	spin_unlock_irqrestore(writeq_lock, flags);
1565}
1566#else
1567static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1568    spinlock_t *writeq_lock)
1569{
1570	writeq(cpu_to_le64(b), addr);
1571}
1572#endif
1573
1574/**
1575 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1576 * @ioc: per adapter object
1577 * @smid: system request message index
1578 * @handle: device handle
1579 *
1580 * Return nothing.
1581 */
1582void
1583mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1584{
1585	Mpi2RequestDescriptorUnion_t descriptor;
1586	u64 *request = (u64 *)&descriptor;
1587
1588
1589	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1590	descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
1591	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1592	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1593	descriptor.SCSIIO.LMID = 0;
1594	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1595	    &ioc->scsi_lookup_lock);
1596}
1597
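/*
 * Simplified SCSI_IO submission flow tying the smid helpers together
 * (an illustrative sketch, not the driver's actual queuecommand path;
 * request setup and error handling are heavily abbreviated, and
 * "scsiio_cb_idx"/"handle" stand in for the callback index and device
 * handle owned by the SCSI host layer):
 *
 *	Mpi2SCSIIORequest_t *mpi_request;
 *	u16 smid;
 *
 *	smid = mpt2sas_base_get_smid_scsiio(ioc, scsiio_cb_idx, scmd);
 *	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
 *	memset(mpi_request, 0, ioc->request_sz);
 *	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 *	mpi_request->SenseBufferLowAddress =
 *	    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
 *	... fill in CDB, LUN and the SGL ...
 *	mpt2sas_base_put_smid_scsi_io(ioc, smid, handle);
 *
 * Completion then runs through _base_interrupt(), which resolves the
 * callback with _base_get_cb_idx() and frees the smid via
 * mpt2sas_base_free_smid() when the callback returns non-zero.
 */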
1598
1599/**
1600 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1601 * @ioc: per adapter object
1602 * @smid: system request message index
1603 *
1604 * Return nothing.
1605 */
1606void
1607mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1608{
1609	Mpi2RequestDescriptorUnion_t descriptor;
1610	u64 *request = (u64 *)&descriptor;
1611
1612	descriptor.HighPriority.RequestFlags =
1613	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1614	descriptor.HighPriority.MSIxIndex = 0; /* TODO */
1615	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1616	descriptor.HighPriority.LMID = 0;
1617	descriptor.HighPriority.Reserved1 = 0;
1618	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1619	    &ioc->scsi_lookup_lock);
1620}
1621
1622/**
1623 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1624 * @ioc: per adapter object
1625 * @smid: system request message index
1626 *
1627 * Return nothing.
1628 */
1629void
1630mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1631{
1632	Mpi2RequestDescriptorUnion_t descriptor;
1633	u64 *request = (u64 *)&descriptor;
1634
1635	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1636	descriptor.Default.MSIxIndex = 0; /* TODO */
1637	descriptor.Default.SMID = cpu_to_le16(smid);
1638	descriptor.Default.LMID = 0;
1639	descriptor.Default.DescriptorTypeDependent = 0;
1640	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1641	    &ioc->scsi_lookup_lock);
1642}
1643
1644/**
1645 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1646 * @ioc: per adapter object
1647 * @smid: system request message index
1648 * @io_index: value used to track the IO
1649 *
1650 * Return nothing.
1651 */
1652void
1653mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1654    u16 io_index)
1655{
1656	Mpi2RequestDescriptorUnion_t descriptor;
1657	u64 *request = (u64 *)&descriptor;
1658
1659	descriptor.SCSITarget.RequestFlags =
1660	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1661	descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
1662	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1663	descriptor.SCSITarget.LMID = 0;
1664	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1665	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1666	    &ioc->scsi_lookup_lock);
1667}
1668
1669/**
1670 * _base_display_dell_branding - Display branding string
1671 * @ioc: per adapter object
1672 *
1673 * Return nothing.
1674 */
1675static void
1676_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1677{
1678	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1679
1680	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1681		return;
1682
1683	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1684	switch (ioc->pdev->subsystem_device) {
1685	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1686		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1687		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1688		break;
1689	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1690		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1691		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1692		break;
1693	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1694		strncpy(dell_branding,
1695		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1696		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1697		break;
1698	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1699		strncpy(dell_branding,
1700		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1701		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1702		break;
1703	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
1704		strncpy(dell_branding,
1705		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
1706		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1707		break;
1708	case MPT2SAS_DELL_PERC_H200_SSDID:
1709		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
1710		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1711		break;
1712	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
1713		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
1714		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1715		break;
1716	default:
1717		sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
1718		break;
1719	}
1720
1721	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
1722	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
1723	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
1724	    ioc->pdev->subsystem_device);
1725}
1726
1727/**
1728 * _base_display_ioc_capabilities - Display IOC's capabilities.
1729 * @ioc: per adapter object
1730 *
1731 * Return nothing.
1732 */
1733static void
1734_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1735{
1736	int i = 0;
1737	char desc[16];
1738	u8 revision;
1739	u32 iounit_pg1_flags;
1740
1741	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
1742	strncpy(desc, ioc->manu_pg0.ChipName, 16);
1743	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
1744	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
1745	    ioc->name, desc,
1746	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
1747	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
1748	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
1749	   ioc->facts.FWVersion.Word & 0x000000FF,
1750	   revision,
1751	   (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24,
1752	   (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16,
1753	   (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
1754	    ioc->bios_pg3.BiosVersion & 0x000000FF);
1755
1756	_base_display_dell_branding(ioc);
1757
1758	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
1759
1760	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
1761		printk("Initiator");
1762		i++;
1763	}
1764
1765	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
1766		printk("%sTarget", i ? "," : "");
1767		i++;
1768	}
1769
1770	i = 0;
1771	printk("), ");
1772	printk("Capabilities=(");
1773
1774	if (ioc->facts.IOCCapabilities &
1775	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
1776		printk("Raid");
1777		i++;
1778	}
1779
1780	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
1781		printk("%sTLR", i ? "," : "");
1782		i++;
1783	}
1784
1785	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
1786		printk("%sMulticast", i ? "," : "");
1787		i++;
1788	}
1789
1790	if (ioc->facts.IOCCapabilities &
1791	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
1792		printk("%sBIDI Target", i ? "," : "");
1793		i++;
1794	}
1795
1796	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
1797		printk("%sEEDP", i ? "," : "");
1798		i++;
1799	}
1800
1801	if (ioc->facts.IOCCapabilities &
1802	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
1803		printk("%sSnapshot Buffer", i ? "," : "");
1804		i++;
1805	}
1806
1807	if (ioc->facts.IOCCapabilities &
1808	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
1809		printk("%sDiag Trace Buffer", i ? "," : "");
1810		i++;
1811	}
1812
1813	if (ioc->facts.IOCCapabilities &
1814	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
1815		printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : "");
1816		i++;
1817	}
1818
1819	if (ioc->facts.IOCCapabilities &
1820	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
1821		printk("%sTask Set Full", i ? "," : "");
1822		i++;
1823	}
1824
1825	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
1826	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
1827		printk("%sNCQ", i ? "," : "");
1828		i++;
1829	}
1830
1831	printk(")\n");
1832}
1833
1834/**
1835 * _base_update_missing_delay - change the missing delay timers
1836 * @ioc: per adapter object
1837 * @device_missing_delay: amount of time till device is reported missing
1838 * @io_missing_delay: interval IO is returned when there is a missing device
1839 *
1840 * Return nothing.
1841 *
1842 * Using the values passed on the command line, this function modifies the
1843 * device missing delay as well as the io missing delay.  This should be
1844 * called at driver load time.
1845 */
1846static void
1847_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
1848	u16 device_missing_delay, u8 io_missing_delay)
1849{
1850	u16 dmd, dmd_new, dmd_orignal;
1851	u8 io_missing_delay_original;
1852	u16 sz;
1853	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
1854	Mpi2ConfigReply_t mpi_reply;
1855	u8 num_phys = 0;
1856	u16 ioc_status;
1857
1858	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
1859	if (!num_phys)
1860		return;
1861
1862	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
1863	    sizeof(Mpi2SasIOUnit1PhyData_t));
1864	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
1865	if (!sas_iounit_pg1) {
1866		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1867		    ioc->name, __FILE__, __LINE__, __func__);
1868		goto out;
1869	}
1870	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
1871	    sas_iounit_pg1, sz))) {
1872		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1873		    ioc->name, __FILE__, __LINE__, __func__);
1874		goto out;
1875	}
1876	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1877	    MPI2_IOCSTATUS_MASK;
1878	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1879		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1880		    ioc->name, __FILE__, __LINE__, __func__);
1881		goto out;
1882	}
1883
1884	/* device missing delay */
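	/*
	 * Note on the encoding: ReportDeviceMissingDelay holds either seconds
	 * directly (0..0x7F) or, when MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16
	 * is set, units of 16 seconds (allowing up to 0x7F0 = 2032 seconds).
	 * For example, a requested delay of 300 seconds is stored as
	 * 300 / 16 = 18 units with the flag set, i.e. an effective delay of
	 * 288 seconds.
	 */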
1885	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
1886	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
1887		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
1888	else
1889		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
1890	dmd_original = dmd;
1891	if (device_missing_delay > 0x7F) {
1892		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
1893		    device_missing_delay;
1894		dmd = dmd / 16;
1895		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
1896	} else
1897		dmd = device_missing_delay;
1898	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
1899
1900	/* io missing delay */
1901	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
1902	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
1903
1904	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
1905	    sz)) {
1906		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
1907			dmd_new = (dmd &
1908			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
1909		else
1910			dmd_new =
1911		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
1912		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
1913	    "new(%d)\n", ioc->name, dmd_original, dmd_new);
1914		printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
1915		    "new(%d)\n", ioc->name, io_missing_delay_original,
1916		    io_missing_delay);
1917		ioc->device_missing_delay = dmd_new;
1918		ioc->io_missing_delay = io_missing_delay;
1919	}
1920
1921out:
1922	kfree(sas_iounit_pg1);
1923}
1924
1925/**
1926 * _base_static_config_pages - static start of day config pages
1927 * @ioc: per adapter object
1928 *
1929 * Return nothing.
1930 */
1931static void
1932_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1933{
1934	Mpi2ConfigReply_t mpi_reply;
1935	u32 iounit_pg1_flags;
1936
1937	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
1938	if (ioc->ir_firmware)
1939		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
1940		    &ioc->manu_pg10);
1941	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
1942	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
1943	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
1944	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
1945	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
1946	_base_display_ioc_capabilities(ioc);
1947
1948	/*
1949	 * Enable task_set_full handling in iounit_pg1 when the
1950	 * facts capabilities indicate that it is supported.
1951	 */
1952	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
1953	if ((ioc->facts.IOCCapabilities &
1954	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
1955		iounit_pg1_flags &=
1956		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
1957	else
1958		iounit_pg1_flags |=
1959		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
1960	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
1961	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
1962
1963}
1964
1965/**
1966 * _base_release_memory_pools - release memory
1967 * @ioc: per adapter object
1968 *
1969 * Free memory allocated from _base_allocate_memory_pools.
1970 *
1971 * Return nothing.
1972 */
1973static void
1974_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1975{
1976	int i;
1977
1978	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1979	    __func__));
1980
1981	if (ioc->request) {
1982		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
1983		    ioc->request,  ioc->request_dma);
1984		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
1985		    ": free\n", ioc->name, ioc->request));
1986		ioc->request = NULL;
1987	}
1988
1989	if (ioc->sense) {
1990		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
1991		if (ioc->sense_dma_pool)
1992			pci_pool_destroy(ioc->sense_dma_pool);
1993		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
1994		    ": free\n", ioc->name, ioc->sense));
1995		ioc->sense = NULL;
1996	}
1997
1998	if (ioc->reply) {
1999		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2000		if (ioc->reply_dma_pool)
2001			pci_pool_destroy(ioc->reply_dma_pool);
2002		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
2003		     ": free\n", ioc->name, ioc->reply));
2004		ioc->reply = NULL;
2005	}
2006
2007	if (ioc->reply_free) {
2008		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2009		    ioc->reply_free_dma);
2010		if (ioc->reply_free_dma_pool)
2011			pci_pool_destroy(ioc->reply_free_dma_pool);
2012		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
2013		    "(0x%p): free\n", ioc->name, ioc->reply_free));
2014		ioc->reply_free = NULL;
2015	}
2016
2017	if (ioc->reply_post_free) {
2018		pci_pool_free(ioc->reply_post_free_dma_pool,
2019		    ioc->reply_post_free, ioc->reply_post_free_dma);
2020		if (ioc->reply_post_free_dma_pool)
2021			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2022		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2023		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2024		    ioc->reply_post_free));
2025		ioc->reply_post_free = NULL;
2026	}
2027
2028	if (ioc->config_page) {
2029		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2030		    "config_page(0x%p): free\n", ioc->name,
2031		    ioc->config_page));
2032		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2033		    ioc->config_page, ioc->config_page_dma);
2034	}
2035
2036	if (ioc->scsi_lookup) {
2037		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2038		ioc->scsi_lookup = NULL;
2039	}
2040	kfree(ioc->hpr_lookup);
2041	kfree(ioc->internal_lookup);
2042	if (ioc->chain_lookup) {
2043		for (i = 0; i < ioc->chain_depth; i++) {
2044			if (ioc->chain_lookup[i].chain_buffer)
2045				pci_pool_free(ioc->chain_dma_pool,
2046				    ioc->chain_lookup[i].chain_buffer,
2047				    ioc->chain_lookup[i].chain_buffer_dma);
2048		}
2049		if (ioc->chain_dma_pool)
2050			pci_pool_destroy(ioc->chain_dma_pool);
2051	}
2052	if (ioc->chain_lookup) {
2053		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2054		ioc->chain_lookup = NULL;
2055	}
2056}
2057
2058
2059/**
2060 * _base_allocate_memory_pools - allocate start of day memory pools
2061 * @ioc: per adapter object
2062 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2063 *
2064 * Returns 0 success, anything else error
2065 */
2066static int
2067_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
2068{
2069	Mpi2IOCFactsReply_t *facts;
2070	u32 queue_size, queue_diff;
2071	u16 max_sge_elements;
2072	u16 num_of_reply_frames;
2073	u16 chains_needed_per_io;
2074	u32 sz, total_sz;
2075	u32 retry_sz;
2076	u16 max_request_credit;
2077	int i;
2078
2079	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2080	    __func__));
2081
2082	retry_sz = 0;
2083	facts = &ioc->facts;
2084
2085	/* command line tunables  for max sgl entries */
2086	if (max_sgl_entries != -1) {
2087		ioc->shost->sg_tablesize = (max_sgl_entries <
2088		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
2089		    MPT2SAS_SG_DEPTH;
2090	} else {
2091		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
2092	}
2093
2094	/* command line tunables  for max controller queue depth */
2095	if (max_queue_depth != -1)
2096		max_request_credit = (max_queue_depth < facts->RequestCredit)
2097		    ? max_queue_depth : facts->RequestCredit;
2098	else
2099		max_request_credit = facts->RequestCredit;
2100
2101	ioc->hba_queue_depth = max_request_credit;
2102	ioc->hi_priority_depth = facts->HighPriorityCredit;
2103	ioc->internal_depth = ioc->hi_priority_depth + 5;
2104
2105	/* request frame size */
2106	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2107
2108	/* reply frame size */
2109	ioc->reply_sz = facts->ReplyFrameSize * 4;
2110
2111 retry_allocation:
2112	total_sz = 0;
2113	/* calculate number of sg elements left over in the 1st frame */
2114	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2115	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
2116	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
2117
2118	/* now do the same for a chain buffer */
2119	max_sge_elements = ioc->request_sz - ioc->sge_size;
2120	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
2121
2122	ioc->chain_offset_value_for_main_message =
2123	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
2124	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
2125
2126	/*
2127	 *  MPT2SAS_SG_DEPTH = CONFIG_SCSI_MPT2SAS_MAX_SGE
2128	 */
2129	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2130	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2131	    + 1;
2132	if (chains_needed_per_io > facts->MaxChainDepth) {
2133		chains_needed_per_io = facts->MaxChainDepth;
2134		ioc->shost->sg_tablesize = min_t(u16,
2135		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2136		* chains_needed_per_io), ioc->shost->sg_tablesize);
2137	}
2138	ioc->chains_needed_per_io = chains_needed_per_io;
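	/*
	 * Worked example with purely hypothetical numbers (the real values
	 * depend on the negotiated request frame size and SGE format): with
	 * request_sz = 128, sge_size = 16 and a 64 byte fixed SCSI IO header,
	 * 3 SGEs fit in the main message and 7 in each chain frame, so a
	 * sg_tablesize of 128 needs (128 - 3) / 7 + 1 = 18 chains per IO.
	 */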
2139
2140	/* reply free queue sizing - taking into account for events */
2141	num_of_reply_frames = ioc->hba_queue_depth + 32;
2142
2143	/* number of reply frames can't be a multiple of 16 */
2144	/* decrease number of reply frames by 1 */
2145	if (!(num_of_reply_frames % 16))
2146		num_of_reply_frames--;
2147
2148	/* calculate number of reply free queue entries
2149	 *  (must be multiple of 16)
2150	 */
2151
2152	/* (we know num_of_reply_frames is not a multiple of 16) */
2153	queue_size = num_of_reply_frames;
2154	queue_size += 16 - (queue_size % 16);
2155	ioc->reply_free_queue_depth = queue_size;
2156
2157	/* reply descriptor post queue sizing */
2158	/* this size should be the number of request frames + number of reply
2159	 * frames
2160	 */
2161
2162	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
2163	/* round up to 16 byte boundary */
2164	if (queue_size % 16)
2165		queue_size += 16 - (queue_size % 16);
2166
2167	/* check against IOC maximum reply post queue depth */
2168	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
2169		queue_diff = queue_size -
2170		    facts->MaxReplyDescriptorPostQueueDepth;
2171
2172		/* round queue_diff up to multiple of 16 */
2173		if (queue_diff % 16)
2174			queue_diff += 16 - (queue_diff % 16);
2175
2176		/* adjust hba_queue_depth, reply_free_queue_depth,
2177		 * and queue_size
2178		 */
2179		ioc->hba_queue_depth -= (queue_diff / 2);
2180		ioc->reply_free_queue_depth -= (queue_diff / 2);
2181		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
2182	}
2183	ioc->reply_post_queue_depth = queue_size;
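	/*
	 * Sizing example with hypothetical numbers: for hba_queue_depth =
	 * 3000, num_of_reply_frames = 3032 (not a multiple of 16, so it is
	 * kept as is), reply_free_queue_depth is rounded up to 3040, and the
	 * reply post queue starts at 3000 + 3032 + 1 = 6033, rounded up to
	 * 6048 before being checked against MaxReplyDescriptorPostQueueDepth.
	 */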
2184
2185	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2186	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2187	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2188	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2189	    ioc->chains_needed_per_io));
2190
2191	ioc->scsiio_depth = ioc->hba_queue_depth -
2192	    ioc->hi_priority_depth - ioc->internal_depth;
2193
2194	/* set the scsi host can_queue depth
2195	 * allowing for a couple of internal commands that could be outstanding
2196	 */
2197	ioc->shost->can_queue = ioc->scsiio_depth - (2);
2198	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2199	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2200
2201	/* contiguous pool for request and chains, 16 byte align, one extra
2202	 * frame for smid=0
2203	 */
2204	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2205	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2206
2207	/* hi-priority queue */
2208	sz += (ioc->hi_priority_depth * ioc->request_sz);
2209
2210	/* internal queue */
2211	sz += (ioc->internal_depth * ioc->request_sz);
2212
2213	ioc->request_dma_sz = sz;
2214	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2215	if (!ioc->request) {
2216		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2217		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2218		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2219		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2220		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2221			goto out;
2222		retry_sz += 64;
2223		ioc->hba_queue_depth = max_request_credit - retry_sz;
2224		goto retry_allocation;
2225	}
2226
2227	if (retry_sz)
2228		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2229		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2230		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2231		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2232
2233
2234	/* hi-priority queue */
2235	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2236	    ioc->request_sz);
2237	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2238	    ioc->request_sz);
2239
2240	/* internal queue */
2241	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2242	    ioc->request_sz);
2243	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2244	    ioc->request_sz);
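	/*
	 * Layout of the single request pool (in units of request_sz frames):
	 * frame 0 is reserved for smid 0, frames 1..scsiio_depth hold SCSI IO
	 * requests, followed by hi_priority_depth high priority frames and
	 * internal_depth internal frames; the smid ranges assigned below
	 * follow the same ordering.
	 */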
2245
2246
2247	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2248	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2249	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2250	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2251	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2252	    ioc->name, (unsigned long long) ioc->request_dma));
2253	total_sz += sz;
2254
2255	sz = ioc->scsiio_depth * sizeof(struct request_tracker);
2256	ioc->scsi_lookup_pages = get_order(sz);
2257	ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
2258	    GFP_KERNEL, ioc->scsi_lookup_pages);
2259	if (!ioc->scsi_lookup) {
2260		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
2261		    "sz(%d)\n", ioc->name, (int)sz);
2262		goto out;
2263	}
2264
2265	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2266	    "depth(%d)\n", ioc->name, ioc->request,
2267	    ioc->scsiio_depth));
2268
2269	/* loop until the allocation succeeds; shrink chain_depth on failure */
2270	do {
2271		sz = ioc->chain_depth * sizeof(struct chain_tracker);
2272		ioc->chain_pages = get_order(sz);
2273		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2274		    GFP_KERNEL, ioc->chain_pages);
2275		if (ioc->chain_lookup == NULL)
2276			ioc->chain_depth -= 100;
2277	} while (ioc->chain_lookup == NULL);
2278	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2279	    ioc->request_sz, 16, 0);
2280	if (!ioc->chain_dma_pool) {
2281		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2282		    "failed\n", ioc->name);
2283		goto out;
2284	}
2285	for (i = 0; i < ioc->chain_depth; i++) {
2286		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2287		    ioc->chain_dma_pool , GFP_KERNEL,
2288		    &ioc->chain_lookup[i].chain_buffer_dma);
2289		if (!ioc->chain_lookup[i].chain_buffer) {
2290			ioc->chain_depth = i;
2291			goto chain_done;
2292		}
2293		total_sz += ioc->request_sz;
2294	}
2295chain_done:
2296	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2297	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2298	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2299	    ioc->request_sz))/1024));
2300
2301	/* initialize hi-priority queue smid's */
2302	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2303	    sizeof(struct request_tracker), GFP_KERNEL);
2304	if (!ioc->hpr_lookup) {
2305		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2306		    ioc->name);
2307		goto out;
2308	}
2309	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2310	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2311	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2312	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2313
2314	/* initialize internal queue smid's */
2315	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2316	    sizeof(struct request_tracker), GFP_KERNEL);
2317	if (!ioc->internal_lookup) {
2318		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2319		    ioc->name);
2320		goto out;
2321	}
2322	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2323	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2324	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2325	     ioc->internal_depth, ioc->internal_smid));
2326
2327	/* sense buffers, 4 byte align */
2328	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2329	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2330	    0);
2331	if (!ioc->sense_dma_pool) {
2332		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2333		    ioc->name);
2334		goto out;
2335	}
2336	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2337	    &ioc->sense_dma);
2338	if (!ioc->sense) {
2339		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2340		    ioc->name);
2341		goto out;
2342	}
2343	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2344	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2345	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2346	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2347	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2348	    ioc->name, (unsigned long long)ioc->sense_dma));
2349	total_sz += sz;
2350
2351	/* reply pool, 4 byte align */
2352	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2353	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2354	    0);
2355	if (!ioc->reply_dma_pool) {
2356		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2357		    ioc->name);
2358		goto out;
2359	}
2360	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2361	    &ioc->reply_dma);
2362	if (!ioc->reply) {
2363		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2364		    ioc->name);
2365		goto out;
2366	}
2367	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2368	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2369	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2370	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2371	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2372	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2373	    ioc->name, (unsigned long long)ioc->reply_dma));
2374	total_sz += sz;
2375
2376	/* reply free queue, 16 byte align */
2377	sz = ioc->reply_free_queue_depth * 4;
2378	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2379	    ioc->pdev, sz, 16, 0);
2380	if (!ioc->reply_free_dma_pool) {
2381		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2382		    "failed\n", ioc->name);
2383		goto out;
2384	}
2385	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2386	    &ioc->reply_free_dma);
2387	if (!ioc->reply_free) {
2388		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2389		    "failed\n", ioc->name);
2390		goto out;
2391	}
2392	memset(ioc->reply_free, 0, sz);
2393	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2394	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2395	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2396	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2397	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2398	total_sz += sz;
2399
2400	/* reply post queue, 16 byte align */
2401	sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
2402	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2403	    ioc->pdev, sz, 16, 0);
2404	if (!ioc->reply_post_free_dma_pool) {
2405		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2406		    "failed\n", ioc->name);
2407		goto out;
2408	}
2409	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2410	    GFP_KERNEL, &ioc->reply_post_free_dma);
2411	if (!ioc->reply_post_free) {
2412		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2413		    "failed\n", ioc->name);
2414		goto out;
2415	}
2416	memset(ioc->reply_post_free, 0, sz);
2417	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2418	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2419	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2420	    sz/1024));
2421	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2422	    "(0x%llx)\n", ioc->name, (unsigned long long)
2423	    ioc->reply_post_free_dma));
2424	total_sz += sz;
2425
2426	ioc->config_page_sz = 512;
2427	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2428	    ioc->config_page_sz, &ioc->config_page_dma);
2429	if (!ioc->config_page) {
2430		printk(MPT2SAS_ERR_FMT "config page: pci_alloc_consistent "
2431		    "failed\n", ioc->name);
2432		goto out;
2433	}
2434	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2435	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2436	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2437	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2438	total_sz += ioc->config_page_sz;
2439
2440	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2441	    ioc->name, total_sz/1024);
2442	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2443	    "Max Controller Queue Depth(%d)\n",
2444	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2445	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2446	    ioc->name, ioc->shost->sg_tablesize);
2447	return 0;
2448
2449 out:
2450	return -ENOMEM;
2451}
2452
2453
2454/**
2455 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2456 * @ioc: Pointer to MPT2SAS_ADAPTER structure
2457 * @cooked: Request raw or cooked IOC state
2458 *
2459 * Returns all IOC Doorbell register bits if cooked==0, else just the
2460 * Doorbell bits in MPI2_IOC_STATE_MASK.
2461 */
2462u32
2463mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2464{
2465	u32 s, sc;
2466
2467	s = readl(&ioc->chip->Doorbell);
2468	sc = s & MPI2_IOC_STATE_MASK;
2469	return cooked ? sc : s;
2470}
2471
2472/**
2473 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
2474 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2475 * @timeout: timeout in seconds
2476 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2477 *
2478 * Returns 0 for success, non-zero for failure.
2479 */
2480static int
2481_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2482    int sleep_flag)
2483{
2484	u32 count, cntdn;
2485	u32 current_state;
2486
2487	count = 0;
2488	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2489	do {
2490		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2491		if (current_state == ioc_state)
2492			return 0;
2493		if (count && current_state == MPI2_IOC_STATE_FAULT)
2494			break;
2495		if (sleep_flag == CAN_SLEEP)
2496			msleep(1);
2497		else
2498			udelay(500);
2499		count++;
2500	} while (--cntdn);
2501
2502	return current_state;
2503}
2504
2505/**
2506 * _base_wait_for_doorbell_int - waiting for a controller interrupt (generated by
2507 * a write to the doorbell)
2508 * @ioc: per adapter object
2509 * @timeout: timeout in seconds
2510 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2511 *
2512 * Returns 0 for success, non-zero for failure.
2513 *
2514 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2515 */
2516static int
2517_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2518    int sleep_flag)
2519{
2520	u32 cntdn, count;
2521	u32 int_status;
2522
2523	count = 0;
2524	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2525	do {
2526		int_status = readl(&ioc->chip->HostInterruptStatus);
2527		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2528			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2529			    "successful count(%d), timeout(%d)\n", ioc->name,
2530			    __func__, count, timeout));
2531			return 0;
2532		}
2533		if (sleep_flag == CAN_SLEEP)
2534			msleep(1);
2535		else
2536			udelay(500);
2537		count++;
2538	} while (--cntdn);
2539
2540	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2541	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2542	return -EFAULT;
2543}
2544
2545/**
2546 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2547 * @ioc: per adapter object
2548 * @timeout: timeout in seconds
2549 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2550 *
2551 * Returns 0 for success, non-zero for failure.
2552 *
2553 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2554 * doorbell.
2555 */
2556static int
2557_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2558    int sleep_flag)
2559{
2560	u32 cntdn, count;
2561	u32 int_status;
2562	u32 doorbell;
2563
2564	count = 0;
2565	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2566	do {
2567		int_status = readl(&ioc->chip->HostInterruptStatus);
2568		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2569			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2570			    "successful count(%d), timeout(%d)\n", ioc->name,
2571			    __func__, count, timeout));
2572			return 0;
2573		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2574			doorbell = readl(&ioc->chip->Doorbell);
2575			if ((doorbell & MPI2_IOC_STATE_MASK) ==
2576			    MPI2_IOC_STATE_FAULT) {
2577				mpt2sas_base_fault_info(ioc , doorbell);
2578				return -EFAULT;
2579			}
2580		} else if (int_status == 0xFFFFFFFF)
2581			goto out;
2582
2583		if (sleep_flag == CAN_SLEEP)
2584			msleep(1);
2585		else
2586			udelay(500);
2587		count++;
2588	} while (--cntdn);
2589
2590 out:
2591	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2592	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2593	return -EFAULT;
2594}
2595
2596/**
2597 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2598 * @ioc: per adapter object
2599 * @timeout: timeout in seconds
2600 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2601 *
2602 * Returns 0 for success, non-zero for failure.
2603 *
2604 */
2605static int
2606_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2607    int sleep_flag)
2608{
2609	u32 cntdn, count;
2610	u32 doorbell_reg;
2611
2612	count = 0;
2613	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2614	do {
2615		doorbell_reg = readl(&ioc->chip->Doorbell);
2616		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2617			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2618			    "successful count(%d), timeout(%d)\n", ioc->name,
2619			    __func__, count, timeout));
2620			return 0;
2621		}
2622		if (sleep_flag == CAN_SLEEP)
2623			msleep(1);
2624		else
2625			udelay(500);
2626		count++;
2627	} while (--cntdn);
2628
2629	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2630	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2631	return -EFAULT;
2632}
2633
2634/**
2635 * _base_send_ioc_reset - send doorbell reset
2636 * @ioc: per adapter object
2637 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
2638 * @timeout: timeout in seconds
2639 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2640 *
2641 * Returns 0 for success, non-zero for failure.
2642 */
2643static int
2644_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2645    int sleep_flag)
2646{
2647	u32 ioc_state;
2648	int r = 0;
2649
2650	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2651		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2652		    ioc->name, __func__);
2653		return -EFAULT;
2654	}
2655
2656	if (!(ioc->facts.IOCCapabilities &
2657	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
2658		return -EFAULT;
2659
2660	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
2661
2662	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
2663	    &ioc->chip->Doorbell);
2664	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
2665		r = -EFAULT;
2666		goto out;
2667	}
2668	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
2669	    timeout, sleep_flag);
2670	if (ioc_state) {
2671		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
2672		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2673		r = -EFAULT;
2674		goto out;
2675	}
2676 out:
2677	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
2678	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
2679	return r;
2680}
2681
2682/**
2683 * _base_handshake_req_reply_wait - send request thru doorbell interface
2684 * @ioc: per adapter object
2685 * @request_bytes: request length
2686 * @request: pointer having request payload
2687 * @reply_bytes: reply length
2688 * @reply: pointer to reply payload
2689 * @timeout: timeout in seconds
2690 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2691 *
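 * The handshake protocol, as implemented below: write the HANDSHAKE
 * function and the request length (in dwords) to the Doorbell, wait for
 * the IOC to acknowledge, write the request one dword at a time, then read
 * the reply back from the Doorbell one 16-bit word at a time, clearing the
 * HostInterruptStatus register after each word.
 *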
2692 * Returns 0 for success, non-zero for failure.
2693 */
2694static int
2695_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2696    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
2697{
2698	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
2699	int i;
2700	u8 failed;
2701	u16 dummy;
2702	u32 *mfp;
2703
2704	/* make sure doorbell is not in use */
2705	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
2706		printk(MPT2SAS_ERR_FMT "doorbell is in use "
2707		    " (line=%d)\n", ioc->name, __LINE__);
2708		return -EFAULT;
2709	}
2710
2711	/* clear pending doorbell interrupts from previous state changes */
2712	if (readl(&ioc->chip->HostInterruptStatus) &
2713	    MPI2_HIS_IOC2SYS_DB_STATUS)
2714		writel(0, &ioc->chip->HostInterruptStatus);
2715
2716	/* send message to ioc */
2717	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
2718	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
2719	    &ioc->chip->Doorbell);
2720
2721	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
2722		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2723		   "int failed (line=%d)\n", ioc->name, __LINE__);
2724		return -EFAULT;
2725	}
2726	writel(0, &ioc->chip->HostInterruptStatus);
2727
2728	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
2729		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2730		    "ack failed (line=%d)\n", ioc->name, __LINE__);
2731		return -EFAULT;
2732	}
2733
2734	/* send message 32-bits at a time */
2735	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
2736		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
2737		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
2738			failed = 1;
2739	}
2740
2741	if (failed) {
2742		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2743		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
2744		return -EFAULT;
2745	}
2746
2747	/* now wait for the reply */
2748	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
2749		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2750		   "int failed (line=%d)\n", ioc->name, __LINE__);
2751		return -EFAULT;
2752	}
2753
2754	/* read the first two 16-bit words of the reply; they carry the MsgLength */
2755	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2756	    & MPI2_DOORBELL_DATA_MASK);
2757	writel(0, &ioc->chip->HostInterruptStatus);
2758	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2759		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2760		   "int failed (line=%d)\n", ioc->name, __LINE__);
2761		return -EFAULT;
2762	}
2763	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2764	    & MPI2_DOORBELL_DATA_MASK);
2765	writel(0, &ioc->chip->HostInterruptStatus);
2766
2767	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
2768		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2769			printk(MPT2SAS_ERR_FMT "doorbell "
2770			    "handshake int failed (line=%d)\n", ioc->name,
2771			    __LINE__);
2772			return -EFAULT;
2773		}
2774		if (i >=  reply_bytes/2) /* overflow case */
2775			dummy = readl(&ioc->chip->Doorbell);
2776		else
2777			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2778			    & MPI2_DOORBELL_DATA_MASK);
2779		writel(0, &ioc->chip->HostInterruptStatus);
2780	}
2781
2782	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
2783	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
2784		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
2785		    " (line=%d)\n", ioc->name, __LINE__));
2786	}
2787	writel(0, &ioc->chip->HostInterruptStatus);
2788
2789	if (ioc->logging_level & MPT_DEBUG_INIT) {
2790		mfp = (u32 *)reply;
2791		printk(KERN_INFO "\toffset:data\n");
2792		for (i = 0; i < reply_bytes/4; i++)
2793			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
2794			    le32_to_cpu(mfp[i]));
2795	}
2796	return 0;
2797}
2798
2799/**
2800 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
2801 * @ioc: per adapter object
2802 * @mpi_reply: the reply payload from FW
2803 * @mpi_request: the request payload sent to FW
2804 *
2805 * The SAS IO Unit Control Request message allows the host to perform
2806 * low-level operations such as resets on the PHYs of the IO Unit. It also
2807 * allows the host to obtain the IOC-assigned device handle for a device,
2808 * given other identifying information about the device, and to remove IOC
2809 * resources associated with the device.
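 *
 * A minimal usage sketch (illustrative only; phy_number is a placeholder
 * and error handling is omitted):
 *
 *	Mpi2SasIoUnitControlRequest_t req;
 *	Mpi2SasIoUnitControlReply_t reply;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 *	req.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
 *	req.PhyNum = phy_number;
 *	mpt2sas_base_sas_iounit_control(ioc, &reply, &req);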
2810 *
2811 * Returns 0 for success, non-zero for failure.
2812 */
2813int
2814mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
2815    Mpi2SasIoUnitControlReply_t *mpi_reply,
2816    Mpi2SasIoUnitControlRequest_t *mpi_request)
2817{
2818	u16 smid;
2819	u32 ioc_state;
2820	unsigned long timeleft;
2821	u8 issue_reset = 0;
2822	int rc;
2823	void *request;
2824	u16 wait_state_count;
2825
2826	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2827	    __func__));
2828
2829	mutex_lock(&ioc->base_cmds.mutex);
2830
2831	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
2832		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
2833		    ioc->name, __func__);
2834		rc = -EAGAIN;
2835		goto out;
2836	}
2837
2838	wait_state_count = 0;
2839	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2840	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2841		if (wait_state_count++ == 10) {
2842			printk(MPT2SAS_ERR_FMT
2843			    "%s: failed due to ioc not operational\n",
2844			    ioc->name, __func__);
2845			rc = -EFAULT;
2846			goto out;
2847		}
2848		ssleep(1);
2849		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2850		printk(MPT2SAS_INFO_FMT "%s: waiting for "
2851		    "operational state(count=%d)\n", ioc->name,
2852		    __func__, wait_state_count);
2853	}
2854
2855	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2856	if (!smid) {
2857		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2858		    ioc->name, __func__);
2859		rc = -EAGAIN;
2860		goto out;
2861	}
2862
2863	rc = 0;
2864	ioc->base_cmds.status = MPT2_CMD_PENDING;
2865	request = mpt2sas_base_get_msg_frame(ioc, smid);
2866	ioc->base_cmds.smid = smid;
2867	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
2868	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2869	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
2870		ioc->ioc_link_reset_in_progress = 1;
2871	mpt2sas_base_put_smid_default(ioc, smid);
2872	init_completion(&ioc->base_cmds.done);
2873	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2874	    msecs_to_jiffies(10000));
2875	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2876	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
2877	    ioc->ioc_link_reset_in_progress)
2878		ioc->ioc_link_reset_in_progress = 0;
2879	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2880		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2881		    ioc->name, __func__);
2882		_debug_dump_mf(mpi_request,
2883		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
2884		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
2885			issue_reset = 1;
2886		goto issue_host_reset;
2887	}
2888	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
2889		memcpy(mpi_reply, ioc->base_cmds.reply,
2890		    sizeof(Mpi2SasIoUnitControlReply_t));
2891	else
2892		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
2893	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2894	goto out;
2895
2896 issue_host_reset:
2897	if (issue_reset)
2898		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2899		    FORCE_BIG_HAMMER);
2900	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2901	rc = -EFAULT;
2902 out:
2903	mutex_unlock(&ioc->base_cmds.mutex);
2904	return rc;
2905}
2906
2907
2908/**
2909 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
2910 * @ioc: per adapter object
2911 * @mpi_reply: the reply payload from FW
2912 * @mpi_request: the request payload sent to FW
2913 *
2914 * The SCSI Enclosure Processor request message causes the IOC to
2915 * communicate with SES devices to control LED status signals.
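 *
 * A minimal usage sketch (illustrative only; the handle value is a
 * placeholder and error handling is omitted):
 *
 *	Mpi2SepRequest_t req;
 *	Mpi2SepReply_t reply;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
 *	req.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
 *	req.DevHandle = cpu_to_le16(handle);
 *	req.SlotStatus = cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
 *	mpt2sas_base_scsi_enclosure_processor(ioc, &reply, &req);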
2916 *
2917 * Returns 0 for success, non-zero for failure.
2918 */
2919int
2920mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
2921    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
2922{
2923	u16 smid;
2924	u32 ioc_state;
2925	unsigned long timeleft;
2926	u8 issue_reset = 0;
2927	int rc;
2928	void *request;
2929	u16 wait_state_count;
2930
2931	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2932	    __func__));
2933
2934	mutex_lock(&ioc->base_cmds.mutex);
2935
2936	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
2937		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
2938		    ioc->name, __func__);
2939		rc = -EAGAIN;
2940		goto out;
2941	}
2942
2943	wait_state_count = 0;
2944	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2945	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2946		if (wait_state_count++ == 10) {
2947			printk(MPT2SAS_ERR_FMT
2948			    "%s: failed due to ioc not operational\n",
2949			    ioc->name, __func__);
2950			rc = -EFAULT;
2951			goto out;
2952		}
2953		ssleep(1);
2954		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2955		printk(MPT2SAS_INFO_FMT "%s: waiting for "
2956		    "operational state(count=%d)\n", ioc->name,
2957		    __func__, wait_state_count);
2958	}
2959
2960	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2961	if (!smid) {
2962		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2963		    ioc->name, __func__);
2964		rc = -EAGAIN;
2965		goto out;
2966	}
2967
2968	rc = 0;
2969	ioc->base_cmds.status = MPT2_CMD_PENDING;
2970	request = mpt2sas_base_get_msg_frame(ioc, smid);
2971	ioc->base_cmds.smid = smid;
2972	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
2973	mpt2sas_base_put_smid_default(ioc, smid);
2974	init_completion(&ioc->base_cmds.done);
2975	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2976	    msecs_to_jiffies(10000));
2977	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2978		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2979		    ioc->name, __func__);
2980		_debug_dump_mf(mpi_request,
2981		    sizeof(Mpi2SepRequest_t)/4);
2982		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
2983			issue_reset = 1;
2984		goto issue_host_reset;
2985	}
2986	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
2987		memcpy(mpi_reply, ioc->base_cmds.reply,
2988		    sizeof(Mpi2SepReply_t));
2989	else
2990		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
2991	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2992	goto out;
2993
2994 issue_host_reset:
2995	if (issue_reset)
2996		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2997		    FORCE_BIG_HAMMER);
2998	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2999	rc = -EFAULT;
3000 out:
3001	mutex_unlock(&ioc->base_cmds.mutex);
3002	return rc;
3003}
3004
3005/**
3006 * _base_get_port_facts - obtain port facts reply and save in ioc
3007 * @ioc: per adapter object
 * @port: port number
3008 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3009 *
3010 * Returns 0 for success, non-zero for failure.
3011 */
3012static int
3013_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3014{
3015	Mpi2PortFactsRequest_t mpi_request;
3016	Mpi2PortFactsReply_t mpi_reply, *pfacts;
3017	int mpi_reply_sz, mpi_request_sz, r;
3018
3019	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3020	    __func__));
3021
3022	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3023	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3024	memset(&mpi_request, 0, mpi_request_sz);
3025	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3026	mpi_request.PortNumber = port;
3027	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3028	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3029
3030	if (r != 0) {
3031		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3032		    ioc->name, __func__, r);
3033		return r;
3034	}
3035
3036	pfacts = &ioc->pfacts[port];
3037	memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
3038	pfacts->PortNumber = mpi_reply.PortNumber;
3039	pfacts->VP_ID = mpi_reply.VP_ID;
3040	pfacts->VF_ID = mpi_reply.VF_ID;
3041	pfacts->MaxPostedCmdBuffers =
3042	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3043
3044	return 0;
3045}
3046
3047/**
3048 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3049 * @ioc: per adapter object
3050 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3051 *
3052 * Returns 0 for success, non-zero for failure.
3053 */
3054static int
3055_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3056{
3057	Mpi2IOCFactsRequest_t mpi_request;
3058	Mpi2IOCFactsReply_t mpi_reply, *facts;
3059	int mpi_reply_sz, mpi_request_sz, r;
3060
3061	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3062	    __func__));
3063
3064	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3065	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3066	memset(&mpi_request, 0, mpi_request_sz);
3067	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3068	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3069	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3070
3071	if (r != 0) {
3072		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3073		    ioc->name, __func__, r);
3074		return r;
3075	}
3076
3077	facts = &ioc->facts;
3078	memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
3079	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3080	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3081	facts->VP_ID = mpi_reply.VP_ID;
3082	facts->VF_ID = mpi_reply.VF_ID;
3083	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3084	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3085	facts->WhoInit = mpi_reply.WhoInit;
3086	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3087	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3088	facts->MaxReplyDescriptorPostQueueDepth =
3089	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3090	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3091	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3092	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3093		ioc->ir_firmware = 1;
3094	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3095	facts->IOCRequestFrameSize =
3096	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3097	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3098	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3099	ioc->shost->max_id = -1;
3100	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3101	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3102	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3103	facts->HighPriorityCredit =
3104	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3105	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3106	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3107
3108	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
3109	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
3110	    facts->MaxChainDepth));
3111	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
3112	    "reply frame size(%d)\n", ioc->name,
3113	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3114	return 0;
3115}
3116
3117/**
3118 * _base_send_ioc_init - send ioc_init to firmware
3119 * @ioc: per adapter object
3120 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3121 *
3122 * Returns 0 for success, non-zero for failure.
3123 */
3124static int
3125_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3126{
3127	Mpi2IOCInitRequest_t mpi_request;
3128	Mpi2IOCInitReply_t mpi_reply;
3129	int r;
3130	struct timeval current_time;
3131	u16 ioc_status;
3132
3133	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3134	    __func__));
3135
3136	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3137	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3138	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3139	mpi_request.VF_ID = 0; /* TODO */
3140	mpi_request.VP_ID = 0;
3141	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3142	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3143
3144	/* In MPI Revision I (0xA), the SystemReplyFrameSize (offset 0x18) was
3145	 * removed and made reserved.  Older firmware still needs this field to
3146	 * be set, and it was decided that the Reply and Request frame sizes are
3147	 * the same.
3148	 */
3149	if ((ioc->facts.HeaderVersion >> 8) < 0xA) {
3150		mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz);
3151/*		mpi_request.SystemReplyFrameSize =
3152 *		 cpu_to_le16(ioc->reply_sz);
3153 */
3154	}
3155
3156	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3157	mpi_request.ReplyDescriptorPostQueueDepth =
3158	    cpu_to_le16(ioc->reply_post_queue_depth);
3159	mpi_request.ReplyFreeQueueDepth =
3160	    cpu_to_le16(ioc->reply_free_queue_depth);
3161
3162#if BITS_PER_LONG > 32
3163	mpi_request.SenseBufferAddressHigh =
3164	    cpu_to_le32(ioc->sense_dma >> 32);
3165	mpi_request.SystemReplyAddressHigh =
3166	    cpu_to_le32(ioc->reply_dma >> 32);
3167	mpi_request.SystemRequestFrameBaseAddress =
3168	    cpu_to_le64(ioc->request_dma);
3169	mpi_request.ReplyFreeQueueAddress =
3170	    cpu_to_le64(ioc->reply_free_dma);
3171	mpi_request.ReplyDescriptorPostQueueAddress =
3172	    cpu_to_le64(ioc->reply_post_free_dma);
3173#else
3174	mpi_request.SystemRequestFrameBaseAddress =
3175	    cpu_to_le32(ioc->request_dma);
3176	mpi_request.ReplyFreeQueueAddress =
3177	    cpu_to_le32(ioc->reply_free_dma);
3178	mpi_request.ReplyDescriptorPostQueueAddress =
3179	    cpu_to_le32(ioc->reply_post_free_dma);
3180#endif
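	/*
	 * On 64-bit kernels the upper 32 bits of the sense and reply buffer
	 * addresses are programmed separately and the queue base addresses
	 * are supplied as full 64-bit values; on 32-bit kernels only the
	 * lower 32 bits are passed.
	 */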
3181
3182	/* This time stamp specifies number of milliseconds
3183	 * since epoch ~ midnight January 1, 1970.
3184	 */
3185	do_gettimeofday(&current_time);
3186	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3187	    (current_time.tv_usec / 1000));
3188
3189	if (ioc->logging_level & MPT_DEBUG_INIT) {
3190		u32 *mfp;
3191		int i;
3192
3193		mfp = (u32 *)&mpi_request;
3194		printk(KERN_INFO "\toffset:data\n");
3195		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3196			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3197			    le32_to_cpu(mfp[i]));
3198	}
3199
3200	r = _base_handshake_req_reply_wait(ioc,
3201	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3202	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3203	    sleep_flag);
3204
3205	if (r != 0) {
3206		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3207		    ioc->name, __func__, r);
3208		return r;
3209	}
3210
3211	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3212	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3213	    mpi_reply.IOCLogInfo) {
3214		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3215		r = -EIO;
3216	}
3217
3218	return r;
3219}
3220
3221/**
3222 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3223 * @ioc: per adapter object
3224 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3225 *
3226 * Returns 0 for success, non-zero for failure.
3227 */
3228static int
3229_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3230{
3231	Mpi2PortEnableRequest_t *mpi_request;
3232	u32 ioc_state;
3233	unsigned long timeleft;
3234	int r = 0;
3235	u16 smid;
3236
3237	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3238
3239	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3240		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3241		    ioc->name, __func__);
3242		return -EAGAIN;
3243	}
3244
3245	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3246	if (!smid) {
3247		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3248		    ioc->name, __func__);
3249		return -EAGAIN;
3250	}
3251
3252	ioc->base_cmds.status = MPT2_CMD_PENDING;
3253	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3254	ioc->base_cmds.smid = smid;
3255	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3256	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3257	mpi_request->VF_ID = 0; /* TODO */
3258	mpi_request->VP_ID = 0;
3259
3260	mpt2sas_base_put_smid_default(ioc, smid);
3261	init_completion(&ioc->base_cmds.done);
3262	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3263	    300*HZ);
3264	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3265		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3266		    ioc->name, __func__);
3267		_debug_dump_mf(mpi_request,
3268		    sizeof(Mpi2PortEnableRequest_t)/4);
3269		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3270			r = -EFAULT;
3271		else
3272			r = -ETIME;
3273		goto out;
3274	} else
3275		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3276		    ioc->name, __func__));
3277
3278	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
3279	    60, sleep_flag);
3280	if (ioc_state) {
3281		printk(MPT2SAS_ERR_FMT "%s: failed going to operational state "
3282		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3283		r = -EFAULT;
3284	}
3285 out:
3286	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3287	printk(MPT2SAS_INFO_FMT "port enable: %s\n",
3288	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3289	return r;
3290}
3291
3292/**
3293 * _base_unmask_events - turn on notification for this event
3294 * @ioc: per adapter object
3295 * @event: firmware event
3296 *
3297 * The mask is stored in ioc->event_masks.
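 *
 * For example, unmasking event 0x16 clears bit 22 of event_masks[0]; a
 * cleared bit means the event is not masked, so the IOC will generate
 * notifications for it.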
3298 */
3299static void
3300_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3301{
3302	u32 desired_event;
3303
3304	if (event >= 128)
3305		return;
3306
3307	desired_event = (1 << (event % 32));
3308
3309	if (event < 32)
3310		ioc->event_masks[0] &= ~desired_event;
3311	else if (event < 64)
3312		ioc->event_masks[1] &= ~desired_event;
3313	else if (event < 96)
3314		ioc->event_masks[2] &= ~desired_event;
3315	else if (event < 128)
3316		ioc->event_masks[3] &= ~desired_event;
3317}
3318
3319/**
3320 * _base_event_notification - send event notification
3321 * @ioc: per adapter object
3322 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3323 *
3324 * Returns 0 for success, non-zero for failure.
3325 */
3326static int
3327_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3328{
3329	Mpi2EventNotificationRequest_t *mpi_request;
3330	unsigned long timeleft;
3331	u16 smid;
3332	int r = 0;
3333	int i;
3334
3335	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3336	    __func__));
3337
3338	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3339		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3340		    ioc->name, __func__);
3341		return -EAGAIN;
3342	}
3343
3344	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3345	if (!smid) {
3346		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3347		    ioc->name, __func__);
3348		return -EAGAIN;
3349	}
3350	ioc->base_cmds.status = MPT2_CMD_PENDING;
3351	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3352	ioc->base_cmds.smid = smid;
3353	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3354	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3355	mpi_request->VF_ID = 0; /* TODO */
3356	mpi_request->VP_ID = 0;
3357	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3358		mpi_request->EventMasks[i] =
3359		    cpu_to_le32(ioc->event_masks[i]);
3360	mpt2sas_base_put_smid_default(ioc, smid);
3361	init_completion(&ioc->base_cmds.done);
3362	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3363	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3364		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3365		    ioc->name, __func__);
3366		_debug_dump_mf(mpi_request,
3367		    sizeof(Mpi2EventNotificationRequest_t)/4);
3368		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3369			r = -EFAULT;
3370		else
3371			r = -ETIME;
3372	} else
3373		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3374		    ioc->name, __func__));
3375	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3376	return r;
3377}
3378
3379/**
3380 * mpt2sas_base_validate_event_type - validating event types
3381 * @ioc: per adapter object
3382 * @event: firmware event
3383 *
3384 * This will turn on firmware event notification when the application
3385 * asks for that event. We don't mask events that are already enabled.
3386 */
3387void
3388mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
3389{
3390	int i, j;
3391	u32 event_mask, desired_event;
3392	u8 send_update_to_fw;
3393
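	/*
	 * event_type[] is a bitmap from the application with a bit set for
	 * each event it wants.  A set bit in ioc->event_masks[] means that
	 * event is currently masked, so for every requested event that is
	 * still masked, clear the mask bit and remember that the firmware
	 * must be told about the change.
	 */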
3394	for (i = 0, send_update_to_fw = 0; i <
3395	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3396		event_mask = ~event_type[i];
3397		desired_event = 1;
3398		for (j = 0; j < 32; j++) {
3399			if (!(event_mask & desired_event) &&
3400			    (ioc->event_masks[i] & desired_event)) {
3401				ioc->event_masks[i] &= ~desired_event;
3402				send_update_to_fw = 1;
3403			}
3404			desired_event = (desired_event << 1);
3405		}
3406	}
3407
3408	if (!send_update_to_fw)
3409		return;
3410
3411	mutex_lock(&ioc->base_cmds.mutex);
3412	_base_event_notification(ioc, CAN_SLEEP);
3413	mutex_unlock(&ioc->base_cmds.mutex);
3414}
3415
3416/**
3417 * _base_diag_reset - the "big hammer" start of day reset
3418 * @ioc: per adapter object
3419 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3420 *
3421 * Returns 0 for success, non-zero for failure.
3422 */
3423static int
3424_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3425{
3426	u32 host_diagnostic;
3427	u32 ioc_state;
3428	u32 count;
3429	u32 hcb_size;
3430
3431	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3432
3433	_base_save_msix_table(ioc);
3434
3435	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3436	    ioc->name));
3437
3438	count = 0;
3439	do {
3440		/* Write magic sequence to WriteSequence register
3441		 * Loop until in diagnostic mode
3442		 */
3443		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
3444		    "sequence\n", ioc->name));
3445		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3446		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
3447		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
3448		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
3449		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
3450		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
3451		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
3452
3453		/* wait 100 msec */
3454		if (sleep_flag == CAN_SLEEP)
3455			msleep(100);
3456		else
3457			mdelay(100);
3458
3459		if (count++ > 20)
3460			goto out;
3461
3462		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3463		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
3464		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
3465		    ioc->name, count, host_diagnostic));
3466
3467	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
3468
3469	hcb_size = readl(&ioc->chip->HCBSize);
3470
3471	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
3472	    ioc->name));
3473	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3474	     &ioc->chip->HostDiagnostic);
3475
3476	/* don't access any registers for 50 milliseconds */
3477	msleep(50);
3478
3479	/* poll up to 3000000 times, ~1 msec apart, for the reset to complete */
3480	for (count = 0; count < 3000000 ; count++) {
3481
3482		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3483
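		/* all ones from MMIO means the adapter has dropped off the PCI bus */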
3484		if (host_diagnostic == 0xFFFFFFFF)
3485			goto out;
3486		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
3487			break;
3488
3489		/* wait 1 msec between polls */
3490		if (sleep_flag == CAN_SLEEP)
3491			msleep(1);
3492		else
3493			mdelay(1);
3494	}
3495
3496	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3497
3498		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
3499		    "assuming the HCB Address points to good F/W\n",
3500		    ioc->name));
3501		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3502		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3503		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3504
3505		drsprintk(ioc, printk(MPT2SAS_INFO_FMT
3506		    "re-enable the HCDW\n", ioc->name));
3507		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3508		    &ioc->chip->HCBSize);
3509	}
3510
3511	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
3512	    ioc->name));
3513	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3514	    &ioc->chip->HostDiagnostic);
3515
3516	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
3517	    "diagnostic register\n", ioc->name));
3518	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3519
3520	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
3521	    "READY state\n", ioc->name));
3522	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3523	    sleep_flag);
3524	if (ioc_state) {
3525		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3526		    "(ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3527		goto out;
3528	}
3529
3530	_base_restore_msix_table(ioc);
3531	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
3532	return 0;
3533
3534 out:
3535	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
3536	return -EFAULT;
3537}
3538
3539/**
3540 * _base_make_ioc_ready - put controller in READY state
3541 * @ioc: per adapter object
3542 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3543 * @type: FORCE_BIG_HAMMER or SOFT_RESET
3544 *
3545 * Returns 0 for success, non-zero for failure.
3546 */
3547static int
3548_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3549    enum reset_type type)
3550{
3551	u32 ioc_state;
3552	int rc;
3553
3554	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3555	    __func__));
3556
3557	if (ioc->pci_error_recovery)
3558		return 0;
3559
3560	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3561	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
3562	    ioc->name, __func__, ioc_state));
3563
3564	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
3565		return 0;
3566
3567	if (ioc_state & MPI2_DOORBELL_USED) {
3568		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
3569		    "active!\n", ioc->name));
3570		goto issue_diag_reset;
3571	}
3572
3573	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3574		mpt2sas_base_fault_info(ioc, ioc_state &
3575		    MPI2_DOORBELL_DATA_MASK);
3576		goto issue_diag_reset;
3577	}
3578
3579	if (type == FORCE_BIG_HAMMER)
3580		goto issue_diag_reset;
3581
3582	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
3583		if (!(_base_send_ioc_reset(ioc,
3584		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
3585			ioc->ioc_reset_count++;
3586			return 0;
3587		}
3588
3589 issue_diag_reset:
3590	rc = _base_diag_reset(ioc, CAN_SLEEP);
3591	ioc->ioc_reset_count++;
3592	return rc;
3593}
3594
3595/**
3596 * _base_make_ioc_operational - put controller in OPERATIONAL state
3597 * @ioc: per adapter object
3598 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3599 *
3600 * Returns 0 for success, non-zero for failure.
3601 */
3602static int
3603_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3604{
3605	int r, i;
3606	unsigned long	flags;
3607	u32 reply_address;
3608	u16 smid;
3609	struct _tr_list *delayed_tr, *delayed_tr_next;
3610
3611	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3612	    __func__));
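	/* rebuild the host-side tracking structures (SCSI IO, hi-priority,
	 * internal and chain trackers) and both reply queues before sending
	 * IOC init, event notification and port enable
	 */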
3613
3614	/* clean the delayed target reset list */
3615	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
3616	    &ioc->delayed_tr_list, list) {
3617		list_del(&delayed_tr->list);
3618		kfree(delayed_tr);
3619	}
3620
3621	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
3622	    &ioc->delayed_tr_volume_list, list) {
3623		list_del(&delayed_tr->list);
3624		kfree(delayed_tr);
3625	}
3626
3627	/* initialize the scsi lookup free list */
3628	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3629	INIT_LIST_HEAD(&ioc->free_list);
3630	smid = 1;
3631	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
3632		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
3633		ioc->scsi_lookup[i].cb_idx = 0xFF;
3634		ioc->scsi_lookup[i].smid = smid;
3635		ioc->scsi_lookup[i].scmd = NULL;
3636		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
3637		    &ioc->free_list);
3638	}
3639
3640	/* hi-priority queue */
3641	INIT_LIST_HEAD(&ioc->hpr_free_list);
3642	smid = ioc->hi_priority_smid;
3643	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
3644		ioc->hpr_lookup[i].cb_idx = 0xFF;
3645		ioc->hpr_lookup[i].smid = smid;
3646		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
3647		    &ioc->hpr_free_list);
3648	}
3649
3650	/* internal queue */
3651	INIT_LIST_HEAD(&ioc->internal_free_list);
3652	smid = ioc->internal_smid;
3653	for (i = 0; i < ioc->internal_depth; i++, smid++) {
3654		ioc->internal_lookup[i].cb_idx = 0xFF;
3655		ioc->internal_lookup[i].smid = smid;
3656		list_add_tail(&ioc->internal_lookup[i].tracker_list,
3657		    &ioc->internal_free_list);
3658	}
3659
3660	/* chain pool */
3661	INIT_LIST_HEAD(&ioc->free_chain_list);
3662	for (i = 0; i < ioc->chain_depth; i++)
3663		list_add_tail(&ioc->chain_lookup[i].tracker_list,
3664		    &ioc->free_chain_list);
3665
3666	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3667
3668	/* initialize Reply Free Queue */
3669	for (i = 0, reply_address = (u32)ioc->reply_dma ;
3670	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
3671	    ioc->reply_sz)
3672		ioc->reply_free[i] = cpu_to_le32(reply_address);
3673
3674	/* initialize Reply Post Free Queue */
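	/* a descriptor of all ones reads back as type UNUSED, so the interrupt
	 * handler skips the slot until firmware posts a real reply there
	 */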
3675	for (i = 0; i < ioc->reply_post_queue_depth; i++)
3676		ioc->reply_post_free[i].Words = ULLONG_MAX;
3677
3678	r = _base_send_ioc_init(ioc, sleep_flag);
3679	if (r)
3680		return r;
3681
3682	/* initialize the reply queue host indexes */
3683	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
3684	ioc->reply_post_host_index = 0;
3685	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
3686	writel(0, &ioc->chip->ReplyPostHostIndex);
3687
3688	_base_unmask_interrupts(ioc);
3689	r = _base_event_notification(ioc, sleep_flag);
3690	if (r)
3691		return r;
3692
3693	if (sleep_flag == CAN_SLEEP)
3694		_base_static_config_pages(ioc);
3695
3696	if (ioc->wait_for_port_enable_to_complete) {
3697		if (diag_buffer_enable != 0)
3698			mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
3699		if (disable_discovery > 0)
3700			return r;
3701	}
3702
3703	r = _base_send_port_enable(ioc, sleep_flag);
3704	return r;
3708}
3709
3710/**
3711 * mpt2sas_base_free_resources - free controller resources (io/irq/memmap)
3712 * @ioc: per adapter object
3713 *
3714 * Return nothing.
3715 */
3716void
3717mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
3718{
3719	struct pci_dev *pdev = ioc->pdev;
3720
3721	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3722	    __func__));
3723
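	/* quiesce the controller (mask interrupts, soft reset back to READY)
	 * before tearing down the irq, MSI-X and MMIO resources
	 */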
3724	_base_mask_interrupts(ioc);
3725	ioc->shost_recovery = 1;
3726	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3727	ioc->shost_recovery = 0;
3728	if (ioc->pci_irq) {
3729		synchronize_irq(pdev->irq);
3730		free_irq(ioc->pci_irq, ioc);
3731	}
3732	_base_disable_msix(ioc);
3733	if (ioc->chip_phys)
3734		iounmap(ioc->chip);
3735	ioc->pci_irq = -1;
3736	ioc->chip_phys = 0;
3737	pci_release_selected_regions(ioc->pdev, ioc->bars);
3738	pci_disable_pcie_error_reporting(pdev);
3739	pci_disable_device(pdev);
3740	return;
3741}
3742
3743/**
3744 * mpt2sas_base_attach - attach controller instance
3745 * @ioc: per adapter object
3746 *
3747 * Returns 0 for success, non-zero for failure.
3748 */
3749int
3750mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3751{
3752	int r, i;
3753
3754	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3755	    __func__));
3756
3757	r = mpt2sas_base_map_resources(ioc);
3758	if (r)
3759		return r;
3760
3761	pci_set_drvdata(ioc->pdev, ioc->shost);
3762	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
3763	if (r)
3764		goto out_free_resources;
3765
3766	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3767	if (r)
3768		goto out_free_resources;
3769
3770	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
3771	    sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
3772	if (!ioc->pfacts) {
3773		r = -ENOMEM;
3774		goto out_free_resources;
3775	}
3776
3777	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
3778		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
3779		if (r)
3780			goto out_free_resources;
3781	}
3782
3783	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
3784	if (r)
3785		goto out_free_resources;
3786
3787	init_waitqueue_head(&ioc->reset_wq);
3788
3789	/* allocate memory for the pd handles bitmask */
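	/* one bit per possible device handle, rounded up to whole bytes */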
3790	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
3791	if (ioc->facts.MaxDevHandle % 8)
3792		ioc->pd_handles_sz++;
3793	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
3794	    GFP_KERNEL);
3795	if (!ioc->pd_handles) {
3796		r = -ENOMEM;
3797		goto out_free_resources;
3798	}
3799
3800	ioc->fwfault_debug = mpt2sas_fwfault_debug;
3801
3802	/* base internal command bits */
3803	mutex_init(&ioc->base_cmds.mutex);
3804	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3805	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3806
3807	/* transport internal command bits */
3808	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3809	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3810	mutex_init(&ioc->transport_cmds.mutex);
3811
3812	/* scsih internal command bits */
3813	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3814	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
3815	mutex_init(&ioc->scsih_cmds.mutex);
3816
3817	/* task management internal command bits */
3818	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3819	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
3820	mutex_init(&ioc->tm_cmds.mutex);
3821
3822	/* config page internal command bits */
3823	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3824	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
3825	mutex_init(&ioc->config_cmds.mutex);
3826
3827	/* ctl module internal command bits */
3828	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3829	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
3830	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
3831	mutex_init(&ioc->ctl_cmds.mutex);
3832
3833	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
3834	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
3835	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
3836	    !ioc->ctl_cmds.sense) {
3837		r = -ENOMEM;
3838		goto out_free_resources;
3839	}
3840
3848	init_completion(&ioc->shost_recovery_done);
3849
3850	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3851		ioc->event_masks[i] = -1;
3852
3853	/* here we enable the events we care about */
3854	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
3855	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
3856	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3857	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
3858	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
3859	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
3860	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
3861	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
3862	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
3863	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
3864	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
3865	if (r)
3866		goto out_free_resources;
3867
3868	if (missing_delay[0] != -1 && missing_delay[1] != -1)
3869		_base_update_missing_delay(ioc, missing_delay[0],
3870		    missing_delay[1]);
3871
3872	mpt2sas_base_start_watchdog(ioc);
3873	return 0;
3874
3875 out_free_resources:
3876
3877	ioc->remove_host = 1;
3878	mpt2sas_base_free_resources(ioc);
3879	_base_release_memory_pools(ioc);
3880	pci_set_drvdata(ioc->pdev, NULL);
3881	kfree(ioc->pd_handles);
3882	kfree(ioc->tm_cmds.reply);
3883	kfree(ioc->transport_cmds.reply);
3884	kfree(ioc->scsih_cmds.reply);
3885	kfree(ioc->config_cmds.reply);
3886	kfree(ioc->base_cmds.reply);
3887	kfree(ioc->ctl_cmds.reply);
3888	kfree(ioc->ctl_cmds.sense);
3889	kfree(ioc->pfacts);
3890	ioc->ctl_cmds.reply = NULL;
3891	ioc->base_cmds.reply = NULL;
3892	ioc->tm_cmds.reply = NULL;
3893	ioc->scsih_cmds.reply = NULL;
3894	ioc->transport_cmds.reply = NULL;
3895	ioc->config_cmds.reply = NULL;
3896	ioc->pfacts = NULL;
3897	return r;
3898}
3899
3900
3901/**
3902 * mpt2sas_base_detach - remove controller instance
3903 * @ioc: per adapter object
3904 *
3905 * Return nothing.
3906 */
3907void
3908mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
3909{
3910
3911	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3912	    __func__));
3913
3914	mpt2sas_base_stop_watchdog(ioc);
3915	mpt2sas_base_free_resources(ioc);
3916	_base_release_memory_pools(ioc);
3917	pci_set_drvdata(ioc->pdev, NULL);
3918	kfree(ioc->pd_handles);
3919	kfree(ioc->pfacts);
3920	kfree(ioc->ctl_cmds.reply);
3921	kfree(ioc->ctl_cmds.sense);
3922	kfree(ioc->base_cmds.reply);
3923	kfree(ioc->tm_cmds.reply);
3924	kfree(ioc->transport_cmds.reply);
3925	kfree(ioc->scsih_cmds.reply);
3926	kfree(ioc->config_cmds.reply);
3927}
3928
3929/**
3930 * _base_reset_handler - reset callback handler (for base)
3931 * @ioc: per adapter object
3932 * @reset_phase: phase
3933 *
3934 * The handler for doing any required cleanup or initialization.
3935 *
3936 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
3937 * MPT2_IOC_DONE_RESET
3938 *
3939 * Return nothing.
3940 */
3941static void
3942_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3943{
3944	switch (reset_phase) {
3945	case MPT2_IOC_PRE_RESET:
3946		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3947		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
3948		break;
3949	case MPT2_IOC_AFTER_RESET:
3950		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3951		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
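		/* internal commands still pending when the reset hit are
		 * marked MPT2_CMD_RESET, their smids freed and their waiters
		 * woken so they fail fast instead of timing out
		 */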
3952		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
3953			ioc->transport_cmds.status |= MPT2_CMD_RESET;
3954			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
3955			complete(&ioc->transport_cmds.done);
3956		}
3957		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3958			ioc->base_cmds.status |= MPT2_CMD_RESET;
3959			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
3960			complete(&ioc->base_cmds.done);
3961		}
3962		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
3963			ioc->config_cmds.status |= MPT2_CMD_RESET;
3964			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
3965			ioc->config_cmds.smid = USHRT_MAX;
3966			complete(&ioc->config_cmds.done);
3967		}
3968		break;
3969	case MPT2_IOC_DONE_RESET:
3970		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
3971		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
3972		break;
3973	}
3974	mpt2sas_scsih_reset_handler(ioc, reset_phase);
3975	mpt2sas_ctl_reset_handler(ioc, reset_phase);
3976}
3977
3978/**
3979 * _wait_for_commands_to_complete - wait for pending SCSI IO to complete
3980 * @ioc: per adapter object
3981 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3982 *
3983 * This function waits up to 10 seconds for all pending SCSI IO commands
3984 * to complete prior to putting the controller into reset.
3985 */
3986static void
3987_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3988{
3989	u32 ioc_state;
3990	unsigned long flags;
3991	u16 i;
3992
3993	ioc->pending_io_count = 0;
3994	if (sleep_flag != CAN_SLEEP)
3995		return;
3996
3997	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3998	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
3999		return;
4000
4001	/* pending command count */
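	/* a tracker whose cb_idx is not 0xFF still has a command outstanding */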
4002	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4003	for (i = 0; i < ioc->scsiio_depth; i++)
4004		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4005			ioc->pending_io_count++;
4006	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4007
4008	if (!ioc->pending_io_count)
4009		return;
4010
4011	/* wait for pending commands to complete */
4012	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4013}
4014
4015/**
4016 * mpt2sas_base_hard_reset_handler - reset controller
4017 * @ioc: per adapter object
4018 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4019 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4020 *
4021 * Returns 0 for success, non-zero for failure.
4022 */
4023int
4024mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4025    enum reset_type type)
4026{
4027	int r;
4028	unsigned long flags;
4029
4030	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4031	    __func__));
4032
4033	if (ioc->pci_error_recovery) {
4034		printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
4035		    ioc->name, __func__);
4036		r = 0;
4037		goto out;
4038	}
4039
4040	if (mpt2sas_fwfault_debug)
4041		mpt2sas_halt_firmware(ioc);
4042
4043	/* TODO - What we really should be doing is pulling
4044	 * out all the code associated with NO_SLEEP; it's never used.
4045	 * That is legacy code from the mpt fusion driver, ported over.
4046	 * I will leave this BUG_ON here for now until it's been resolved.
4047	 */
4048	BUG_ON(sleep_flag == NO_SLEEP);
4049
4050	/* wait for an active reset in progress to complete */
4051	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4052		do {
4053			ssleep(1);
4054		} while (ioc->shost_recovery == 1);
4055		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4056		    __func__));
4057		return ioc->ioc_reset_in_progress_status;
4058	}
4059
4060	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4061	ioc->shost_recovery = 1;
4062	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4063
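	/* reset sequence: PRE_RESET callbacks, drain outstanding IO, mask
	 * interrupts, bring the IOC back to READY, AFTER_RESET callbacks,
	 * re-initialize to OPERATIONAL, then DONE_RESET callbacks
	 */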
4064	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
4065	_wait_for_commands_to_complete(ioc, sleep_flag);
4066	_base_mask_interrupts(ioc);
4067	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4068	if (r)
4069		goto out;
4070	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4071	r = _base_make_ioc_operational(ioc, sleep_flag);
4072	if (!r)
4073		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4074 out:
4075	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
4076	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4077
4078	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4079	ioc->ioc_reset_in_progress_status = r;
4080	ioc->shost_recovery = 0;
4081	complete(&ioc->shost_recovery_done);
4082	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4083	mutex_unlock(&ioc->reset_in_progress_mutex);
4084
4085	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4086	    __func__));
4087	return r;
4088}
4089