mpt2sas_base.c revision 338b131a3269881c7431234855c93c219b0979b6
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2010  LSI Corporation
7 *  (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42 * USA.
43 */
44
45#include <linux/kernel.h>
46#include <linux/module.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/types.h>
51#include <linux/pci.h>
52#include <linux/kdev_t.h>
53#include <linux/blkdev.h>
54#include <linux/delay.h>
55#include <linux/interrupt.h>
56#include <linux/dma-mapping.h>
57#include <linux/sort.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/kthread.h>
61#include <linux/aer.h>
62
63#include "mpt2sas_base.h"
64
65static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
66
67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
68
69#define MAX_HBA_QUEUE_DEPTH	30000
70#define MAX_CHAIN_DEPTH		100000
71static int max_queue_depth = -1;
72module_param(max_queue_depth, int, 0);
73MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
74
75static int max_sgl_entries = -1;
76module_param(max_sgl_entries, int, 0);
77MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
78
79static int msix_disable = -1;
80module_param(msix_disable, int, 0);
81MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
82
83static int missing_delay[2] = {-1, -1};
84module_param_array(missing_delay, int, NULL, 0);
85MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
86
87static int mpt2sas_fwfault_debug;
88MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
89	"and halt firmware - (default=0)");
90
91static int disable_discovery = -1;
92module_param(disable_discovery, int, 0);
93MODULE_PARM_DESC(disable_discovery, " disable discovery ");
94
95/**
96 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
97 *
98 */
99static int
100_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
101{
102	int ret = param_set_int(val, kp);
103	struct MPT2SAS_ADAPTER *ioc;
104
105	if (ret)
106		return ret;
107
108	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
109	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
110		ioc->fwfault_debug = mpt2sas_fwfault_debug;
111	return 0;
112}
113
114module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
115    param_get_int, &mpt2sas_fwfault_debug, 0644);
116
117/**
118 *  mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
119 * @arg: input argument, used to derive ioc
120 *
121 * Return 0 if controller is removed from pci subsystem.
122 * Return -1 otherwise.
123 */
124static int mpt2sas_remove_dead_ioc_func(void *arg)
125{
126	struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
127	struct pci_dev *pdev;
128
129	if (!ioc)
130		return -1;
131
132	pdev = ioc->pdev;
133	if (!pdev)
134		return -1;
135	pci_stop_and_remove_bus_device(pdev);
136	return 0;
137}
138
139
140/**
141 * _base_fault_reset_work - workq handling ioc fault conditions
142 * @work: input argument, used to derive ioc
143 * Context: sleep.
144 *
145 * Return nothing.
146 */
147static void
148_base_fault_reset_work(struct work_struct *work)
149{
150	struct MPT2SAS_ADAPTER *ioc =
151	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
152	unsigned long	 flags;
153	u32 doorbell;
154	int rc;
155	struct task_struct *p;
156
157	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
158	if (ioc->shost_recovery)
159		goto rearm_timer;
160	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
161
162	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
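	/* a masked state equal to the mask itself is not a defined IOC state;
	 * it typically means the doorbell read returned 0xFFFFFFFF (dead or
	 * surprise-removed controller), so skip the reset path and schedule
	 * removal of the host instead
	 */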
163	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
164		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
165			ioc->name, __func__);
166
167		/*
168		 * Call _scsih_flush_pending_cmds callback so that we flush all
169		 * pending commands back to the OS. This call is required to avoid a
170		 * deadlock at the block layer. A dead IOC will fail to do a diag reset,
171		 * and this call is safe since a dead IOC will never return any
172		 * command back from the HW.
173		 */
174		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
175		/*
176		 * Set remove_host flag early since kernel thread will
177		 * take some time to execute.
178		 */
179		ioc->remove_host = 1;
180		/* Remove the dead host */
181		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
182		    "mpt2sas_dead_ioc_%d", ioc->id);
183		if (IS_ERR(p)) {
184			printk(MPT2SAS_ERR_FMT
185			"%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
186			ioc->name, __func__);
187		} else {
188		    printk(MPT2SAS_ERR_FMT
189			"%s: Running mpt2sas_dead_ioc thread success !!!!\n",
190			ioc->name, __func__);
191		}
192
193		return; /* don't rearm timer */
194	}
195
196	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
197		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
198		    FORCE_BIG_HAMMER);
199		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
200		    __func__, (rc == 0) ? "success" : "failed");
201		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
202		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
203			mpt2sas_base_fault_info(ioc, doorbell &
204			    MPI2_DOORBELL_DATA_MASK);
205	}
206
207	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
208 rearm_timer:
209	if (ioc->fault_reset_work_q)
210		queue_delayed_work(ioc->fault_reset_work_q,
211		    &ioc->fault_reset_work,
212		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
213	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
214}
215
216/**
217 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
218 * @ioc: per adapter object
219 * Context: sleep.
220 *
221 * Return nothing.
222 */
223void
224mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
225{
226	unsigned long	 flags;
227
228	if (ioc->fault_reset_work_q)
229		return;
230
231	/* initialize fault polling */
232	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
233	snprintf(ioc->fault_reset_work_q_name,
234	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
235	ioc->fault_reset_work_q =
236		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
237	if (!ioc->fault_reset_work_q) {
238		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
239		    ioc->name, __func__, __LINE__);
240		return;
241	}
242	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
243	if (ioc->fault_reset_work_q)
244		queue_delayed_work(ioc->fault_reset_work_q,
245		    &ioc->fault_reset_work,
246		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
247	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
248}
249
250/**
251 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
252 * @ioc: per adapter object
253 * Context: sleep.
254 *
255 * Return nothing.
256 */
257void
258mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
259{
260	unsigned long	 flags;
261	struct workqueue_struct *wq;
262
263	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
264	wq = ioc->fault_reset_work_q;
265	ioc->fault_reset_work_q = NULL;
266	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
267	if (wq) {
268		if (!cancel_delayed_work(&ioc->fault_reset_work))
269			flush_workqueue(wq);
270		destroy_workqueue(wq);
271	}
272}
273
274/**
275 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
276 * @ioc: per adapter object
277 * @fault_code: fault code
278 *
279 * Return nothing.
280 */
281void
282mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
283{
284	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
285	    ioc->name, fault_code);
286}
287
288/**
289 * mpt2sas_halt_firmware - halts the mpt controller firmware
290 * @ioc: per adapter object
291 *
292 * For debugging timeout related issues.  Writing 0xC0FFEE00
293 * to the doorbell register will halt the controller firmware. With
294 * both the driver and the firmware stopped, the end user can
295 * obtain a ring buffer from the controller UART.
296 */
297void
298mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
299{
300	u32 doorbell;
301
302	if (!ioc->fwfault_debug)
303		return;
304
305	dump_stack();
306
307	doorbell = readl(&ioc->chip->Doorbell);
308	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
309		mpt2sas_base_fault_info(ioc , doorbell);
310	else {
311		writel(0xC0FFEE00, &ioc->chip->Doorbell);
312		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
313		    "timeout\n", ioc->name);
314	}
315
316	panic("panic in %s\n", __func__);
317}
318
319#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
320/**
321 * _base_sas_ioc_info - verbose translation of the ioc status
322 * @ioc: per adapter object
323 * @mpi_reply: reply mf payload returned from firmware
324 * @request_hdr: request mf
325 *
326 * Return nothing.
327 */
328static void
329_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
330     MPI2RequestHeader_t *request_hdr)
331{
332	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
333	    MPI2_IOCSTATUS_MASK;
334	char *desc = NULL;
335	u16 frame_sz;
336	char *func_str = NULL;
337
338	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
339	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
340	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
341	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
342		return;
343
344	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
345		return;
346
347	switch (ioc_status) {
348
349/****************************************************************************
350*  Common IOCStatus values for all replies
351****************************************************************************/
352
353	case MPI2_IOCSTATUS_INVALID_FUNCTION:
354		desc = "invalid function";
355		break;
356	case MPI2_IOCSTATUS_BUSY:
357		desc = "busy";
358		break;
359	case MPI2_IOCSTATUS_INVALID_SGL:
360		desc = "invalid sgl";
361		break;
362	case MPI2_IOCSTATUS_INTERNAL_ERROR:
363		desc = "internal error";
364		break;
365	case MPI2_IOCSTATUS_INVALID_VPID:
366		desc = "invalid vpid";
367		break;
368	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
369		desc = "insufficient resources";
370		break;
371	case MPI2_IOCSTATUS_INVALID_FIELD:
372		desc = "invalid field";
373		break;
374	case MPI2_IOCSTATUS_INVALID_STATE:
375		desc = "invalid state";
376		break;
377	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
378		desc = "op state not supported";
379		break;
380
381/****************************************************************************
382*  Config IOCStatus values
383****************************************************************************/
384
385	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
386		desc = "config invalid action";
387		break;
388	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
389		desc = "config invalid type";
390		break;
391	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
392		desc = "config invalid page";
393		break;
394	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
395		desc = "config invalid data";
396		break;
397	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
398		desc = "config no defaults";
399		break;
400	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
401		desc = "config cant commit";
402		break;
403
404/****************************************************************************
405*  SCSI IO Reply
406****************************************************************************/
407
408	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
409	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
410	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
411	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
412	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
413	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
414	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
415	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
416	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
417	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
418	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
419	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
420		break;
421
422/****************************************************************************
423*  For use by SCSI Initiator and SCSI Target end-to-end data protection
424****************************************************************************/
425
426	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
427		desc = "eedp guard error";
428		break;
429	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
430		desc = "eedp ref tag error";
431		break;
432	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
433		desc = "eedp app tag error";
434		break;
435
436/****************************************************************************
437*  SCSI Target values
438****************************************************************************/
439
440	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
441		desc = "target invalid io index";
442		break;
443	case MPI2_IOCSTATUS_TARGET_ABORTED:
444		desc = "target aborted";
445		break;
446	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
447		desc = "target no conn retryable";
448		break;
449	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
450		desc = "target no connection";
451		break;
452	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
453		desc = "target xfer count mismatch";
454		break;
455	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
456		desc = "target data offset error";
457		break;
458	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
459		desc = "target too much write data";
460		break;
461	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
462		desc = "target iu too short";
463		break;
464	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
465		desc = "target ack nak timeout";
466		break;
467	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
468		desc = "target nak received";
469		break;
470
471/****************************************************************************
472*  Serial Attached SCSI values
473****************************************************************************/
474
475	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
476		desc = "smp request failed";
477		break;
478	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
479		desc = "smp data overrun";
480		break;
481
482/****************************************************************************
483*  Diagnostic Buffer Post / Diagnostic Release values
484****************************************************************************/
485
486	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
487		desc = "diagnostic released";
488		break;
489	default:
490		break;
491	}
492
493	if (!desc)
494		return;
495
496	switch (request_hdr->Function) {
497	case MPI2_FUNCTION_CONFIG:
498		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
499		func_str = "config_page";
500		break;
501	case MPI2_FUNCTION_SCSI_TASK_MGMT:
502		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
503		func_str = "task_mgmt";
504		break;
505	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
506		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
507		func_str = "sas_iounit_ctl";
508		break;
509	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
510		frame_sz = sizeof(Mpi2SepRequest_t);
511		func_str = "enclosure";
512		break;
513	case MPI2_FUNCTION_IOC_INIT:
514		frame_sz = sizeof(Mpi2IOCInitRequest_t);
515		func_str = "ioc_init";
516		break;
517	case MPI2_FUNCTION_PORT_ENABLE:
518		frame_sz = sizeof(Mpi2PortEnableRequest_t);
519		func_str = "port_enable";
520		break;
521	case MPI2_FUNCTION_SMP_PASSTHROUGH:
522		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
523		func_str = "smp_passthru";
524		break;
525	default:
526		frame_sz = 32;
527		func_str = "unknown";
528		break;
529	}
530
531	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
532	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
533
534	_debug_dump_mf(request_hdr, frame_sz/4);
535}
536
537/**
538 * _base_display_event_data - verbose translation of firmware async events
539 * @ioc: per adapter object
540 * @mpi_reply: reply mf payload returned from firmware
541 *
542 * Return nothing.
543 */
544static void
545_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
546    Mpi2EventNotificationReply_t *mpi_reply)
547{
548	char *desc = NULL;
549	u16 event;
550
551	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
552		return;
553
554	event = le16_to_cpu(mpi_reply->Event);
555
556	switch (event) {
557	case MPI2_EVENT_LOG_DATA:
558		desc = "Log Data";
559		break;
560	case MPI2_EVENT_STATE_CHANGE:
561		desc = "Status Change";
562		break;
563	case MPI2_EVENT_HARD_RESET_RECEIVED:
564		desc = "Hard Reset Received";
565		break;
566	case MPI2_EVENT_EVENT_CHANGE:
567		desc = "Event Change";
568		break;
569	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
570		desc = "Device Status Change";
571		break;
572	case MPI2_EVENT_IR_OPERATION_STATUS:
573		if (!ioc->hide_ir_msg)
574			desc = "IR Operation Status";
575		break;
576	case MPI2_EVENT_SAS_DISCOVERY:
577	{
578		Mpi2EventDataSasDiscovery_t *event_data =
579		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
580		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
581		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
582		    "start" : "stop");
583		if (event_data->DiscoveryStatus)
584			printk("discovery_status(0x%08x)",
585			    le32_to_cpu(event_data->DiscoveryStatus));
586		printk("\n");
587		return;
588	}
589	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
590		desc = "SAS Broadcast Primitive";
591		break;
592	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
593		desc = "SAS Init Device Status Change";
594		break;
595	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
596		desc = "SAS Init Table Overflow";
597		break;
598	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
599		desc = "SAS Topology Change List";
600		break;
601	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
602		desc = "SAS Enclosure Device Status Change";
603		break;
604	case MPI2_EVENT_IR_VOLUME:
605		if (!ioc->hide_ir_msg)
606			desc = "IR Volume";
607		break;
608	case MPI2_EVENT_IR_PHYSICAL_DISK:
609		if (!ioc->hide_ir_msg)
610			desc = "IR Physical Disk";
611		break;
612	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
613		if (!ioc->hide_ir_msg)
614			desc = "IR Configuration Change List";
615		break;
616	case MPI2_EVENT_LOG_ENTRY_ADDED:
617		if (!ioc->hide_ir_msg)
618			desc = "Log Entry Added";
619		break;
620	}
621
622	if (!desc)
623		return;
624
625	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
626}
627#endif
628
629/**
630 * _base_sas_log_info - verbose translation of firmware log info
631 * @ioc: per adapter object
632 * @log_info: log info
633 *
634 * Return nothing.
635 */
636static void
637_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
638{
639	union loginfo_type {
640		u32	loginfo;
641		struct {
642			u32	subcode:16;
643			u32	code:8;
644			u32	originator:4;
645			u32	bus_type:4;
646		} dw;
647	};
648	union loginfo_type sas_loginfo;
649	char *originator_str = NULL;
650
651	sas_loginfo.loginfo = log_info;
652	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
653		return;
654
655	/* each nexus loss loginfo */
656	if (log_info == 0x31170000)
657		return;
658
659	/* eat the loginfos associated with task aborts */
660	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
661	    0x31140000 || log_info == 0x31130000))
662		return;
663
664	switch (sas_loginfo.dw.originator) {
665	case 0:
666		originator_str = "IOP";
667		break;
668	case 1:
669		originator_str = "PL";
670		break;
671	case 2:
672		if (!ioc->hide_ir_msg)
673			originator_str = "IR";
674		else
675			originator_str = "WarpDrive";
676		break;
677	}
678
679	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
680	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
681	     originator_str, sas_loginfo.dw.code,
682	     sas_loginfo.dw.subcode);
683}
684
685/**
686 * _base_display_reply_info - verbose dump of a reply's ioc status and log info
687 * @ioc: per adapter object
688 * @smid: system request message index
689 * @msix_index: MSIX table index supplied by the OS
690 * @reply: reply message frame(lower 32bit addr)
691 *
692 * Return nothing.
693 */
694static void
695_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
696    u32 reply)
697{
698	MPI2DefaultReply_t *mpi_reply;
699	u16 ioc_status;
700
701	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
702	if (unlikely(!mpi_reply)) {
703		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
704			ioc->name, __FILE__, __LINE__, __func__);
705		return;
706	}
707	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
708#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
709	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
710	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
711		_base_sas_ioc_info(ioc , mpi_reply,
712		   mpt2sas_base_get_msg_frame(ioc, smid));
713	}
714#endif
715	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
716		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
717}
718
719/**
720 * mpt2sas_base_done - base internal command completion routine
721 * @ioc: per adapter object
722 * @smid: system request message index
723 * @msix_index: MSIX table index supplied by the OS
724 * @reply: reply message frame(lower 32bit addr)
725 *
726 * Return 1 meaning mf should be freed from _base_interrupt
727 *        0 means the mf is freed from this function.
728 */
729u8
730mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
731    u32 reply)
732{
733	MPI2DefaultReply_t *mpi_reply;
734
735	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
736	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
737		return 1;
738
739	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
740		return 1;
741
742	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
743	if (mpi_reply) {
744		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
745		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
746	}
747	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
748
749	complete(&ioc->base_cmds.done);
750	return 1;
751}
752
753/**
754 * _base_async_event - main callback handler for firmware async events
755 * @ioc: per adapter object
756 * @msix_index: MSIX table index supplied by the OS
757 * @reply: reply message frame(lower 32bit addr)
758 *
759 * Return 1 meaning mf should be freed from _base_interrupt
760 *        0 means the mf is freed from this function.
761 */
762static u8
763_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
764{
765	Mpi2EventNotificationReply_t *mpi_reply;
766	Mpi2EventAckRequest_t *ack_request;
767	u16 smid;
768
769	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
770	if (!mpi_reply)
771		return 1;
772	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
773		return 1;
774#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
775	_base_display_event_data(ioc, mpi_reply);
776#endif
777	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
778		goto out;
779	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
780	if (!smid) {
781		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
782		    ioc->name, __func__);
783		goto out;
784	}
785
786	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
787	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
788	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
789	ack_request->Event = mpi_reply->Event;
790	ack_request->EventContext = mpi_reply->EventContext;
791	ack_request->VF_ID = 0;  /* TODO */
792	ack_request->VP_ID = 0;
793	mpt2sas_base_put_smid_default(ioc, smid);
794
795 out:
796
797	/* scsih callback handler */
798	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
799
800	/* ctl callback handler */
801	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
802
803	return 1;
804}
805
806/**
807 * _base_get_cb_idx - obtain the callback index
808 * @ioc: per adapter object
809 * @smid: system request message index
810 *
811 * Return callback index.
812 */
813static u8
814_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
815{
816	int i;
817	u8 cb_idx;
818
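	/* smid ranges: [1, hi_priority_smid) are SCSI IO trackers,
	 * [hi_priority_smid, internal_smid) are high-priority trackers,
	 * and [internal_smid, hba_queue_depth] are internal trackers
	 */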
819	if (smid < ioc->hi_priority_smid) {
820		i = smid - 1;
821		cb_idx = ioc->scsi_lookup[i].cb_idx;
822	} else if (smid < ioc->internal_smid) {
823		i = smid - ioc->hi_priority_smid;
824		cb_idx = ioc->hpr_lookup[i].cb_idx;
825	} else if (smid <= ioc->hba_queue_depth) {
826		i = smid - ioc->internal_smid;
827		cb_idx = ioc->internal_lookup[i].cb_idx;
828	} else
829		cb_idx = 0xFF;
830	return cb_idx;
831}
832
833/**
834 * _base_mask_interrupts - disable interrupts
835 * @ioc: per adapter object
836 *
837 * Disabling ResetIRQ, Reply and Doorbell Interrupts
838 *
839 * Return nothing.
840 */
841static void
842_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
843{
844	u32 him_register;
845
846	ioc->mask_interrupts = 1;
847	him_register = readl(&ioc->chip->HostInterruptMask);
848	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
849	writel(him_register, &ioc->chip->HostInterruptMask);
850	readl(&ioc->chip->HostInterruptMask);
851}
852
853/**
854 * _base_unmask_interrupts - enable interrupts
855 * @ioc: per adapter object
856 *
857 * Enabling only Reply Interrupts
858 *
859 * Return nothing.
860 */
861static void
862_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
863{
864	u32 him_register;
865
866	him_register = readl(&ioc->chip->HostInterruptMask);
867	him_register &= ~MPI2_HIM_RIM;
868	writel(him_register, &ioc->chip->HostInterruptMask);
869	ioc->mask_interrupts = 0;
870}
871
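/* a 64-bit reply post descriptor viewed as two 32-bit words, so an unused
 * slot (left as all 0xFF) can be detected by checking each half against
 * UINT_MAX in the interrupt handler
 */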
872union reply_descriptor {
873	u64 word;
874	struct {
875		u32 low;
876		u32 high;
877	} u;
878};
879
880/**
881 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
882 * @irq: irq number (not used)
883 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue structure
884 * @r: pt_regs pointer (not used)
885 *
886 * Return IRQ_HANDLED if processed, else IRQ_NONE.
887 */
888static irqreturn_t
889_base_interrupt(int irq, void *bus_id)
890{
891	struct adapter_reply_queue *reply_q = bus_id;
892	union reply_descriptor rd;
893	u32 completed_cmds;
894	u8 request_desript_type;
895	u16 smid;
896	u8 cb_idx;
897	u32 reply;
898	u8 msix_index = reply_q->msix_index;
899	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
900	Mpi2ReplyDescriptorsUnion_t *rpf;
901	u8 rc;
902
903	if (ioc->mask_interrupts)
904		return IRQ_NONE;
905
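	/* mark this reply queue busy; if another context already holds it,
	 * bail out and let that context drain the queue
	 */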
906	if (!atomic_add_unless(&reply_q->busy, 1, 1))
907		return IRQ_NONE;
908
909	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
910	request_desript_type = rpf->Default.ReplyFlags
911	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
912	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
913		atomic_dec(&reply_q->busy);
914		return IRQ_NONE;
915	}
916
917	completed_cmds = 0;
918	cb_idx = 0xFF;
919	do {
920		rd.word = le64_to_cpu(rpf->Words);
921		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
922			goto out;
923		reply = 0;
924		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
925		if (request_desript_type ==
926		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
927			reply = le32_to_cpu
928				(rpf->AddressReply.ReplyFrameAddress);
929			if (reply > ioc->reply_dma_max_address ||
930			    reply < ioc->reply_dma_min_address)
931				reply = 0;
932		} else if (request_desript_type ==
933		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
934			goto next;
935		else if (request_desript_type ==
936		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
937			goto next;
938		if (smid) {
939			cb_idx = _base_get_cb_idx(ioc, smid);
940			if (likely(cb_idx < MPT_MAX_CALLBACKS) &&
941			    likely(mpt_callbacks[cb_idx] != NULL)) {
942				rc = mpt_callbacks[cb_idx](ioc, smid,
943				    msix_index, reply);
944				if (reply)
945					_base_display_reply_info(ioc, smid,
946					    msix_index, reply);
947				if (rc)
948					mpt2sas_base_free_smid(ioc, smid);
949			}
950		}
951		if (!smid)
952			_base_async_event(ioc, msix_index, reply);
953
954		/* reply free queue handling */
955		if (reply) {
956			ioc->reply_free_host_index =
957			    (ioc->reply_free_host_index ==
958			    (ioc->reply_free_queue_depth - 1)) ?
959			    0 : ioc->reply_free_host_index + 1;
960			ioc->reply_free[ioc->reply_free_host_index] =
961			    cpu_to_le32(reply);
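			/* make the new reply-free entry visible to the IOC
			 * before updating the host index register below
			 */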
962			wmb();
963			writel(ioc->reply_free_host_index,
964			    &ioc->chip->ReplyFreeHostIndex);
965		}
966
967 next:
968
969		rpf->Words = cpu_to_le64(ULLONG_MAX);
970		reply_q->reply_post_host_index =
971		    (reply_q->reply_post_host_index ==
972		    (ioc->reply_post_queue_depth - 1)) ? 0 :
973		    reply_q->reply_post_host_index + 1;
974		request_desript_type =
975		    reply_q->reply_post_free[reply_q->reply_post_host_index].
976		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
977		completed_cmds++;
978		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
979			goto out;
980		if (!reply_q->reply_post_host_index)
981			rpf = reply_q->reply_post_free;
982		else
983			rpf++;
984	} while (1);
985
986 out:
987
988	if (!completed_cmds) {
989		atomic_dec(&reply_q->busy);
990		return IRQ_NONE;
991	}
992	wmb();
993	if (ioc->is_warpdrive) {
994		writel(reply_q->reply_post_host_index,
995		ioc->reply_post_host_index[msix_index]);
996		atomic_dec(&reply_q->busy);
997		return IRQ_HANDLED;
998	}
999	writel(reply_q->reply_post_host_index | (msix_index <<
1000	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
1001	atomic_dec(&reply_q->busy);
1002	return IRQ_HANDLED;
1003}
1004
1005/**
1006 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1007 * @ioc: per adapter object
1008 *
1009 */
1010static inline int
1011_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
1012{
1013	return (ioc->facts.IOCCapabilities &
1014	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1015}
1016
1017/**
1018 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
1019 * @ioc: per adapter object
1020 * Context: ISR context
1021 *
1022 * Called when a Task Management request has completed. We want
1023 * to flush the other reply queues so all the outstanding IO has been
1024 * completed back to the OS before we process the TM completion.
1025 *
1026 * Return nothing.
1027 */
1028void
1029mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1030{
1031	struct adapter_reply_queue *reply_q;
1032
1033	/* If MSIX capability is turned off
1034	 * then multi-queues are not enabled
1035	 */
1036	if (!_base_is_controller_msix_enabled(ioc))
1037		return;
1038
1039	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1040		if (ioc->shost_recovery)
1041			return;
1042		/* TMs are on msix_index == 0 */
1043		if (reply_q->msix_index == 0)
1044			continue;
1045		_base_interrupt(reply_q->vector, (void *)reply_q);
1046	}
1047}
1048
1049/**
1050 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
1051 * @cb_idx: callback index
1052 *
1053 * Return nothing.
1054 */
1055void
1056mpt2sas_base_release_callback_handler(u8 cb_idx)
1057{
1058	mpt_callbacks[cb_idx] = NULL;
1059}
1060
1061/**
1062 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
1063 * @cb_func: callback function
1064 *
1065 * Returns cb_idx, the assigned callback index.
1066 */
1067u8
1068mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1069{
1070	u8 cb_idx;
1071
1072	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1073		if (mpt_callbacks[cb_idx] == NULL)
1074			break;
1075
1076	mpt_callbacks[cb_idx] = cb_func;
1077	return cb_idx;
1078}
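
/*
 * Typical usage (sketch, names illustrative of the scsih module): each
 * sub-module registers its completion routine once at load time, e.g.
 *	scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 * and releases the slot with mpt2sas_base_release_callback_handler() at unload.
 */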
1079
1080/**
1081 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
1082 *
1083 * Return nothing.
1084 */
1085void
1086mpt2sas_base_initialize_callback_handler(void)
1087{
1088	u8 cb_idx;
1089
1090	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1091		mpt2sas_base_release_callback_handler(cb_idx);
1092}
1093
1094/**
1095 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
1096 * @ioc: per adapter object
1097 * @paddr: virtual address for SGE
1098 *
1099 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1100 * something to use if the target device goes brain dead and tries
1101 * to send data even when none is asked for.
1102 *
1103 * Return nothing.
1104 */
1105void
1106mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
1107{
1108	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1109	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1110	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1111	    MPI2_SGE_FLAGS_SHIFT);
1112	ioc->base_add_sg_single(paddr, flags_length, -1);
1113}
1114
1115/**
1116 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1117 * @paddr: virtual address for SGE
1118 * @flags_length: SGE flags and data transfer length
1119 * @dma_addr: Physical address
1120 *
1121 * Return nothing.
1122 */
1123static void
1124_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1125{
1126	Mpi2SGESimple32_t *sgel = paddr;
1127
1128	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1129	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1130	sgel->FlagsLength = cpu_to_le32(flags_length);
1131	sgel->Address = cpu_to_le32(dma_addr);
1132}
1133
1134
1135/**
1136 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1137 * @paddr: virtual address for SGE
1138 * @flags_length: SGE flags and data transfer length
1139 * @dma_addr: Physical address
1140 *
1141 * Return nothing.
1142 */
1143static void
1144_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1145{
1146	Mpi2SGESimple64_t *sgel = paddr;
1147
1148	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1149	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1150	sgel->FlagsLength = cpu_to_le32(flags_length);
1151	sgel->Address = cpu_to_le64(dma_addr);
1152}
1153
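/* convert a page count to kilobytes: PAGE_SHIFT gives bytes per page as a
 * power of two, and subtracting 10 (2^10 bytes per kB) yields the kB shift
 */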
1154#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1155
1156/**
1157 * _base_config_dma_addressing - set dma addressing
1158 * @ioc: per adapter object
1159 * @pdev: PCI device struct
1160 *
1161 * Returns 0 for success, non-zero for failure.
1162 */
1163static int
1164_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1165{
1166	struct sysinfo s;
1167	char *desc = NULL;
1168
1169	if (sizeof(dma_addr_t) > 4) {
1170		const uint64_t required_mask =
1171		    dma_get_required_mask(&pdev->dev);
1172		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1173		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1174		    DMA_BIT_MASK(64))) {
1175			ioc->base_add_sg_single = &_base_add_sg_single_64;
1176			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1177			desc = "64";
1178			goto out;
1179		}
1180	}
1181
1182	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1183	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1184		ioc->base_add_sg_single = &_base_add_sg_single_32;
1185		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1186		desc = "32";
1187	} else
1188		return -ENODEV;
1189
1190 out:
1191	si_meminfo(&s);
1192	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1193	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1194
1195	return 0;
1196}
1197
1198/**
1199 * _base_check_enable_msix - checks whether the controller is MSI-X capable.
1200 * @ioc: per adapter object
1201 *
1202 * Check to see if card is capable of MSIX, and set number
1203 * of available msix vectors
1204 */
1205static int
1206_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1207{
1208	int base;
1209	u16 message_control;
1210
1211
1212	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1213	if (!base) {
1214		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1215		    "supported\n", ioc->name));
1216		return -EINVAL;
1217	}
1218
1219	/* get msix vector count */
1220	/* NUMA_IO not supported for older controllers */
1221	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1222	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1223	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1224	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1225	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1226	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1227	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1228		ioc->msix_vector_count = 1;
1229	else {
1230		pci_read_config_word(ioc->pdev, base + 2, &message_control);
1231		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1232	}
1233	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1234	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1235
1236	return 0;
1237}
1238
1239/**
1240 * _base_free_irq - free irq
1241 * @ioc: per adapter object
1242 *
1243 * Frees each reply_queue from the list.
1244 */
1245static void
1246_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1247{
1248	struct adapter_reply_queue *reply_q, *next;
1249
1250	if (list_empty(&ioc->reply_queue_list))
1251		return;
1252
1253	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1254		list_del(&reply_q->list);
1255		synchronize_irq(reply_q->vector);
1256		free_irq(reply_q->vector, reply_q);
1257		kfree(reply_q);
1258	}
1259}
1260
1261/**
1262 * _base_request_irq - request irq
1263 * @ioc: per adapter object
1264 * @index: msix index into vector table
1265 * @vector: irq vector
1266 *
1267 * Inserts the newly allocated reply_queue into the list.
1268 */
1269static int
1270_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1271{
1272	struct adapter_reply_queue *reply_q;
1273	int r;
1274
1275	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1276	if (!reply_q) {
1277		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1278		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1279		return -ENOMEM;
1280	}
1281	reply_q->ioc = ioc;
1282	reply_q->msix_index = index;
1283	reply_q->vector = vector;
1284	atomic_set(&reply_q->busy, 0);
1285	if (ioc->msix_enable)
1286		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1287		    MPT2SAS_DRIVER_NAME, ioc->id, index);
1288	else
1289		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1290		    MPT2SAS_DRIVER_NAME, ioc->id);
1291	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1292	    reply_q);
1293	if (r) {
1294		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1295		    reply_q->name, vector);
1296		kfree(reply_q);
1297		return -EBUSY;
1298	}
1299
1300	INIT_LIST_HEAD(&reply_q->list);
1301	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1302	return 0;
1303}
1304
1305/**
1306 * _base_assign_reply_queues - assign an msix index to each cpu
1307 * @ioc: per adapter object
1308 *
1309 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
1310 *
1311 * It would be nice if we could call irq_set_affinity, however it is not
1312 * an exported symbol
1313 */
1314static void
1315_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1316{
1317	struct adapter_reply_queue *reply_q;
1318	int cpu_id;
1319	int cpu_grouping, loop, grouping, grouping_mod;
1320
1321	if (!_base_is_controller_msix_enabled(ioc))
1322		return;
1323
1324	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1325	/* when there are more cpus than available msix vectors,
1326	 * then group cpus together on the same irq
1327	 */
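	/* e.g. 8 cpus and 4 msix vectors: grouping = 2, grouping_mod = 0,
	 * so cpu_grouping = 2 and every two cpus share one reply queue
	 */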
1328	if (ioc->cpu_count > ioc->msix_vector_count) {
1329		grouping = ioc->cpu_count / ioc->msix_vector_count;
1330		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1331		if (grouping < 2 || (grouping == 2 && !grouping_mod))
1332			cpu_grouping = 2;
1333		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1334			cpu_grouping = 4;
1335		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1336			cpu_grouping = 8;
1337		else
1338			cpu_grouping = 16;
1339	} else
1340		cpu_grouping = 0;
1341
1342	loop = 0;
1343	reply_q = list_entry(ioc->reply_queue_list.next,
1344	     struct adapter_reply_queue, list);
1345	for_each_online_cpu(cpu_id) {
1346		if (!cpu_grouping) {
1347			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1348			reply_q = list_entry(reply_q->list.next,
1349			    struct adapter_reply_queue, list);
1350		} else {
1351			if (loop < cpu_grouping) {
1352				ioc->cpu_msix_table[cpu_id] =
1353					reply_q->msix_index;
1354				loop++;
1355			} else {
1356				reply_q = list_entry(reply_q->list.next,
1357				    struct adapter_reply_queue, list);
1358				ioc->cpu_msix_table[cpu_id] =
1359					reply_q->msix_index;
1360				loop = 1;
1361			}
1362		}
1363	}
1364}
1365
1366/**
1367 * _base_disable_msix - disables msix
1368 * @ioc: per adapter object
1369 *
1370 */
1371static void
1372_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1373{
1374	if (ioc->msix_enable) {
1375		pci_disable_msix(ioc->pdev);
1376		ioc->msix_enable = 0;
1377	}
1378}
1379
1380/**
1381 * _base_enable_msix - enables msix, falls back to io_apic on failure
1382 * @ioc: per adapter object
1383 *
1384 */
1385static int
1386_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1387{
1388	struct msix_entry *entries, *a;
1389	int r;
1390	int i;
1391	u8 try_msix = 0;
1392
1393	INIT_LIST_HEAD(&ioc->reply_queue_list);
1394
1395	if (msix_disable == -1 || msix_disable == 0)
1396		try_msix = 1;
1397
1398	if (!try_msix)
1399		goto try_ioapic;
1400
1401	if (_base_check_enable_msix(ioc) != 0)
1402		goto try_ioapic;
1403
1404	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1405	    ioc->msix_vector_count);
1406
1407	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1408	    GFP_KERNEL);
1409	if (!entries) {
1410		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
1411		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
1412		    __LINE__, __func__));
1413		goto try_ioapic;
1414	}
1415
1416	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1417		a->entry = i;
1418
1419	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1420	if (r) {
1421		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1422		    "failed (r=%d) !!!\n", ioc->name, r));
1423		kfree(entries);
1424		goto try_ioapic;
1425	}
1426
1427	ioc->msix_enable = 1;
1428	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1429		r = _base_request_irq(ioc, i, a->vector);
1430		if (r) {
1431			_base_free_irq(ioc);
1432			_base_disable_msix(ioc);
1433			kfree(entries);
1434			goto try_ioapic;
1435		}
1436	}
1437
1438	kfree(entries);
1439	return 0;
1440
1441/* fall back to io_apic interrupt routing */
1442 try_ioapic:
1443
1444	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1445
1446	return r;
1447}
1448
1449/**
1450 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1451 * @ioc: per adapter object
1452 *
1453 * Returns 0 for success, non-zero for failure.
1454 */
1455int
1456mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1457{
1458	struct pci_dev *pdev = ioc->pdev;
1459	u32 memap_sz;
1460	u32 pio_sz;
1461	int i, r = 0;
1462	u64 pio_chip = 0;
1463	u64 chip_phys = 0;
1464	struct adapter_reply_queue *reply_q;
1465
1466	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1467	    ioc->name, __func__));
1468
1469	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1470	if (pci_enable_device_mem(pdev)) {
1471		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1472		    "failed\n", ioc->name);
1473		return -ENODEV;
1474	}
1475
1476
1477	if (pci_request_selected_regions(pdev, ioc->bars,
1478	    MPT2SAS_DRIVER_NAME)) {
1479		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1480		    "failed\n", ioc->name);
1481		r = -ENODEV;
1482		goto out_fail;
1483	}
1484
1485	/* AER (Advanced Error Reporting) hooks */
1486	pci_enable_pcie_error_reporting(pdev);
1487
1488	pci_set_master(pdev);
1489
1490	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1491		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1492		    ioc->name, pci_name(pdev));
1493		r = -ENODEV;
1494		goto out_fail;
1495	}
1496
1497	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1498		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1499			if (pio_sz)
1500				continue;
1501			pio_chip = (u64)pci_resource_start(pdev, i);
1502			pio_sz = pci_resource_len(pdev, i);
1503		} else {
1504			if (memap_sz)
1505				continue;
1506			/* verify memory resource is valid before using */
1507			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1508				ioc->chip_phys = pci_resource_start(pdev, i);
1509				chip_phys = (u64)ioc->chip_phys;
1510				memap_sz = pci_resource_len(pdev, i);
1511				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1512				if (ioc->chip == NULL) {
1513					printk(MPT2SAS_ERR_FMT "unable to map "
1514					    "adapter memory!\n", ioc->name);
1515					r = -EINVAL;
1516					goto out_fail;
1517				}
1518			}
1519		}
1520	}
1521
1522	_base_mask_interrupts(ioc);
1523	r = _base_enable_msix(ioc);
1524	if (r)
1525		goto out_fail;
1526
1527	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1528		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1529		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1530		    "IO-APIC enabled"), reply_q->vector);
1531
1532	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1533	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1534	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1535	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1536
1537	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1538	pci_save_state(pdev);
1539
1540	return 0;
1541
1542 out_fail:
1543	if (ioc->chip_phys)
1544		iounmap(ioc->chip);
1545	ioc->chip_phys = 0;
1546	pci_release_selected_regions(ioc->pdev, ioc->bars);
1547	pci_disable_pcie_error_reporting(pdev);
1548	pci_disable_device(pdev);
1549	return r;
1550}
1551
1552/**
1553 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1554 * @ioc: per adapter object
1555 * @smid: system request message index(smid zero is invalid)
1556 *
1557 * Returns virt pointer to message frame.
1558 */
1559void *
1560mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1561{
1562	return (void *)(ioc->request + (smid * ioc->request_sz));
1563}
1564
1565/**
1566 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1567 * @ioc: per adapter object
1568 * @smid: system request message index
1569 *
1570 * Returns virt pointer to sense buffer.
1571 */
1572void *
1573mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1574{
1575	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1576}
1577
1578/**
1579 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1580 * @ioc: per adapter object
1581 * @smid: system request message index
1582 *
1583 * Returns phys pointer to the low 32bit address of the sense buffer.
1584 */
1585__le32
1586mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1587{
1588	return cpu_to_le32(ioc->sense_dma +
1589			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1590}
1591
1592/**
1593 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1594 * @ioc: per adapter object
1595 * @phys_addr: lower 32 physical addr of the reply
1596 *
1597 * Converts 32bit lower physical addr into a virt address.
1598 */
1599void *
1600mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1601{
1602	if (!phys_addr)
1603		return NULL;
1604	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1605}
1606
1607/**
1608 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1609 * @ioc: per adapter object
1610 * @cb_idx: callback index
1611 *
1612 * Returns smid (zero is invalid)
1613 */
1614u16
1615mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1616{
1617	unsigned long flags;
1618	struct request_tracker *request;
1619	u16 smid;
1620
1621	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1622	if (list_empty(&ioc->internal_free_list)) {
1623		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1624		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1625		    ioc->name, __func__);
1626		return 0;
1627	}
1628
1629	request = list_entry(ioc->internal_free_list.next,
1630	    struct request_tracker, tracker_list);
1631	request->cb_idx = cb_idx;
1632	smid = request->smid;
1633	list_del(&request->tracker_list);
1634	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1635	return smid;
1636}
1637
1638/**
1639 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1640 * @ioc: per adapter object
1641 * @cb_idx: callback index
1642 * @scmd: pointer to scsi command object
1643 *
1644 * Returns smid (zero is invalid)
1645 */
1646u16
1647mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1648    struct scsi_cmnd *scmd)
1649{
1650	unsigned long flags;
1651	struct scsiio_tracker *request;
1652	u16 smid;
1653
1654	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1655	if (list_empty(&ioc->free_list)) {
1656		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1657		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1658		    ioc->name, __func__);
1659		return 0;
1660	}
1661
1662	request = list_entry(ioc->free_list.next,
1663	    struct scsiio_tracker, tracker_list);
1664	request->scmd = scmd;
1665	request->cb_idx = cb_idx;
1666	smid = request->smid;
1667	list_del(&request->tracker_list);
1668	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1669	return smid;
1670}
1671
1672/**
1673 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1674 * @ioc: per adapter object
1675 * @cb_idx: callback index
1676 *
1677 * Returns smid (zero is invalid)
1678 */
1679u16
1680mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1681{
1682	unsigned long flags;
1683	struct request_tracker *request;
1684	u16 smid;
1685
1686	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1687	if (list_empty(&ioc->hpr_free_list)) {
1688		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1689		return 0;
1690	}
1691
1692	request = list_entry(ioc->hpr_free_list.next,
1693	    struct request_tracker, tracker_list);
1694	request->cb_idx = cb_idx;
1695	smid = request->smid;
1696	list_del(&request->tracker_list);
1697	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1698	return smid;
1699}
1700
1701
1702/**
1703 * mpt2sas_base_free_smid - put smid back on free_list
1704 * @ioc: per adapter object
1705 * @smid: system request message index
1706 *
1707 * Return nothing.
1708 */
1709void
1710mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1711{
1712	unsigned long flags;
1713	int i;
1714	struct chain_tracker *chain_req, *next;
1715
1716	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1717	if (smid < ioc->hi_priority_smid) {
1718		/* scsiio queue */
1719		i = smid - 1;
1720		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1721			list_for_each_entry_safe(chain_req, next,
1722			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
1723				list_del_init(&chain_req->tracker_list);
1724				list_add_tail(&chain_req->tracker_list,
1725				    &ioc->free_chain_list);
1726			}
1727		}
1728		ioc->scsi_lookup[i].cb_idx = 0xFF;
1729		ioc->scsi_lookup[i].scmd = NULL;
1730		ioc->scsi_lookup[i].direct_io = 0;
1731		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1732		    &ioc->free_list);
1733		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1734
1735		/*
1736		 * See _wait_for_commands_to_complete() call with regards
1737		 * to this code.
1738		 */
1739		if (ioc->shost_recovery && ioc->pending_io_count) {
1740			if (ioc->pending_io_count == 1)
1741				wake_up(&ioc->reset_wq);
1742			ioc->pending_io_count--;
1743		}
1744		return;
1745	} else if (smid < ioc->internal_smid) {
1746		/* hi-priority */
1747		i = smid - ioc->hi_priority_smid;
1748		ioc->hpr_lookup[i].cb_idx = 0xFF;
1749		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1750		    &ioc->hpr_free_list);
1751	} else if (smid <= ioc->hba_queue_depth) {
1752		/* internal queue */
1753		i = smid - ioc->internal_smid;
1754		ioc->internal_lookup[i].cb_idx = 0xFF;
1755		list_add_tail(&ioc->internal_lookup[i].tracker_list,
1756		    &ioc->internal_free_list);
1757	}
1758	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1759}
1760
1761/**
1762 * _base_writeq - 64 bit write to MMIO
1763 * @ioc: per adapter object
1764 * @b: data payload
1765 * @addr: address in MMIO space
1766 * @writeq_lock: spin lock
1767 *
1768 * Glue for handling an atomic 64 bit word write to MMIO. This special
1769 * handling takes care of a 32 bit environment where it's not guaranteed
1770 * that the entire word is sent in one transfer.
1771 */
1772#ifndef writeq
1773static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1774    spinlock_t *writeq_lock)
1775{
1776	unsigned long flags;
1777	__u64 data_out = cpu_to_le64(b);
1778
1779	spin_lock_irqsave(writeq_lock, flags);
1780	writel((u32)(data_out), addr);
1781	writel((u32)(data_out >> 32), (addr + 4));
1782	spin_unlock_irqrestore(writeq_lock, flags);
1783}
1784#else
1785static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1786    spinlock_t *writeq_lock)
1787{
1788	writeq(cpu_to_le64(b), addr);
1789}
1790#endif
1791
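/**
 * _base_get_msix_index - get the msix index for the issuing cpu
 * @ioc: per adapter object
 *
 * Returns the msix index from cpu_msix_table for the cpu this code is
 * currently running on, so the reply is posted to a queue local to that cpu.
 */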
1792static inline u8
1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1794{
1795	return ioc->cpu_msix_table[raw_smp_processor_id()];
1796}
1797
1798/**
1799 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1800 * @ioc: per adapter object
1801 * @smid: system request message index
1802 * @handle: device handle
1803 *
1804 * Return nothing.
1805 */
1806void
1807mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1808{
1809	Mpi2RequestDescriptorUnion_t descriptor;
1810	u64 *request = (u64 *)&descriptor;
1811
1812
1813	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1814	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
1815	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1816	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1817	descriptor.SCSIIO.LMID = 0;
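	/* post the full 64-bit descriptor; per the MPI2 system interface
	 * layout the Low/High request post registers are adjacent, so one
	 * 64-bit write (or the locked 32-bit pair in _base_writeq) covers both
	 */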
1818	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1819	    &ioc->scsi_lookup_lock);
1820}
1821
1822
1823/**
1824 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1825 * @ioc: per adapter object
1826 * @smid: system request message index
1827 *
1828 * Return nothing.
1829 */
1830void
1831mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1832{
1833	Mpi2RequestDescriptorUnion_t descriptor;
1834	u64 *request = (u64 *)&descriptor;
1835
1836	descriptor.HighPriority.RequestFlags =
1837	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1838	descriptor.HighPriority.MSIxIndex =  0;
1839	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1840	descriptor.HighPriority.LMID = 0;
1841	descriptor.HighPriority.Reserved1 = 0;
1842	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1843	    &ioc->scsi_lookup_lock);
1844}
1845
1846/**
1847 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1848 * @ioc: per adapter object
1849 * @smid: system request message index
1850 *
1851 * Return nothing.
1852 */
1853void
1854mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1855{
1856	Mpi2RequestDescriptorUnion_t descriptor;
1857	u64 *request = (u64 *)&descriptor;
1858
1859	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1860	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
1861	descriptor.Default.SMID = cpu_to_le16(smid);
1862	descriptor.Default.LMID = 0;
1863	descriptor.Default.DescriptorTypeDependent = 0;
1864	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1865	    &ioc->scsi_lookup_lock);
1866}
1867
1868/**
1869 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1870 * @ioc: per adapter object
1871 * @smid: system request message index
1872 * @io_index: value used to track the IO
1873 *
1874 * Return nothing.
1875 */
1876void
1877mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1878    u16 io_index)
1879{
1880	Mpi2RequestDescriptorUnion_t descriptor;
1881	u64 *request = (u64 *)&descriptor;
1882
1883	descriptor.SCSITarget.RequestFlags =
1884	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1885	descriptor.SCSITarget.MSIxIndex =  _base_get_msix_index(ioc);
1886	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1887	descriptor.SCSITarget.LMID = 0;
1888	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1889	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1890	    &ioc->scsi_lookup_lock);
1891}
1892
1893/**
1894 * _base_display_dell_branding - Display branding string
1895 * @ioc: per adapter object
1896 *
1897 * Return nothing.
1898 */
1899static void
1900_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1901{
1902	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1903
1904	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1905		return;
1906
1907	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1908	switch (ioc->pdev->subsystem_device) {
1909	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1910		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1911		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1912		break;
1913	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1914		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1915		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1916		break;
1917	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1918		strncpy(dell_branding,
1919		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1920		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1921		break;
1922	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1923		strncpy(dell_branding,
1924		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1925		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1926		break;
1927	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
1928		strncpy(dell_branding,
1929		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
1930		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1931		break;
1932	case MPT2SAS_DELL_PERC_H200_SSDID:
1933		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
1934		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1935		break;
1936	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
1937		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
1938		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1939		break;
1940	default:
1941		sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
1942		break;
1943	}
1944
1945	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
1946	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
1947	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
1948	    ioc->pdev->subsystem_device);
1949}
1950
1951/**
1952 * _base_display_intel_branding - Display branding string
1953 * @ioc: per adapter object
1954 *
1955 * Return nothing.
1956 */
1957static void
1958_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1959{
1960	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1961		return;
1962
1963	switch (ioc->pdev->device) {
1964	case MPI2_MFGPAGE_DEVID_SAS2008:
1965		switch (ioc->pdev->subsystem_device) {
1966		case MPT2SAS_INTEL_RMS2LL080_SSDID:
1967			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1968			    MPT2SAS_INTEL_RMS2LL080_BRANDING);
1969			break;
1970		case MPT2SAS_INTEL_RMS2LL040_SSDID:
1971			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1972			    MPT2SAS_INTEL_RMS2LL040_BRANDING);
1973			break;
1974		case MPT2SAS_INTEL_RAMSDALE_SSDID:
1975			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1976			    MPT2SAS_INTEL_RAMSDALE_BRANDING);
1977			break;
1978		default:
1979			break;
1980		}
		break;
1981	case MPI2_MFGPAGE_DEVID_SAS2308_2:
1982		switch (ioc->pdev->subsystem_device) {
1983		case MPT2SAS_INTEL_RS25GB008_SSDID:
1984			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1985			    MPT2SAS_INTEL_RS25GB008_BRANDING);
1986			break;
1987		case MPT2SAS_INTEL_RMS25JB080_SSDID:
1988			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1989			    MPT2SAS_INTEL_RMS25JB080_BRANDING);
1990			break;
1991		case MPT2SAS_INTEL_RMS25JB040_SSDID:
1992			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1993			    MPT2SAS_INTEL_RMS25JB040_BRANDING);
1994			break;
1995		case MPT2SAS_INTEL_RMS25KB080_SSDID:
1996			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1997			    MPT2SAS_INTEL_RMS25KB080_BRANDING);
1998			break;
1999		case MPT2SAS_INTEL_RMS25KB040_SSDID:
2000			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2001			    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2002			break;
2003		default:
2004			break;
2005		}
2006	default:
2007		break;
2008	}
2009}
2010
2011/**
2012 * _base_display_hp_branding - Display branding string
2013 * @ioc: per adapter object
2014 *
2015 * Return nothing.
2016 */
2017static void
2018_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
2019{
2020	if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
2021		return;
2022
2023	switch (ioc->pdev->device) {
2024	case MPI2_MFGPAGE_DEVID_SAS2004:
2025		switch (ioc->pdev->subsystem_device) {
2026		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2027			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2028			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2029			break;
2030		default:
2031			break;
2032		}
		break;
2033	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2034		switch (ioc->pdev->subsystem_device) {
2035		case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2036			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2037			    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2038			break;
2039		case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2040			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2041			    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2042			break;
2043		case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2044			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2045			    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2046			break;
2047		case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2048			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2049			    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2050			break;
2051		default:
2052			break;
2053		}
2054	default:
2055		break;
2056	}
2057}
2058
2059/**
2060 * _base_display_ioc_capabilities - Display IOC's capabilities.
2061 * @ioc: per adapter object
2062 *
2063 * Return nothing.
2064 */
2065static void
2066_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2067{
2068	int i = 0;
2069	char desc[17] = {0};
2070	u32 iounit_pg1_flags;
2071	u32 bios_version;
2072
2073	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2074	strncpy(desc, ioc->manu_pg0.ChipName, 16);
2075	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
2076	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2077	    ioc->name, desc,
2078	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2079	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2080	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2081	   ioc->facts.FWVersion.Word & 0x000000FF,
2082	   ioc->pdev->revision,
2083	   (bios_version & 0xFF000000) >> 24,
2084	   (bios_version & 0x00FF0000) >> 16,
2085	   (bios_version & 0x0000FF00) >> 8,
2086	    bios_version & 0x000000FF);
2087
2088	_base_display_dell_branding(ioc);
2089	_base_display_intel_branding(ioc);
2090	_base_display_hp_branding(ioc);
2091
2092	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
2093
2094	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2095		printk("Initiator");
2096		i++;
2097	}
2098
2099	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2100		printk("%sTarget", i ? "," : "");
2101		i++;
2102	}
2103
2104	i = 0;
2105	printk("), ");
2106	printk("Capabilities=(");
2107
2108	if (!ioc->hide_ir_msg) {
2109		if (ioc->facts.IOCCapabilities &
2110		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2111			printk("Raid");
2112			i++;
2113		}
2114	}
2115
2116	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2117		printk("%sTLR", i ? "," : "");
2118		i++;
2119	}
2120
2121	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2122		printk("%sMulticast", i ? "," : "");
2123		i++;
2124	}
2125
2126	if (ioc->facts.IOCCapabilities &
2127	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2128		printk("%sBIDI Target", i ? "," : "");
2129		i++;
2130	}
2131
2132	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2133		printk("%sEEDP", i ? "," : "");
2134		i++;
2135	}
2136
2137	if (ioc->facts.IOCCapabilities &
2138	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2139		printk("%sSnapshot Buffer", i ? "," : "");
2140		i++;
2141	}
2142
2143	if (ioc->facts.IOCCapabilities &
2144	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2145		printk("%sDiag Trace Buffer", i ? "," : "");
2146		i++;
2147	}
2148
2149	if (ioc->facts.IOCCapabilities &
2150	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2151		printk("%sDiag Extended Buffer", i ? "," : "");
2152		i++;
2153	}
2154
2155	if (ioc->facts.IOCCapabilities &
2156	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2157		printk("%sTask Set Full", i ? "," : "");
2158		i++;
2159	}
2160
2161	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2162	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2163		printk("%sNCQ", i ? "," : "");
2164		i++;
2165	}
2166
2167	printk(")\n");
2168}
2169
2170/**
2171 * _base_update_missing_delay - change the missing delay timers
2172 * @ioc: per adapter object
2173 * @device_missing_delay: amount of time till device is reported missing
2174 * @io_missing_delay: interval IO is returned when there is a missing device
2175 *
2176 * Return nothing.
2177 *
2178 * Using values passed on the command line, this function modifies the device
2179 * missing delay as well as the io missing delay. This should be called at
2180 * driver load time.
2181 */
2182static void
2183_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2184	u16 device_missing_delay, u8 io_missing_delay)
2185{
2186	u16 dmd, dmd_new, dmd_orignal;
2187	u8 io_missing_delay_original;
2188	u16 sz;
2189	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2190	Mpi2ConfigReply_t mpi_reply;
2191	u8 num_phys = 0;
2192	u16 ioc_status;
2193
2194	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
2195	if (!num_phys)
2196		return;
2197
2198	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2199	    sizeof(Mpi2SasIOUnit1PhyData_t));
2200	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2201	if (!sas_iounit_pg1) {
2202		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2203		    ioc->name, __FILE__, __LINE__, __func__);
2204		goto out;
2205	}
2206	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2207	    sas_iounit_pg1, sz))) {
2208		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2209		    ioc->name, __FILE__, __LINE__, __func__);
2210		goto out;
2211	}
2212	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2213	    MPI2_IOCSTATUS_MASK;
2214	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2215		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2216		    ioc->name, __FILE__, __LINE__, __func__);
2217		goto out;
2218	}
2219
2220	/* device missing delay */
2221	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2222	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2223		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2224	else
2225		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2226	dmd_orignal = dmd;
2227	if (device_missing_delay > 0x7F) {
2228		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2229		    device_missing_delay;
2230		dmd = dmd / 16;
2231		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2232	} else
2233		dmd = device_missing_delay;
2234	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
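	/*
	 * Illustrative example (values assumed): a requested delay of 300
	 * seconds exceeds 0x7F, so it is stored as 300 / 16 = 18 units with
	 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, i.e. the firmware
	 * applies an 18 * 16 = 288 second delay.
	 */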
2235
2236	/* io missing delay */
2237	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2238	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2239
2240	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2241	    sz)) {
2242		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2243			dmd_new = (dmd &
2244			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2245		else
2246			dmd_new =
2247		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2248		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
2249		    "new(%d)\n", ioc->name, dmd_orignal, dmd_new);
2250		printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
2251		    "new(%d)\n", ioc->name, io_missing_delay_original,
2252		    io_missing_delay);
2253		ioc->device_missing_delay = dmd_new;
2254		ioc->io_missing_delay = io_missing_delay;
2255	}
2256
2257out:
2258	kfree(sas_iounit_pg1);
2259}
2260
2261/**
2262 * _base_static_config_pages - static start of day config pages
2263 * @ioc: per adapter object
2264 *
2265 * Return nothing.
2266 */
2267static void
2268_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
2269{
2270	Mpi2ConfigReply_t mpi_reply;
2271	u32 iounit_pg1_flags;
2272
2273	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2274	if (ioc->ir_firmware)
2275		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2276		    &ioc->manu_pg10);
2277	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2278	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2279	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2280	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2281	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2282	_base_display_ioc_capabilities(ioc);
2283
2284	/*
2285	 * Enable task_set_full handling in iounit_pg1 when the
2286	 * facts capabilities indicate that it is supported.
2287	 */
2288	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2289	if ((ioc->facts.IOCCapabilities &
2290	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2291		iounit_pg1_flags &=
2292		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2293	else
2294		iounit_pg1_flags |=
2295		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2296	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2297	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2298
2299}
2300
2301/**
2302 * _base_release_memory_pools - release memory
2303 * @ioc: per adapter object
2304 *
2305 * Free memory allocated from _base_allocate_memory_pools.
2306 *
2307 * Return nothing.
2308 */
2309static void
2310_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2311{
2312	int i;
2313
2314	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2315	    __func__));
2316
2317	if (ioc->request) {
2318		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2319		    ioc->request,  ioc->request_dma);
2320		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
2321		    ": free\n", ioc->name, ioc->request));
2322		ioc->request = NULL;
2323	}
2324
2325	if (ioc->sense) {
2326		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2327		if (ioc->sense_dma_pool)
2328			pci_pool_destroy(ioc->sense_dma_pool);
2329		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
2330		    ": free\n", ioc->name, ioc->sense));
2331		ioc->sense = NULL;
2332	}
2333
2334	if (ioc->reply) {
2335		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2336		if (ioc->reply_dma_pool)
2337			pci_pool_destroy(ioc->reply_dma_pool);
2338		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
2339		     ": free\n", ioc->name, ioc->reply));
2340		ioc->reply = NULL;
2341	}
2342
2343	if (ioc->reply_free) {
2344		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2345		    ioc->reply_free_dma);
2346		if (ioc->reply_free_dma_pool)
2347			pci_pool_destroy(ioc->reply_free_dma_pool);
2348		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
2349		    "(0x%p): free\n", ioc->name, ioc->reply_free));
2350		ioc->reply_free = NULL;
2351	}
2352
2353	if (ioc->reply_post_free) {
2354		pci_pool_free(ioc->reply_post_free_dma_pool,
2355		    ioc->reply_post_free, ioc->reply_post_free_dma);
2356		if (ioc->reply_post_free_dma_pool)
2357			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2358		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2359		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2360		    ioc->reply_post_free));
2361		ioc->reply_post_free = NULL;
2362	}
2363
2364	if (ioc->config_page) {
2365		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2366		    "config_page(0x%p): free\n", ioc->name,
2367		    ioc->config_page));
2368		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2369		    ioc->config_page, ioc->config_page_dma);
2370	}
2371
2372	if (ioc->scsi_lookup) {
2373		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2374		ioc->scsi_lookup = NULL;
2375	}
2376	kfree(ioc->hpr_lookup);
2377	kfree(ioc->internal_lookup);
2378	if (ioc->chain_lookup) {
2379		for (i = 0; i < ioc->chain_depth; i++) {
2380			if (ioc->chain_lookup[i].chain_buffer)
2381				pci_pool_free(ioc->chain_dma_pool,
2382				    ioc->chain_lookup[i].chain_buffer,
2383				    ioc->chain_lookup[i].chain_buffer_dma);
2384		}
2385		if (ioc->chain_dma_pool)
2386			pci_pool_destroy(ioc->chain_dma_pool);
2387		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2388		ioc->chain_lookup = NULL;
2389	}
2390}
2391
2392
2393/**
2394 * _base_allocate_memory_pools - allocate start of day memory pools
2395 * @ioc: per adapter object
2396 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2397 *
2398 * Returns 0 success, anything else error
2399 */
2400static int
2401_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
2402{
2403	struct mpt2sas_facts *facts;
2404	u16 max_sge_elements;
2405	u16 chains_needed_per_io;
2406	u32 sz, total_sz, reply_post_free_sz;
2407	u32 retry_sz;
2408	u16 max_request_credit;
2409	int i;
2410
2411	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2412	    __func__));
2413
2414	retry_sz = 0;
2415	facts = &ioc->facts;
2416
2417	/* command line tunables  for max sgl entries */
2418	if (max_sgl_entries != -1) {
2419		ioc->shost->sg_tablesize = (max_sgl_entries <
2420		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
2421		    MPT2SAS_SG_DEPTH;
2422	} else {
2423		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
2424	}
2425
2426	/* command line tunables  for max controller queue depth */
2427	if (max_queue_depth != -1 && max_queue_depth != 0) {
2428		max_request_credit = min_t(u16, max_queue_depth +
2429			ioc->hi_priority_depth + ioc->internal_depth,
2430			facts->RequestCredit);
2431		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2432			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2433	} else
2434		max_request_credit = min_t(u16, facts->RequestCredit,
2435		    MAX_HBA_QUEUE_DEPTH);
2436
2437	ioc->hba_queue_depth = max_request_credit;
2438	ioc->hi_priority_depth = facts->HighPriorityCredit;
2439	ioc->internal_depth = ioc->hi_priority_depth + 5;
2440
2441	/* request frame size */
2442	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2443
2444	/* reply frame size */
2445	ioc->reply_sz = facts->ReplyFrameSize * 4;
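	/* IOCRequestFrameSize and ReplyFrameSize are reported by the IOC in
	 * 32 bit dwords, hence the multiply by 4 to get sizes in bytes.
	 */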
2446
2447 retry_allocation:
2448	total_sz = 0;
2449	/* calculate number of sg elements left over in the 1st frame */
2450	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2451	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
2452	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
2453
2454	/* now do the same for a chain buffer */
2455	max_sge_elements = ioc->request_sz - ioc->sge_size;
2456	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
2457
2458	ioc->chain_offset_value_for_main_message =
2459	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
2460	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
2461
2462	/*
2463	 *  MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2464	 */
2465	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2466	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2467	    + 1;
2468	if (chains_needed_per_io > facts->MaxChainDepth) {
2469		chains_needed_per_io = facts->MaxChainDepth;
2470		ioc->shost->sg_tablesize = min_t(u16,
2471		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2472		* chains_needed_per_io), ioc->shost->sg_tablesize);
2473	}
2474	ioc->chains_needed_per_io = chains_needed_per_io;
2475
2476	/* reply free queue sizing - allowing headroom for 64 FW events */
2477	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2478
2479	/* align the reply post queue on the next 16 count boundary */
2480	if (!(ioc->reply_free_queue_depth % 16))
2481		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2482	else
2483		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2484				32 - (ioc->reply_free_queue_depth % 16);
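	/*
	 * Illustrative arithmetic (queue depth assumed for the example): with
	 * reply_free_queue_depth = 1064, 1064 % 16 == 8, so the reply post
	 * queue depth becomes 1064 + 32 - 8 = 1088, a multiple of 16 with at
	 * least 16 entries of headroom over the reply free queue.
	 */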
2485	if (ioc->reply_post_queue_depth >
2486	    facts->MaxReplyDescriptorPostQueueDepth) {
2487		ioc->reply_post_queue_depth = min_t(u16,
2488		    (facts->MaxReplyDescriptorPostQueueDepth -
2489		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2490		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2491		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2492		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2493	}
2494
2495
2496	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2497	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2498	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2499	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2500	    ioc->chains_needed_per_io));
2501
2502	ioc->scsiio_depth = ioc->hba_queue_depth -
2503	    ioc->hi_priority_depth - ioc->internal_depth;
2504
2505	/* set the scsi host can_queue depth
2506	 * with some internal commands that could be outstanding
2507	 */
2508	ioc->shost->can_queue = ioc->scsiio_depth;
2509	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2510	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2511
2512	/* contiguous pool for request and chains, 16 byte align, one extra
2513	 * frame for smid=0
2514	 */
2515	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2516	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2517
2518	/* hi-priority queue */
2519	sz += (ioc->hi_priority_depth * ioc->request_sz);
2520
2521	/* internal queue */
2522	sz += (ioc->internal_depth * ioc->request_sz);
2523
2524	ioc->request_dma_sz = sz;
2525	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2526	if (!ioc->request) {
2527		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2528		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2529		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2530		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2531		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2532			goto out;
2533		retry_sz += 64;
2534		ioc->hba_queue_depth = max_request_credit - retry_sz;
2535		goto retry_allocation;
2536	}
2537
2538	if (retry_sz)
2539		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2540		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2541		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2542		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2543
2544
2545	/* hi-priority queue */
2546	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2547	    ioc->request_sz);
2548	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2549	    ioc->request_sz);
2550
2551	/* internal queue */
2552	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2553	    ioc->request_sz);
2554	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2555	    ioc->request_sz);
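	/*
	 * The hi-priority and internal regions above are carved out of the
	 * tail of the single contiguous request pool allocated earlier; no
	 * separate DMA allocations are made for them.
	 */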
2556
2557
2558	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2559	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2560	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2561	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2562	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2563	    ioc->name, (unsigned long long) ioc->request_dma));
2564	total_sz += sz;
2565
2566	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2567	ioc->scsi_lookup_pages = get_order(sz);
2568	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2569	    GFP_KERNEL, ioc->scsi_lookup_pages);
2570	if (!ioc->scsi_lookup) {
2571		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
2572		    "sz(%d)\n", ioc->name, (int)sz);
2573		goto out;
2574	}
2575
2576	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2577	    "depth(%d)\n", ioc->name, ioc->request,
2578	    ioc->scsiio_depth));
2579
2580	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2581	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2582	ioc->chain_pages = get_order(sz);
2583
2584	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2585	    GFP_KERNEL, ioc->chain_pages);
2586	if (!ioc->chain_lookup) {
2587		printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
2588		    "sz(%d)\n", ioc->name, (int)sz);
2589		goto out;
2590	}
2591	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2592	    ioc->request_sz, 16, 0);
2593	if (!ioc->chain_dma_pool) {
2594		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2595		    "failed\n", ioc->name);
2596		goto out;
2597	}
2598	for (i = 0; i < ioc->chain_depth; i++) {
2599		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2600		    ioc->chain_dma_pool , GFP_KERNEL,
2601		    &ioc->chain_lookup[i].chain_buffer_dma);
2602		if (!ioc->chain_lookup[i].chain_buffer) {
2603			ioc->chain_depth = i;
2604			goto chain_done;
2605		}
2606		total_sz += ioc->request_sz;
2607	}
2608chain_done:
2609	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2610	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2611	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2612	    ioc->request_sz))/1024));
2613
2614	/* initialize hi-priority queue smid's */
2615	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2616	    sizeof(struct request_tracker), GFP_KERNEL);
2617	if (!ioc->hpr_lookup) {
2618		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2619		    ioc->name);
2620		goto out;
2621	}
2622	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2623	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2624	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2625	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2626
2627	/* initialize internal queue smid's */
2628	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2629	    sizeof(struct request_tracker), GFP_KERNEL);
2630	if (!ioc->internal_lookup) {
2631		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2632		    ioc->name);
2633		goto out;
2634	}
2635	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2636	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2637	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2638	     ioc->internal_depth, ioc->internal_smid));
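	/*
	 * Resulting SMID layout (smid 0 is reserved; one extra request frame
	 * is set aside for it):
	 *   1 .. scsiio_depth                      -> SCSI IO requests
	 *   hi_priority_smid .. internal_smid - 1  -> hi-priority requests
	 *   internal_smid .. hba_queue_depth       -> internal requests
	 */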
2639
2640	/* sense buffers, 4 byte align */
2641	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2642	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2643	    0);
2644	if (!ioc->sense_dma_pool) {
2645		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2646		    ioc->name);
2647		goto out;
2648	}
2649	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2650	    &ioc->sense_dma);
2651	if (!ioc->sense) {
2652		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2653		    ioc->name);
2654		goto out;
2655	}
2656	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2657	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2658	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2659	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2660	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2661	    ioc->name, (unsigned long long)ioc->sense_dma));
2662	total_sz += sz;
2663
2664	/* reply pool, 4 byte align */
2665	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2666	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2667	    0);
2668	if (!ioc->reply_dma_pool) {
2669		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2670		    ioc->name);
2671		goto out;
2672	}
2673	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2674	    &ioc->reply_dma);
2675	if (!ioc->reply) {
2676		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2677		    ioc->name);
2678		goto out;
2679	}
2680	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2681	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2682	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2683	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2684	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2685	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2686	    ioc->name, (unsigned long long)ioc->reply_dma));
2687	total_sz += sz;
2688
2689	/* reply free queue, 16 byte align */
2690	sz = ioc->reply_free_queue_depth * 4;
2691	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2692	    ioc->pdev, sz, 16, 0);
2693	if (!ioc->reply_free_dma_pool) {
2694		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2695		    "failed\n", ioc->name);
2696		goto out;
2697	}
2698	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2699	    &ioc->reply_free_dma);
2700	if (!ioc->reply_free) {
2701		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2702		    "failed\n", ioc->name);
2703		goto out;
2704	}
2705	memset(ioc->reply_free, 0, sz);
2706	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2707	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2708	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2709	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2710	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2711	total_sz += sz;
2712
2713	/* reply post queue, 16 byte align */
2714	reply_post_free_sz = ioc->reply_post_queue_depth *
2715	    sizeof(Mpi2DefaultReplyDescriptor_t);
2716	if (_base_is_controller_msix_enabled(ioc))
2717		sz = reply_post_free_sz * ioc->reply_queue_count;
2718	else
2719		sz = reply_post_free_sz;
2720	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2721	    ioc->pdev, sz, 16, 0);
2722	if (!ioc->reply_post_free_dma_pool) {
2723		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2724		    "failed\n", ioc->name);
2725		goto out;
2726	}
2727	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2728	    GFP_KERNEL, &ioc->reply_post_free_dma);
2729	if (!ioc->reply_post_free) {
2730		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2731		    "failed\n", ioc->name);
2732		goto out;
2733	}
2734	memset(ioc->reply_post_free, 0, sz);
2735	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2736	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2737	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2738	    sz/1024));
2739	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2740	    "(0x%llx)\n", ioc->name, (unsigned long long)
2741	    ioc->reply_post_free_dma));
2742	total_sz += sz;
2743
2744	ioc->config_page_sz = 512;
2745	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2746	    ioc->config_page_sz, &ioc->config_page_dma);
2747	if (!ioc->config_page) {
2748		printk(MPT2SAS_ERR_FMT "config page: pci_pool_alloc "
2749		    "failed\n", ioc->name);
2750		goto out;
2751	}
2752	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2753	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2754	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2755	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2756	total_sz += ioc->config_page_sz;
2757
2758	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2759	    ioc->name, total_sz/1024);
2760	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2761	    "Max Controller Queue Depth(%d)\n",
2762	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2763	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2764	    ioc->name, ioc->shost->sg_tablesize);
2765	return 0;
2766
2767 out:
2768	return -ENOMEM;
2769}
2770
2771
2772/**
2773 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2774 * @ioc: Pointer to MPT2SAS_ADAPTER structure
2775 * @cooked: Request raw or cooked IOC state
2776 *
2777 * Returns all IOC Doorbell register bits if cooked==0, else just the
2778 * Doorbell bits in MPI_IOC_STATE_MASK.
2779 */
2780u32
2781mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2782{
2783	u32 s, sc;
2784
2785	s = readl(&ioc->chip->Doorbell);
2786	sc = s & MPI2_IOC_STATE_MASK;
2787	return cooked ? sc : s;
2788}
2789
2790/**
2791 * _base_wait_on_iocstate - waiting on a particular ioc state
2792 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2793 * @timeout: timeout in seconds
2794 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2795 *
2796 * Returns 0 for success, non-zero for failure.
2797 */
2798static int
2799_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2800    int sleep_flag)
2801{
2802	u32 count, cntdn;
2803	u32 current_state;
2804
2805	count = 0;
2806	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
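	/*
	 * Both polling modes approximate the same wall-clock timeout: the
	 * CAN_SLEEP path polls ~1000 times per second with msleep(1), the
	 * NO_SLEEP path ~2000 times per second with udelay(500), so cntdn
	 * corresponds to roughly 'timeout' seconds either way.
	 */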
2807	do {
2808		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2809		if (current_state == ioc_state)
2810			return 0;
2811		if (count && current_state == MPI2_IOC_STATE_FAULT)
2812			break;
2813		if (sleep_flag == CAN_SLEEP)
2814			msleep(1);
2815		else
2816			udelay(500);
2817		count++;
2818	} while (--cntdn);
2819
2820	return current_state;
2821}
2822
2823/**
2824 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
2825 * a write to the doorbell)
2826 * @ioc: per adapter object
2827 * @timeout: timeout in seconds
2828 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2829 *
2830 * Returns 0 for success, non-zero for failure.
2831 *
2832 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2833 */
2834static int
2835_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2836    int sleep_flag)
2837{
2838	u32 cntdn, count;
2839	u32 int_status;
2840
2841	count = 0;
2842	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2843	do {
2844		int_status = readl(&ioc->chip->HostInterruptStatus);
2845		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2846			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2847			    "successful count(%d), timeout(%d)\n", ioc->name,
2848			    __func__, count, timeout));
2849			return 0;
2850		}
2851		if (sleep_flag == CAN_SLEEP)
2852			msleep(1);
2853		else
2854			udelay(500);
2855		count++;
2856	} while (--cntdn);
2857
2858	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2859	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2860	return -EFAULT;
2861}
2862
2863/**
2864 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2865 * @ioc: per adapter object
2866 * @timeout: timeout in seconds
2867 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2868 *
2869 * Returns 0 for success, non-zero for failure.
2870 *
2871 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2872 * doorbell.
2873 */
2874static int
2875_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2876    int sleep_flag)
2877{
2878	u32 cntdn, count;
2879	u32 int_status;
2880	u32 doorbell;
2881
2882	count = 0;
2883	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2884	do {
2885		int_status = readl(&ioc->chip->HostInterruptStatus);
2886		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2887			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2888			    "successful count(%d), timeout(%d)\n", ioc->name,
2889			    __func__, count, timeout));
2890			return 0;
2891		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2892			doorbell = readl(&ioc->chip->Doorbell);
2893			if ((doorbell & MPI2_IOC_STATE_MASK) ==
2894			    MPI2_IOC_STATE_FAULT) {
2895				mpt2sas_base_fault_info(ioc , doorbell);
2896				return -EFAULT;
2897			}
2898		} else if (int_status == 0xFFFFFFFF)
2899			goto out;
2900
2901		if (sleep_flag == CAN_SLEEP)
2902			msleep(1);
2903		else
2904			udelay(500);
2905		count++;
2906	} while (--cntdn);
2907
2908 out:
2909	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2910	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2911	return -EFAULT;
2912}
2913
2914/**
2915 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2916 * @ioc: per adapter object
2917 * @timeout: timeout in seconds
2918 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2919 *
2920 * Returns 0 for success, non-zero for failure.
2921 *
2922 */
2923static int
2924_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2925    int sleep_flag)
2926{
2927	u32 cntdn, count;
2928	u32 doorbell_reg;
2929
2930	count = 0;
2931	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2932	do {
2933		doorbell_reg = readl(&ioc->chip->Doorbell);
2934		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2935			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2936			    "successful count(%d), timeout(%d)\n", ioc->name,
2937			    __func__, count, timeout));
2938			return 0;
2939		}
2940		if (sleep_flag == CAN_SLEEP)
2941			msleep(1);
2942		else
2943			udelay(500);
2944		count++;
2945	} while (--cntdn);
2946
2947	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2948	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2949	return -EFAULT;
2950}
2951
2952/**
2953 * _base_send_ioc_reset - send doorbell reset
2954 * @ioc: per adapter object
2955 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
2956 * @timeout: timeout in seconds
2957 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2958 *
2959 * Returns 0 for success, non-zero for failure.
2960 */
2961static int
2962_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2963    int sleep_flag)
2964{
2965	u32 ioc_state;
2966	int r = 0;
2967
2968	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2969		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2970		    ioc->name, __func__);
2971		return -EFAULT;
2972	}
2973
2974	if (!(ioc->facts.IOCCapabilities &
2975	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
2976		return -EFAULT;
2977
2978	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
2979
2980	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
2981	    &ioc->chip->Doorbell);
2982	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
2983		r = -EFAULT;
2984		goto out;
2985	}
2986	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
2987	    timeout, sleep_flag);
2988	if (ioc_state) {
2989		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
2990		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2991		r = -EFAULT;
2992		goto out;
2993	}
2994 out:
2995	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
2996	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
2997	return r;
2998}
2999
3000/**
3001 * _base_handshake_req_reply_wait - send request thru doorbell interface
3002 * @ioc: per adapter object
3003 * @request_bytes: request length
3004 * @request: pointer having request payload
3005 * @reply_bytes: reply length
3006 * @reply: pointer to reply payload
3007 * @timeout: timeout in seconds
3008 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3009 *
3010 * Returns 0 for success, non-zero for failure.
3011 */
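/*
 * Rough outline of the doorbell handshake implemented below: the function
 * code and request length (in dwords) are written to the Doorbell, the host
 * waits for and acknowledges the IOC interrupt, streams the request one
 * dword at a time (waiting for the IOC to consume each one), and finally
 * reads the reply back 16 bits at a time from the Doorbell, acknowledging
 * an interrupt per word.
 */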
3012static int
3013_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
3014    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3015{
3016	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3017	int i;
3018	u8 failed;
3019	u16 dummy;
3020	__le32 *mfp;
3021
3022	/* make sure doorbell is not in use */
3023	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3024		printk(MPT2SAS_ERR_FMT "doorbell is in use "
3025		    " (line=%d)\n", ioc->name, __LINE__);
3026		return -EFAULT;
3027	}
3028
3029	/* clear pending doorbell interrupts from previous state changes */
3030	if (readl(&ioc->chip->HostInterruptStatus) &
3031	    MPI2_HIS_IOC2SYS_DB_STATUS)
3032		writel(0, &ioc->chip->HostInterruptStatus);
3033
3034	/* send message to ioc */
3035	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3036	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3037	    &ioc->chip->Doorbell);
3038
3039	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3040		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3041		   "int failed (line=%d)\n", ioc->name, __LINE__);
3042		return -EFAULT;
3043	}
3044	writel(0, &ioc->chip->HostInterruptStatus);
3045
3046	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3047		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3048		    "ack failed (line=%d)\n", ioc->name, __LINE__);
3049		return -EFAULT;
3050	}
3051
3052	/* send message 32-bits at a time */
3053	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3054		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3055		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3056			failed = 1;
3057	}
3058
3059	if (failed) {
3060		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3061		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
3062		return -EFAULT;
3063	}
3064
3065	/* now wait for the reply */
3066	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3067		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3068		   "int failed (line=%d)\n", ioc->name, __LINE__);
3069		return -EFAULT;
3070	}
3071
3072	/* read the first two 16-bit words; they carry MsgLength, the total reply length */
3073	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3074	    & MPI2_DOORBELL_DATA_MASK);
3075	writel(0, &ioc->chip->HostInterruptStatus);
3076	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3077		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3078		   "int failed (line=%d)\n", ioc->name, __LINE__);
3079		return -EFAULT;
3080	}
3081	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3082	    & MPI2_DOORBELL_DATA_MASK);
3083	writel(0, &ioc->chip->HostInterruptStatus);
3084
3085	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3086		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3087			printk(MPT2SAS_ERR_FMT "doorbell "
3088			    "handshake int failed (line=%d)\n", ioc->name,
3089			    __LINE__);
3090			return -EFAULT;
3091		}
3092		if (i >=  reply_bytes/2) /* overflow case */
3093			dummy = readl(&ioc->chip->Doorbell);
3094		else
3095			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3096			    & MPI2_DOORBELL_DATA_MASK);
3097		writel(0, &ioc->chip->HostInterruptStatus);
3098	}
3099
3100	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3101	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3102		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
3103		    " (line=%d)\n", ioc->name, __LINE__));
3104	}
3105	writel(0, &ioc->chip->HostInterruptStatus);
3106
3107	if (ioc->logging_level & MPT_DEBUG_INIT) {
3108		mfp = (__le32 *)reply;
3109		printk(KERN_INFO "\toffset:data\n");
3110		for (i = 0; i < reply_bytes/4; i++)
3111			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3112			    le32_to_cpu(mfp[i]));
3113	}
3114	return 0;
3115}
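/*
 * The handshake path above is used while the message queues are not yet set
 * up; for example, _base_get_port_facts() and _base_get_ioc_facts() below
 * issue their requests through it rather than through a request frame.
 */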
3116
3117/**
3118 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
3119 * @ioc: per adapter object
3120 * @mpi_reply: the reply payload from FW
3121 * @mpi_request: the request payload sent to FW
3122 *
3123 * The SAS IO Unit Control Request message allows the host to perform low-level
3124 * operations such as resets on the PHYs of the IO Unit. It also allows the host
3125 * to obtain the IOC-assigned device handle for a device when it has other
3126 * identifying information about that device, and to remove IOC resources
3127 * associated with the device.
3128 *
3129 * Returns 0 for success, non-zero for failure.
3130 */
3131int
3132mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
3133    Mpi2SasIoUnitControlReply_t *mpi_reply,
3134    Mpi2SasIoUnitControlRequest_t *mpi_request)
3135{
3136	u16 smid;
3137	u32 ioc_state;
3138	unsigned long timeleft;
3139	u8 issue_reset;
3140	int rc;
3141	void *request;
3142	u16 wait_state_count;
3143
3144	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3145	    __func__));
3146
3147	mutex_lock(&ioc->base_cmds.mutex);
3148
3149	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3150		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3151		    ioc->name, __func__);
3152		rc = -EAGAIN;
3153		goto out;
3154	}
3155
3156	wait_state_count = 0;
3157	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3158	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3159		if (wait_state_count++ == 10) {
3160			printk(MPT2SAS_ERR_FMT
3161			    "%s: failed due to ioc not operational\n",
3162			    ioc->name, __func__);
3163			rc = -EFAULT;
3164			goto out;
3165		}
3166		ssleep(1);
3167		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3168		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3169		    "operational state(count=%d)\n", ioc->name,
3170		    __func__, wait_state_count);
3171	}
3172
3173	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3174	if (!smid) {
3175		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3176		    ioc->name, __func__);
3177		rc = -EAGAIN;
3178		goto out;
3179	}
3180
3181	rc = 0;
3182	ioc->base_cmds.status = MPT2_CMD_PENDING;
3183	request = mpt2sas_base_get_msg_frame(ioc, smid);
3184	ioc->base_cmds.smid = smid;
3185	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3186	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3187	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3188		ioc->ioc_link_reset_in_progress = 1;
3189	init_completion(&ioc->base_cmds.done);
3190	mpt2sas_base_put_smid_default(ioc, smid);
3191	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3192	    msecs_to_jiffies(10000));
3193	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3194	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3195	    ioc->ioc_link_reset_in_progress)
3196		ioc->ioc_link_reset_in_progress = 0;
3197	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3198		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3199		    ioc->name, __func__);
3200		_debug_dump_mf(mpi_request,
3201		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3202		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3203			issue_reset = 1;
3204		goto issue_host_reset;
3205	}
3206	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3207		memcpy(mpi_reply, ioc->base_cmds.reply,
3208		    sizeof(Mpi2SasIoUnitControlReply_t));
3209	else
3210		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3211	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3212	goto out;
3213
3214 issue_host_reset:
3215	if (issue_reset)
3216		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3217		    FORCE_BIG_HAMMER);
3218	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3219	rc = -EFAULT;
3220 out:
3221	mutex_unlock(&ioc->base_cmds.mutex);
3222	return rc;
3223}
3224
3225
3226/**
3227 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
3228 * @ioc: per adapter object
3229 * @mpi_reply: the reply payload from FW
3230 * @mpi_request: the request payload sent to FW
3231 *
3232 * The SCSI Enclosure Processor request message causes the IOC to
3233 * communicate with SES devices to control LED status signals.
3234 *
3235 * Returns 0 for success, non-zero for failure.
3236 */
3237int
3238mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
3239    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3240{
3241	u16 smid;
3242	u32 ioc_state;
3243	unsigned long timeleft;
3244	u8 issue_reset;
3245	int rc;
3246	void *request;
3247	u16 wait_state_count;
3248
3249	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3250	    __func__));
3251
3252	mutex_lock(&ioc->base_cmds.mutex);
3253
3254	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3255		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3256		    ioc->name, __func__);
3257		rc = -EAGAIN;
3258		goto out;
3259	}
3260
3261	wait_state_count = 0;
3262	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3263	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3264		if (wait_state_count++ == 10) {
3265			printk(MPT2SAS_ERR_FMT
3266			    "%s: failed due to ioc not operational\n",
3267			    ioc->name, __func__);
3268			rc = -EFAULT;
3269			goto out;
3270		}
3271		ssleep(1);
3272		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3273		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3274		    "operational state(count=%d)\n", ioc->name,
3275		    __func__, wait_state_count);
3276	}
3277
3278	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3279	if (!smid) {
3280		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3281		    ioc->name, __func__);
3282		rc = -EAGAIN;
3283		goto out;
3284	}
3285
3286	rc = 0;
3287	ioc->base_cmds.status = MPT2_CMD_PENDING;
3288	request = mpt2sas_base_get_msg_frame(ioc, smid);
3289	ioc->base_cmds.smid = smid;
3290	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
3291	init_completion(&ioc->base_cmds.done);
3292	mpt2sas_base_put_smid_default(ioc, smid);
3293	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3294	    msecs_to_jiffies(10000));
3295	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3296		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3297		    ioc->name, __func__);
3298		_debug_dump_mf(mpi_request,
3299		    sizeof(Mpi2SepRequest_t)/4);
3300		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3301			issue_reset = 1;
3302		goto issue_host_reset;
3303	}
3304	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3305		memcpy(mpi_reply, ioc->base_cmds.reply,
3306		    sizeof(Mpi2SepReply_t));
3307	else
3308		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3309	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3310	goto out;
3311
3312 issue_host_reset:
3313	if (issue_reset)
3314		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3315		    FORCE_BIG_HAMMER);
3316	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3317	rc = -EFAULT;
3318 out:
3319	mutex_unlock(&ioc->base_cmds.mutex);
3320	return rc;
3321}
3322
3323/**
3324 * _base_get_port_facts - obtain port facts reply and save in ioc
3325 * @ioc: per adapter object
3326 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3327 *
3328 * Returns 0 for success, non-zero for failure.
3329 */
3330static int
3331_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3332{
3333	Mpi2PortFactsRequest_t mpi_request;
3334	Mpi2PortFactsReply_t mpi_reply;
3335	struct mpt2sas_port_facts *pfacts;
3336	int mpi_reply_sz, mpi_request_sz, r;
3337
3338	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3339	    __func__));
3340
3341	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3342	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3343	memset(&mpi_request, 0, mpi_request_sz);
3344	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3345	mpi_request.PortNumber = port;
3346	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3347	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3348
3349	if (r != 0) {
3350		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3351		    ioc->name, __func__, r);
3352		return r;
3353	}
3354
3355	pfacts = &ioc->pfacts[port];
3356	memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
3357	pfacts->PortNumber = mpi_reply.PortNumber;
3358	pfacts->VP_ID = mpi_reply.VP_ID;
3359	pfacts->VF_ID = mpi_reply.VF_ID;
3360	pfacts->MaxPostedCmdBuffers =
3361	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3362
3363	return 0;
3364}
3365
3366/**
3367 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3368 * @ioc: per adapter object
3369 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3370 *
3371 * Returns 0 for success, non-zero for failure.
3372 */
3373static int
3374_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3375{
3376	Mpi2IOCFactsRequest_t mpi_request;
3377	Mpi2IOCFactsReply_t mpi_reply;
3378	struct mpt2sas_facts *facts;
3379	int mpi_reply_sz, mpi_request_sz, r;
3380
3381	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3382	    __func__));
3383
3384	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3385	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3386	memset(&mpi_request, 0, mpi_request_sz);
3387	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3388	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3389	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3390
3391	if (r != 0) {
3392		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3393		    ioc->name, __func__, r);
3394		return r;
3395	}
3396
3397	facts = &ioc->facts;
3398	memset(facts, 0, sizeof(struct mpt2sas_facts));
3399	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3400	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3401	facts->VP_ID = mpi_reply.VP_ID;
3402	facts->VF_ID = mpi_reply.VF_ID;
3403	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3404	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3405	facts->WhoInit = mpi_reply.WhoInit;
3406	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3407	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3408	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3409	facts->MaxReplyDescriptorPostQueueDepth =
3410	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3411	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3412	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3413	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3414		ioc->ir_firmware = 1;
3415	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3416	facts->IOCRequestFrameSize =
3417	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3418	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3419	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3420	ioc->shost->max_id = -1;
3421	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3422	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3423	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3424	facts->HighPriorityCredit =
3425	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3426	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3427	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3428
3429	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
3430	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
3431	    facts->MaxChainDepth));
3432	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
3433	    "reply frame size(%d)\n", ioc->name,
3434	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3435	return 0;
3436}
3437
3438/**
3439 * _base_send_ioc_init - send ioc_init to firmware
3440 * @ioc: per adapter object
3441 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3442 *
3443 * Returns 0 for success, non-zero for failure.
3444 */
3445static int
3446_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3447{
3448	Mpi2IOCInitRequest_t mpi_request;
3449	Mpi2IOCInitReply_t mpi_reply;
3450	int r;
3451	struct timeval current_time;
3452	u16 ioc_status;
3453
3454	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3455	    __func__));
3456
3457	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3458	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3459	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3460	mpi_request.VF_ID = 0; /* TODO */
3461	mpi_request.VP_ID = 0;
3462	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3463	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3464
3465	if (_base_is_controller_msix_enabled(ioc))
3466		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3467	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3468	mpi_request.ReplyDescriptorPostQueueDepth =
3469	    cpu_to_le16(ioc->reply_post_queue_depth);
3470	mpi_request.ReplyFreeQueueDepth =
3471	    cpu_to_le16(ioc->reply_free_queue_depth);
3472
3473	mpi_request.SenseBufferAddressHigh =
3474	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3475	mpi_request.SystemReplyAddressHigh =
3476	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3477	mpi_request.SystemRequestFrameBaseAddress =
3478	    cpu_to_le64((u64)ioc->request_dma);
3479	mpi_request.ReplyFreeQueueAddress =
3480	    cpu_to_le64((u64)ioc->reply_free_dma);
3481	mpi_request.ReplyDescriptorPostQueueAddress =
3482	    cpu_to_le64((u64)ioc->reply_post_free_dma);
3483
3484
3485	/* This time stamp specifies number of milliseconds
3486	 * since epoch ~ midnight January 1, 1970.
3487	 */
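	/*
	 * Illustrative arithmetic (editor's note, assuming the formula below):
	 * tv_sec = 1332866400 and tv_usec = 123456 would yield
	 * 1332866400 * 1000 + 123456 / 1000 = 1332866400123 msec.
	 */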
3488	do_gettimeofday(&current_time);
3489	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3490	    (current_time.tv_usec / 1000));
3491
3492	if (ioc->logging_level & MPT_DEBUG_INIT) {
3493		__le32 *mfp;
3494		int i;
3495
3496		mfp = (__le32 *)&mpi_request;
3497		printk(KERN_INFO "\toffset:data\n");
3498		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3499			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3500			    le32_to_cpu(mfp[i]));
3501	}
3502
3503	r = _base_handshake_req_reply_wait(ioc,
3504	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3505	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3506	    sleep_flag);
3507
3508	if (r != 0) {
3509		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3510		    ioc->name, __func__, r);
3511		return r;
3512	}
3513
3514	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3515	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3516	    mpi_reply.IOCLogInfo) {
3517		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3518		r = -EIO;
3519	}
3520
3521	return r;
3522}
3523
3524/**
3525 * mpt2sas_port_enable_done - command completion routine for port enable
3526 * @ioc: per adapter object
3527 * @smid: system request message index
3528 * @msix_index: MSIX table index supplied by the OS
3529 * @reply: reply message frame(lower 32bit addr)
3530 *
3531 * Return 1 meaning mf should be freed from _base_interrupt
3532 *        0 means the mf is freed from this function.
3533 */
3534u8
3535mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3536	u32 reply)
3537{
3538	MPI2DefaultReply_t *mpi_reply;
3539	u16 ioc_status;
3540
3541	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3542	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
3543		return 1;
3544
3545	if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
3546		return 1;
3547
3548	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
3549	if (mpi_reply) {
3550		ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
3551		memcpy(ioc->port_enable_cmds.reply, mpi_reply,
3552		    mpi_reply->MsgLength*4);
3553	}
3554	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
3555
3556	if (mpi_reply)
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
	else
		ioc_status = MPI2_IOCSTATUS_INTERNAL_ERROR;
3557
3558	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3559		ioc->port_enable_failed = 1;
3560
3561	if (ioc->is_driver_loading) {
3562		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3563			mpt2sas_port_enable_complete(ioc);
3564			return 1;
3565		} else {
3566			ioc->start_scan_failed = ioc_status;
3567			ioc->start_scan = 0;
3568			return 1;
3569		}
3570	}
3571	complete(&ioc->port_enable_cmds.done);
3572	return 1;
3573}
3574
3575
3576/**
3577 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3578 * @ioc: per adapter object
3579 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3580 *
3581 * Returns 0 for success, non-zero for failure.
3582 */
3583static int
3584_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3585{
3586	Mpi2PortEnableRequest_t *mpi_request;
3587	Mpi2PortEnableReply_t *mpi_reply;
3588	unsigned long timeleft;
3589	int r = 0;
3590	u16 smid;
3591	u16 ioc_status;
3592
3593	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3594
3595	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3596		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3597		    ioc->name, __func__);
3598		return -EAGAIN;
3599	}
3600
3601	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3602	if (!smid) {
3603		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3604		    ioc->name, __func__);
3605		return -EAGAIN;
3606	}
3607
3608	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3609	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3610	ioc->port_enable_cmds.smid = smid;
3611	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3612	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3613
3614	init_completion(&ioc->port_enable_cmds.done);
3615	mpt2sas_base_put_smid_default(ioc, smid);
3616	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3617	    300*HZ);
3618	if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
3619		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3620		    ioc->name, __func__);
3621		_debug_dump_mf(mpi_request,
3622		    sizeof(Mpi2PortEnableRequest_t)/4);
3623		if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
3624			r = -EFAULT;
3625		else
3626			r = -ETIME;
3627		goto out;
3628	}
3629	mpi_reply = ioc->port_enable_cmds.reply;
3630
3631	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3632	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3633		printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
3634		    ioc->name, __func__, ioc_status);
3635		r = -EFAULT;
3636		goto out;
3637	}
3638 out:
3639	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
3640	printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3641	    "SUCCESS" : "FAILED"));
3642	return r;
3643}
3644
3645/**
3646 * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
3647 * @ioc: per adapter object
3648 *
3649 * Returns 0 for success, non-zero for failure.
3650 */
3651int
3652mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
3653{
3654	Mpi2PortEnableRequest_t *mpi_request;
3655	u16 smid;
3656
3657	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3658
3659	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3660		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3661		    ioc->name, __func__);
3662		return -EAGAIN;
3663	}
3664
3665	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3666	if (!smid) {
3667		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3668		    ioc->name, __func__);
3669		return -EAGAIN;
3670	}
3671
3672	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3673	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3674	ioc->port_enable_cmds.smid = smid;
3675	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3676	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3677
3678	mpt2sas_base_put_smid_default(ioc, smid);
3679	return 0;
3680}
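/*
 * Editor's note on the two port-enable paths above: _base_send_port_enable()
 * posts the request and then blocks on ioc->port_enable_cmds.done for up to
 * 300 seconds, whereas mpt2sas_port_enable() only posts the request and
 * returns; its completion is handled asynchronously in
 * mpt2sas_port_enable_done(), which calls mpt2sas_port_enable_complete()
 * while the driver is still loading (see the is_driver_loading handling in
 * _base_make_ioc_operational() below).
 */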
3681
3682/**
3683 * _base_determine_wait_on_discovery - disposition
3684 * @ioc: per adapter object
3685 *
3686 * Decide whether to wait on discovery to complete. Used either to
3687 * locate the boot device, or to report volumes ahead of physical devices.
3688 *
3689 * Returns 1 for wait, 0 for don't wait
3690 */
3691static int
3692_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
3693{
3694	/* We wait for discovery to complete if IR firmware is loaded.
3695	 * The sas topology events arrive before PD events, so we need time to
3696	 * turn on the bit in ioc->pd_handles to indicate a PD.
3697	 * Also, it may be required to report volumes ahead of physical
3698	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3699	 */
3700	if (ioc->ir_firmware)
3701		return 1;
3702
3703	/* if there is no BIOS, then we don't need to wait */
3704	if (!ioc->bios_pg3.BiosVersion)
3705		return 0;
3706
3707	/* The BIOS is present, so we drop down here.
3708	 *
3709	 * If there are any entries in BIOS Page 2, then we wait
3710	 * for discovery to complete.
3711	 */
3712
3713	/* Current Boot Device */
3714	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3715	    MPI2_BIOSPAGE2_FORM_MASK) ==
3716	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3717	/* Request Boot Device */
3718	   (ioc->bios_pg2.ReqBootDeviceForm &
3719	    MPI2_BIOSPAGE2_FORM_MASK) ==
3720	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3721	/* Alternate Request Boot Device */
3722	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3723	    MPI2_BIOSPAGE2_FORM_MASK) ==
3724	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3725		return 0;
3726
3727	return 1;
3728}
3729
3730
3731/**
3732 * _base_unmask_events - turn on notification for this event
3733 * @ioc: per adapter object
3734 * @event: firmware event
3735 *
3736 * The mask is stored in ioc->event_masks.
3737 */
3738static void
3739_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3740{
3741	u32 desired_event;
3742
3743	if (event >= 128)
3744		return;
3745
3746	desired_event = (1 << (event % 32));
3747
3748	if (event < 32)
3749		ioc->event_masks[0] &= ~desired_event;
3750	else if (event < 64)
3751		ioc->event_masks[1] &= ~desired_event;
3752	else if (event < 96)
3753		ioc->event_masks[2] &= ~desired_event;
3754	else if (event < 128)
3755		ioc->event_masks[3] &= ~desired_event;
3756}
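/*
 * Editor's sketch (not part of the driver): the 128 possible event numbers
 * map into the four 32-bit words of ioc->event_masks as word = event / 32,
 * bit = event % 32, and a cleared bit means notification for that event is
 * enabled.  A hypothetical query helper under that assumption:
 */
#if 0
static inline int example_event_is_unmasked(struct MPT2SAS_ADAPTER *ioc,
	u16 event)
{
	if (event >= 128)
		return 0;
	/* cleared bit == notification enabled by _base_unmask_events() */
	return !(ioc->event_masks[event / 32] & (1 << (event % 32)));
}
#endif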
3757
3758/**
3759 * _base_event_notification - send event notification
3760 * @ioc: per adapter object
3761 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3762 *
3763 * Returns 0 for success, non-zero for failure.
3764 */
3765static int
3766_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3767{
3768	Mpi2EventNotificationRequest_t *mpi_request;
3769	unsigned long timeleft;
3770	u16 smid;
3771	int r = 0;
3772	int i;
3773
3774	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3775	    __func__));
3776
3777	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3778		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3779		    ioc->name, __func__);
3780		return -EAGAIN;
3781	}
3782
3783	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3784	if (!smid) {
3785		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3786		    ioc->name, __func__);
3787		return -EAGAIN;
3788	}
3789	ioc->base_cmds.status = MPT2_CMD_PENDING;
3790	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3791	ioc->base_cmds.smid = smid;
3792	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3793	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3794	mpi_request->VF_ID = 0; /* TODO */
3795	mpi_request->VP_ID = 0;
3796	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3797		mpi_request->EventMasks[i] =
3798		    cpu_to_le32(ioc->event_masks[i]);
3799	init_completion(&ioc->base_cmds.done);
3800	mpt2sas_base_put_smid_default(ioc, smid);
3801	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3802	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3803		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3804		    ioc->name, __func__);
3805		_debug_dump_mf(mpi_request,
3806		    sizeof(Mpi2EventNotificationRequest_t)/4);
3807		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3808			r = -EFAULT;
3809		else
3810			r = -ETIME;
3811	} else
3812		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3813		    ioc->name, __func__));
3814	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3815	return r;
3816}
3817
3818/**
3819 * mpt2sas_base_validate_event_type - validating event types
3820 * @ioc: per adapter object
3821 * @event_type: firmware event type bitmask requested by the application
3822 *
3823 * This will turn on firmware event notification when an application
3824 * asks for that event. We don't mask events that are already enabled.
3825 */
3826void
3827mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
3828{
3829	int i, j;
3830	u32 event_mask, desired_event;
3831	u8 send_update_to_fw;
3832
3833	for (i = 0, send_update_to_fw = 0; i <
3834	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3835		event_mask = ~event_type[i];
3836		desired_event = 1;
3837		for (j = 0; j < 32; j++) {
3838			if (!(event_mask & desired_event) &&
3839			    (ioc->event_masks[i] & desired_event)) {
3840				ioc->event_masks[i] &= ~desired_event;
3841				send_update_to_fw = 1;
3842			}
3843			desired_event = (desired_event << 1);
3844		}
3845	}
3846
3847	if (!send_update_to_fw)
3848		return;
3849
3850	mutex_lock(&ioc->base_cmds.mutex);
3851	_base_event_notification(ioc, CAN_SLEEP);
3852	mutex_unlock(&ioc->base_cmds.mutex);
3853}
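/*
 * Editor's note: event_type is a bitmap in which a set bit means the
 * application wants that event.  For example, if bit n of event_type[0] is
 * set while bit n of ioc->event_masks[0] is still set (event masked), the
 * routine above clears the mask bit and re-issues _base_event_notification()
 * so the firmware starts reporting that event.
 */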
3854
3855/**
3856 * _base_diag_reset - the "big hammer" start of day reset
3857 * @ioc: per adapter object
3858 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3859 *
3860 * Returns 0 for success, non-zero for failure.
3861 */
3862static int
3863_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3864{
3865	u32 host_diagnostic;
3866	u32 ioc_state;
3867	u32 count;
3868	u32 hcb_size;
3869
3870	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3871	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3872	    ioc->name));
3873
3874	count = 0;
3875	do {
3876		/* Write magic sequence to WriteSequence register
3877		 * Loop until in diagnostic mode
3878		 */
3879		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
3880		    "sequence\n", ioc->name));
3881		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3882		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
3883		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
3884		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
3885		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
3886		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
3887		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
3888
3889		/* wait 100 msec */
3890		if (sleep_flag == CAN_SLEEP)
3891			msleep(100);
3892		else
3893			mdelay(100);
3894
3895		if (count++ > 20)
3896			goto out;
3897
3898		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3899		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
3900		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
3901		    ioc->name, count, host_diagnostic));
3902
3903	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
3904
3905	hcb_size = readl(&ioc->chip->HCBSize);
3906
3907	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
3908	    ioc->name));
3909	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3910	     &ioc->chip->HostDiagnostic);
3911
3912	/* don't access any registers for 50 milliseconds */
3913	msleep(50);
3914
3915	/* poll at ~1 msec intervals, up to 3000000 times, for reset to complete */
3916	for (count = 0; count < 3000000 ; count++) {
3917
3918		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3919
3920		if (host_diagnostic == 0xFFFFFFFF)
3921			goto out;
3922		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
3923			break;
3924
3925		/* wait 1 msec */
3926		if (sleep_flag == CAN_SLEEP)
3927			msleep(1);
3928		else
3929			mdelay(1);
3930	}
3931
3932	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3933
3934		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
3935		    "assuming the HCB Address points to good F/W\n",
3936		    ioc->name));
3937		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3938		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3939		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3940
3941		drsprintk(ioc, printk(MPT2SAS_INFO_FMT
3942		    "re-enable the HCDW\n", ioc->name));
3943		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3944		    &ioc->chip->HCBSize);
3945	}
3946
3947	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
3948	    ioc->name));
3949	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3950	    &ioc->chip->HostDiagnostic);
3951
3952	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
3953	    "diagnostic register\n", ioc->name));
3954	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3955
3956	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
3957	    "READY state\n", ioc->name));
3958	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3959	    sleep_flag);
3960	if (ioc_state) {
3961		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3962		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3963		goto out;
3964	}
3965
3966	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
3967	return 0;
3968
3969 out:
3970	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
3971	return -EFAULT;
3972}
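/*
 * Editor's summary of _base_diag_reset() above: write the magic key sequence
 * until MPI2_DIAG_DIAG_WRITE_ENABLE is observed, set MPI2_DIAG_RESET_ADAPTER,
 * poll for that bit to clear, optionally restart from the HCB image when
 * MPI2_DIAG_HCB_MODE is set, release MPI2_DIAG_HOLD_IOC_RESET, re-lock the
 * diagnostic register with the flush key, and finally wait for the IOC to
 * reach the READY state.
 */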
3973
3974/**
3975 * _base_make_ioc_ready - put controller in READY state
3976 * @ioc: per adapter object
3977 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3978 * @type: FORCE_BIG_HAMMER or SOFT_RESET
3979 *
3980 * Returns 0 for success, non-zero for failure.
3981 */
3982static int
3983_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3984    enum reset_type type)
3985{
3986	u32 ioc_state;
3987	int rc;
3988
3989	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3990	    __func__));
3991
3992	if (ioc->pci_error_recovery)
3993		return 0;
3994
3995	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3996	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
3997	    ioc->name, __func__, ioc_state));
3998
3999	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4000		return 0;
4001
4002	if (ioc_state & MPI2_DOORBELL_USED) {
4003		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
4004		    "active!\n", ioc->name));
4005		goto issue_diag_reset;
4006	}
4007
4008	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4009		mpt2sas_base_fault_info(ioc, ioc_state &
4010		    MPI2_DOORBELL_DATA_MASK);
4011		goto issue_diag_reset;
4012	}
4013
4014	if (type == FORCE_BIG_HAMMER)
4015		goto issue_diag_reset;
4016
4017	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4018		if (!(_base_send_ioc_reset(ioc,
4019		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4020			ioc->ioc_reset_count++;
4021			return 0;
4022		}
4023
4024 issue_diag_reset:
4025	rc = _base_diag_reset(ioc, CAN_SLEEP);
4026	ioc->ioc_reset_count++;
4027	return rc;
4028}
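/*
 * Editor's note on the decision ladder in _base_make_ioc_ready(): an IOC
 * already in the READY state is left alone; an active doorbell or a FAULT
 * state forces a diag reset; FORCE_BIG_HAMMER also forces a diag reset; an
 * OPERATIONAL IOC is first asked politely via a message unit reset and only
 * falls back to the diag reset if that fails.
 */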
4029
4030/**
4031 * _base_make_ioc_operational - put controller in OPERATIONAL state
4032 * @ioc: per adapter object
4033 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4034 *
4035 * Returns 0 for success, non-zero for failure.
4036 */
4037static int
4038_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4039{
4040	int r, i;
4041	unsigned long	flags;
4042	u32 reply_address;
4043	u16 smid;
4044	struct _tr_list *delayed_tr, *delayed_tr_next;
4045	u8 hide_flag;
4046	struct adapter_reply_queue *reply_q;
4047	long reply_post_free;
4048	u32 reply_post_free_sz;
4049
4050	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4051	    __func__));
4052
4053	/* clean the delayed target reset list */
4054	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4055	    &ioc->delayed_tr_list, list) {
4056		list_del(&delayed_tr->list);
4057		kfree(delayed_tr);
4058	}
4059
4060	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4061	    &ioc->delayed_tr_volume_list, list) {
4062		list_del(&delayed_tr->list);
4063		kfree(delayed_tr);
4064	}
4065
4066	/* initialize the scsi lookup free list */
4067	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4068	INIT_LIST_HEAD(&ioc->free_list);
4069	smid = 1;
4070	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4071		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4072		ioc->scsi_lookup[i].cb_idx = 0xFF;
4073		ioc->scsi_lookup[i].smid = smid;
4074		ioc->scsi_lookup[i].scmd = NULL;
4075		ioc->scsi_lookup[i].direct_io = 0;
4076		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4077		    &ioc->free_list);
4078	}
4079
4080	/* hi-priority queue */
4081	INIT_LIST_HEAD(&ioc->hpr_free_list);
4082	smid = ioc->hi_priority_smid;
4083	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4084		ioc->hpr_lookup[i].cb_idx = 0xFF;
4085		ioc->hpr_lookup[i].smid = smid;
4086		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4087		    &ioc->hpr_free_list);
4088	}
4089
4090	/* internal queue */
4091	INIT_LIST_HEAD(&ioc->internal_free_list);
4092	smid = ioc->internal_smid;
4093	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4094		ioc->internal_lookup[i].cb_idx = 0xFF;
4095		ioc->internal_lookup[i].smid = smid;
4096		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4097		    &ioc->internal_free_list);
4098	}
4099
4100	/* chain pool */
4101	INIT_LIST_HEAD(&ioc->free_chain_list);
4102	for (i = 0; i < ioc->chain_depth; i++)
4103		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4104		    &ioc->free_chain_list);
4105
4106	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4107
4108	/* initialize Reply Free Queue */
4109	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4110	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4111	    ioc->reply_sz)
4112		ioc->reply_free[i] = cpu_to_le32(reply_address);
4113
4114	/* initialize reply queues */
4115	if (ioc->is_driver_loading)
4116		_base_assign_reply_queues(ioc);
4117
4118	/* initialize Reply Post Free Queue */
4119	reply_post_free = (long)ioc->reply_post_free;
4120	reply_post_free_sz = ioc->reply_post_queue_depth *
4121	    sizeof(Mpi2DefaultReplyDescriptor_t);
4122	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4123		reply_q->reply_post_host_index = 0;
4124		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4125		    reply_post_free;
4126		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4127			reply_q->reply_post_free[i].Words =
4128							cpu_to_le64(ULLONG_MAX);
4129		if (!_base_is_controller_msix_enabled(ioc))
4130			goto skip_init_reply_post_free_queue;
4131		reply_post_free += reply_post_free_sz;
4132	}
4133 skip_init_reply_post_free_queue:
4134
4135	r = _base_send_ioc_init(ioc, sleep_flag);
4136	if (r)
4137		return r;
4138
4139	/* initialize reply free host index */
4140	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4141	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4142
4143	/* initialize reply post host index */
4144	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4145		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4146		    &ioc->chip->ReplyPostHostIndex);
4147		if (!_base_is_controller_msix_enabled(ioc))
4148			goto skip_init_reply_post_host_index;
4149	}
4150
4151 skip_init_reply_post_host_index:
4152
4153	_base_unmask_interrupts(ioc);
4154
4155	r = _base_event_notification(ioc, sleep_flag);
4156	if (r)
4157		return r;
4158
4159	if (sleep_flag == CAN_SLEEP)
4160		_base_static_config_pages(ioc);
4161
4162
4163	if (ioc->is_driver_loading) {
4164		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4165		    == 0x80) {
4166			hide_flag = (u8) (
4167			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
4168			    MFG_PAGE10_HIDE_SSDS_MASK);
4169			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4170				ioc->mfg_pg10_hide_flag = hide_flag;
4171		}
4172		ioc->wait_for_discovery_to_complete =
4173		    _base_determine_wait_on_discovery(ioc);
4174		return r; /* scan_start and scan_finished support */
4175	}
4176	r = _base_send_port_enable(ioc, sleep_flag);
4177	if (r)
4178		return r;
4179
4180	return r;
4181}
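/*
 * Editor's summary of the bring-up order in _base_make_ioc_operational():
 * rebuild the smid tracker free lists, initialize the reply free and reply
 * descriptor post queues, send IOC init, program the reply free/post host
 * index registers, unmask interrupts, send the event notification, read the
 * static config pages, and finally issue port enable (deferred to the async
 * scan path while the driver is still loading).
 */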
4182
4183/**
4184 * mpt2sas_base_free_resources - free controller resources (io/irq/memmap)
4185 * @ioc: per adapter object
4186 *
4187 * Return nothing.
4188 */
4189void
4190mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4191{
4192	struct pci_dev *pdev = ioc->pdev;
4193
4194	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4195	    __func__));
4196
4197	_base_mask_interrupts(ioc);
4198	ioc->shost_recovery = 1;
4199	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4200	ioc->shost_recovery = 0;
4201	_base_free_irq(ioc);
4202	_base_disable_msix(ioc);
4203	if (ioc->chip_phys)
4204		iounmap(ioc->chip);
4205	ioc->chip_phys = 0;
4206	pci_release_selected_regions(ioc->pdev, ioc->bars);
4207	pci_disable_pcie_error_reporting(pdev);
4208	pci_disable_device(pdev);
4209	return;
4210}
4211
4212/**
4213 * mpt2sas_base_attach - attach controller instance
4214 * @ioc: per adapter object
4215 *
4216 * Returns 0 for success, non-zero for failure.
4217 */
4218int
4219mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4220{
4221	int r, i;
4222	int cpu_id, last_cpu_id = 0;
4223
4224	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4225	    __func__));
4226
4227	/* setup cpu_msix_table */
4228	ioc->cpu_count = num_online_cpus();
4229	for_each_online_cpu(cpu_id)
4230		last_cpu_id = cpu_id;
4231	ioc->cpu_msix_table_sz = last_cpu_id + 1;
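	/* the table is indexed by CPU id, so size it by the highest online
	 * CPU id + 1 (rather than by ioc->cpu_count) to cope with sparse
	 * CPU numbering */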
4232	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4233	ioc->reply_queue_count = 1;
4234	if (!ioc->cpu_msix_table) {
4235		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
4236		    "cpu_msix_table failed!!!\n", ioc->name));
4237		r = -ENOMEM;
4238		goto out_free_resources;
4239	}
4240
4241	if (ioc->is_warpdrive) {
4242		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
4243		    sizeof(resource_size_t *), GFP_KERNEL);
4244		if (!ioc->reply_post_host_index) {
4245			dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
4246				"for reply_post_host_index failed!!!\n", ioc->name));
4247			r = -ENOMEM;
4248			goto out_free_resources;
4249		}
4250	}
4251
4252	r = mpt2sas_base_map_resources(ioc);
4253	if (r)
4254		goto out_free_resources;
4255
4256	if (ioc->is_warpdrive) {
4257		ioc->reply_post_host_index[0] =
4258		    (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
4259
4260		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
4261			ioc->reply_post_host_index[i] = (resource_size_t *)
4262			((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
4263			* 4)));
4264	}
4265
4266	pci_set_drvdata(ioc->pdev, ioc->shost);
4267	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4268	if (r)
4269		goto out_free_resources;
4270
4271	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4272	if (r)
4273		goto out_free_resources;
4274
4275	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4276	    sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
4277	if (!ioc->pfacts) {
4278		r = -ENOMEM;
4279		goto out_free_resources;
4280	}
4281
4282	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4283		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4284		if (r)
4285			goto out_free_resources;
4286	}
4287
4288	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4289	if (r)
4290		goto out_free_resources;
4291
4292	init_waitqueue_head(&ioc->reset_wq);
4293	/* allocate memory pd handle bitmask list */
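	/* one bit per device handle; this rounds MaxDevHandle up to whole
	 * bytes (equivalent to DIV_ROUND_UP(ioc->facts.MaxDevHandle, 8)) */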
4294	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4295	if (ioc->facts.MaxDevHandle % 8)
4296		ioc->pd_handles_sz++;
4297	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4298	    GFP_KERNEL);
4299	if (!ioc->pd_handles) {
4300		r = -ENOMEM;
4301		goto out_free_resources;
4302	}
4303	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4304	    GFP_KERNEL);
4305	if (!ioc->blocking_handles) {
4306		r = -ENOMEM;
4307		goto out_free_resources;
4308	}
4309	ioc->fwfault_debug = mpt2sas_fwfault_debug;
4310
4311	/* base internal command bits */
4312	mutex_init(&ioc->base_cmds.mutex);
4313	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4314	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
4315
4316	/* port_enable command bits */
4317	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4318	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
4319
4320	/* transport internal command bits */
4321	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4322	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
4323	mutex_init(&ioc->transport_cmds.mutex);
4324
4325	/* scsih internal command bits */
4326	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4327	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4328	mutex_init(&ioc->scsih_cmds.mutex);
4329
4330	/* task management internal command bits */
4331	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4332	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4333	mutex_init(&ioc->tm_cmds.mutex);
4334
4335	/* config page internal command bits */
4336	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4337	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
4338	mutex_init(&ioc->config_cmds.mutex);
4339
4340	/* ctl module internal command bits */
4341	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4342	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4343	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
4344	mutex_init(&ioc->ctl_cmds.mutex);
4345
4346	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
4347	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
4348	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
4349	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
4350		r = -ENOMEM;
4351		goto out_free_resources;
4352	}
4360
4361	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4362		ioc->event_masks[i] = -1;
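	/* -1 sets every bit, i.e. all events start out masked; the
	 * _base_unmask_events() calls below clear the bits for the events
	 * this driver consumes */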
4363
4364	/* here we enable the events we care about */
4365	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4366	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4367	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4368	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4369	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4370	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4371	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4372	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4373	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4374	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4375	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4376	if (r)
4377		goto out_free_resources;
4378
4379	if (missing_delay[0] != -1 && missing_delay[1] != -1)
4380		_base_update_missing_delay(ioc, missing_delay[0],
4381		    missing_delay[1]);
4382
4383	return 0;
4384
4385 out_free_resources:
4386
4387	ioc->remove_host = 1;
4388	mpt2sas_base_free_resources(ioc);
4389	_base_release_memory_pools(ioc);
4390	pci_set_drvdata(ioc->pdev, NULL);
4391	kfree(ioc->cpu_msix_table);
4392	if (ioc->is_warpdrive)
4393		kfree(ioc->reply_post_host_index);
4394	kfree(ioc->pd_handles);
4395	kfree(ioc->blocking_handles);
4396	kfree(ioc->tm_cmds.reply);
4397	kfree(ioc->transport_cmds.reply);
4398	kfree(ioc->scsih_cmds.reply);
4399	kfree(ioc->config_cmds.reply);
4400	kfree(ioc->base_cmds.reply);
4401	kfree(ioc->port_enable_cmds.reply);
4402	kfree(ioc->ctl_cmds.reply);
4403	kfree(ioc->ctl_cmds.sense);
4404	kfree(ioc->pfacts);
4405	ioc->ctl_cmds.reply = NULL;
4406	ioc->base_cmds.reply = NULL;
4407	ioc->tm_cmds.reply = NULL;
4408	ioc->scsih_cmds.reply = NULL;
4409	ioc->transport_cmds.reply = NULL;
4410	ioc->config_cmds.reply = NULL;
4411	ioc->pfacts = NULL;
4412	return r;
4413}
4414
4415
4416/**
4417 * mpt2sas_base_detach - remove controller instance
4418 * @ioc: per adapter object
4419 *
4420 * Return nothing.
4421 */
4422void
4423mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4424{
4425
4426	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4427	    __func__));
4428
4429	mpt2sas_base_stop_watchdog(ioc);
4430	mpt2sas_base_free_resources(ioc);
4431	_base_release_memory_pools(ioc);
4432	pci_set_drvdata(ioc->pdev, NULL);
4433	kfree(ioc->cpu_msix_table);
4434	if (ioc->is_warpdrive)
4435		kfree(ioc->reply_post_host_index);
4436	kfree(ioc->pd_handles);
4437	kfree(ioc->blocking_handles);
4438	kfree(ioc->pfacts);
4439	kfree(ioc->ctl_cmds.reply);
4440	kfree(ioc->ctl_cmds.sense);
4441	kfree(ioc->base_cmds.reply);
4442	kfree(ioc->port_enable_cmds.reply);
4443	kfree(ioc->tm_cmds.reply);
4444	kfree(ioc->transport_cmds.reply);
4445	kfree(ioc->scsih_cmds.reply);
4446	kfree(ioc->config_cmds.reply);
4447}
4448
4449/**
4450 * _base_reset_handler - reset callback handler (for base)
4451 * @ioc: per adapter object
4452 * @reset_phase: phase
4453 *
4454 * The handler for doing any required cleanup or initialization.
4455 *
4456 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
4457 * MPT2_IOC_DONE_RESET
4458 *
4459 * Return nothing.
4460 */
4461static void
4462_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4463{
4464	mpt2sas_scsih_reset_handler(ioc, reset_phase);
4465	mpt2sas_ctl_reset_handler(ioc, reset_phase);
4466	switch (reset_phase) {
4467	case MPT2_IOC_PRE_RESET:
4468		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4469		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
4470		break;
4471	case MPT2_IOC_AFTER_RESET:
4472		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4473		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
4474		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
4475			ioc->transport_cmds.status |= MPT2_CMD_RESET;
4476			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4477			complete(&ioc->transport_cmds.done);
4478		}
4479		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
4480			ioc->base_cmds.status |= MPT2_CMD_RESET;
4481			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
4482			complete(&ioc->base_cmds.done);
4483		}
4484		if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
4485			ioc->port_enable_failed = 1;
4486			ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
4487			mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4488			if (ioc->is_driver_loading) {
4489				ioc->start_scan_failed =
4490				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4491				ioc->start_scan = 0;
4492				ioc->port_enable_cmds.status =
4493						MPT2_CMD_NOT_USED;
4494			} else
4495				complete(&ioc->port_enable_cmds.done);
4496
4497		}
4498		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
4499			ioc->config_cmds.status |= MPT2_CMD_RESET;
4500			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
4501			ioc->config_cmds.smid = USHRT_MAX;
4502			complete(&ioc->config_cmds.done);
4503		}
4504		break;
4505	case MPT2_IOC_DONE_RESET:
4506		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4507		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
4508		break;
4509	}
4510}
4511
4512/**
4513 * _wait_for_commands_to_complete - wait for pending commands to complete
4514 * @ioc: Pointer to MPT_ADAPTER structure
4515 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4516 *
4517 * This function waits (up to 10 seconds) for all pending commands to
4518 * complete prior to putting the controller into reset.
4519 */
4520static void
4521_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4522{
4523	u32 ioc_state;
4524	unsigned long flags;
4525	u16 i;
4526
4527	ioc->pending_io_count = 0;
4528	if (sleep_flag != CAN_SLEEP)
4529		return;
4530
4531	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4532	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4533		return;
4534
4535	/* pending command count */
4536	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4537	for (i = 0; i < ioc->scsiio_depth; i++)
4538		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4539			ioc->pending_io_count++;
4540	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4541
4542	if (!ioc->pending_io_count)
4543		return;
4544
4545	/* wait for pending commands to complete */
4546	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4547}
4548
4549/**
4550 * mpt2sas_base_hard_reset_handler - reset controller
4551 * @ioc: Pointer to MPT_ADAPTER structure
4552 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4553 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4554 *
4555 * Returns 0 for success, non-zero for failure.
4556 */
4557int
4558mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4559    enum reset_type type)
4560{
4561	int r;
4562	unsigned long flags;
4563
4564	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4565	    __func__));
4566
4567	if (ioc->pci_error_recovery) {
4568		printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
4569		    ioc->name, __func__);
4570		r = 0;
4571		goto out_unlocked;
4572	}
4573
4574	if (mpt2sas_fwfault_debug)
4575		mpt2sas_halt_firmware(ioc);
4576
4577	/* TODO - What we really should be doing is pulling
4578	 * out all the code associated with NO_SLEEP; it's never used.
4579	 * That is legacy code from the mpt fusion driver, ported over.
4580	 * I will leave this BUG_ON here for now until it's been resolved.
4581	 */
4582	BUG_ON(sleep_flag == NO_SLEEP);
4583
4584	/* wait for an active reset in progress to complete */
4585	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4586		do {
4587			ssleep(1);
4588		} while (ioc->shost_recovery == 1);
4589		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4590		    __func__));
4591		return ioc->ioc_reset_in_progress_status;
4592	}
4593
4594	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4595	ioc->shost_recovery = 1;
4596	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4597
4598	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
4599	_wait_for_commands_to_complete(ioc, sleep_flag);
4600	_base_mask_interrupts(ioc);
4601	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4602	if (r)
4603		goto out;
4604	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4605
4606	/* If this hard reset is called while port enable is active, then
4607	 * there is no reason to call make_ioc_operational
4608	 */
4609	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4610		ioc->remove_host = 1;
4611		r = -EFAULT;
4612		goto out;
4613	}
4614	r = _base_make_ioc_operational(ioc, sleep_flag);
4615	if (!r)
4616		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4617 out:
4618	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
4619	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4620
4621	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4622	ioc->ioc_reset_in_progress_status = r;
4623	ioc->shost_recovery = 0;
4624	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4625	mutex_unlock(&ioc->reset_in_progress_mutex);
4626
4627 out_unlocked:
4628	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4629	    __func__));
4630	return r;
4631}
4632