mpt2sas_base.c revision aff132d95ffe14eca96cab90597cdd010b457af7
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2010  LSI Corporation
7 *  (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42 * USA.
43 */
44
45#include <linux/kernel.h>
46#include <linux/module.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/types.h>
51#include <linux/pci.h>
52#include <linux/kdev_t.h>
53#include <linux/blkdev.h>
54#include <linux/delay.h>
55#include <linux/interrupt.h>
56#include <linux/dma-mapping.h>
57#include <linux/sort.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/kthread.h>
61#include <linux/aer.h>
62
63#include "mpt2sas_base.h"
64
65static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
66
67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
68
69#define MAX_HBA_QUEUE_DEPTH	30000
70#define MAX_CHAIN_DEPTH		100000
71static int max_queue_depth = -1;
72module_param(max_queue_depth, int, 0);
73MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
74
75static int max_sgl_entries = -1;
76module_param(max_sgl_entries, int, 0);
77MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
78
79static int msix_disable = -1;
80module_param(msix_disable, int, 0);
81MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
82
83static int missing_delay[2] = {-1, -1};
84module_param_array(missing_delay, int, NULL, 0);
85MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
86
87static int mpt2sas_fwfault_debug;
88MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
89	"and halt firmware - (default=0)");
90
91static int disable_discovery = -1;
92module_param(disable_discovery, int, 0);
93MODULE_PARM_DESC(disable_discovery, " disable discovery ");
94
95
96/* diag_buffer_enable is bitwise
97 * bit 0 set = TRACE
98 * bit 1 set = SNAPSHOT
99 * bit 2 set = EXTENDED
100 *
101 * Any combination of these bits may be set
102 */
103static int diag_buffer_enable;
104module_param(diag_buffer_enable, int, 0);
105MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
106    "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
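
/*
 * Example usage (illustration only, values are arbitrary): the module
 * parameters above can be supplied at load time, e.g.
 *
 *	modprobe mpt2sas max_queue_depth=500 max_sgl_entries=32 \
 *		missing_delay=30,30 diag_buffer_enable=1
 *
 * or on the kernel command line as mpt2sas.max_queue_depth=500 and so on.
 */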
107
108/**
109 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
110 *
111 */
112static int
113_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
114{
115	int ret = param_set_int(val, kp);
116	struct MPT2SAS_ADAPTER *ioc;
117
118	if (ret)
119		return ret;
120
121	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
122	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
123		ioc->fwfault_debug = mpt2sas_fwfault_debug;
124	return 0;
125}
126
127module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
128    param_get_int, &mpt2sas_fwfault_debug, 0644);
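
/*
 * Because the parameter above is registered with mode 0644, it can also be
 * toggled at runtime through sysfs, e.g. (illustration only):
 *
 *	echo 1 > /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug
 */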
129
130/**
131 * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
132 * @arg: input argument, used to derive ioc
133 *
134 * Return 0 if controller is removed from pci subsystem.
135 * Return -1 otherwise.
136 */
137static int mpt2sas_remove_dead_ioc_func(void *arg)
138{
139	struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
140	struct pci_dev *pdev;
141
142	if (ioc == NULL)
143		return -1;
144
145	pdev = ioc->pdev;
146	if (pdev == NULL)
147		return -1;
148	pci_remove_bus_device(pdev);
149	return 0;
150}
151
152
153/**
154 * _base_fault_reset_work - workq handling ioc fault conditions
155 * @work: input argument, used to derive ioc
156 * Context: sleep.
157 *
158 * Return nothing.
159 */
160static void
161_base_fault_reset_work(struct work_struct *work)
162{
163	struct MPT2SAS_ADAPTER *ioc =
164	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
165	unsigned long	 flags;
166	u32 doorbell;
167	int rc;
168	struct task_struct *p;
169
170	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
171	if (ioc->shost_recovery)
172		goto rearm_timer;
173	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
174
175	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
176	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
177		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
178			ioc->name, __func__);
179
180		/*
181		 * Call _scsih_flush_pending_cmds callback so that we flush all
182		 * pending commands back to the OS. This call is required to avoid
183		 * a deadlock at the block layer. A dead IOC will fail to do a diag
184		 * reset, and this call is safe since a dead ioc will never return
185		 * any command back from the HW.
186		 */
187		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
188		/*
189		 * Set remove_host flag early since kernel thread will
190		 * take some time to execute.
191		 */
192		ioc->remove_host = 1;
193		/* Remove the dead host */
194		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
195		    "mpt2sas_dead_ioc_%d", ioc->id);
196		if (IS_ERR(p)) {
197			printk(MPT2SAS_ERR_FMT
198			"%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
199			ioc->name, __func__);
200		} else {
201		    printk(MPT2SAS_ERR_FMT
202			"%s: Running mpt2sas_dead_ioc thread success !!!!\n",
203			ioc->name, __func__);
204		}
205
206		return; /* don't rearm timer */
207	}
208
209	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
210		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
211		    FORCE_BIG_HAMMER);
212		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
213		    __func__, (rc == 0) ? "success" : "failed");
214		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
215		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
216			mpt2sas_base_fault_info(ioc, doorbell &
217			    MPI2_DOORBELL_DATA_MASK);
218	}
219
220	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
221 rearm_timer:
222	if (ioc->fault_reset_work_q)
223		queue_delayed_work(ioc->fault_reset_work_q,
224		    &ioc->fault_reset_work,
225		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
226	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
227}
228
229/**
230 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
231 * @ioc: per adapter object
232 * Context: sleep.
233 *
234 * Return nothing.
235 */
236void
237mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
238{
239	unsigned long	 flags;
240
241	if (ioc->fault_reset_work_q)
242		return;
243
244	/* initialize fault polling */
245	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
246	snprintf(ioc->fault_reset_work_q_name,
247	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
248	ioc->fault_reset_work_q =
249		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
250	if (!ioc->fault_reset_work_q) {
251		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
252		    ioc->name, __func__, __LINE__);
253		return;
254	}
255	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
256	if (ioc->fault_reset_work_q)
257		queue_delayed_work(ioc->fault_reset_work_q,
258		    &ioc->fault_reset_work,
259		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
260	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
261}
262
263/**
264 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
265 * @ioc: per adapter object
266 * Context: sleep.
267 *
268 * Return nothing.
269 */
270void
271mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
272{
273	unsigned long	 flags;
274	struct workqueue_struct *wq;
275
276	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
277	wq = ioc->fault_reset_work_q;
278	ioc->fault_reset_work_q = NULL;
279	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
280	if (wq) {
281		if (!cancel_delayed_work(&ioc->fault_reset_work))
282			flush_workqueue(wq);
283		destroy_workqueue(wq);
284	}
285}
286
287/**
288 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
289 * @ioc: per adapter object
290 * @fault_code: fault code
291 *
292 * Return nothing.
293 */
294void
295mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc, u16 fault_code)
296{
297	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
298	    ioc->name, fault_code);
299}
300
301/**
302 * mpt2sas_halt_firmware - halts the mpt controller firmware
303 * @ioc: per adapter object
304 *
305 * For debugging timeout related issues.  Writing 0xC0FFEE00
306 * to the doorbell register will halt the controller firmware. The
307 * intent is to stop both the driver and the firmware so the end user
308 * can obtain a ring buffer from the controller UART.
309 */
310void
311mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
312{
313	u32 doorbell;
314
315	if (!ioc->fwfault_debug)
316		return;
317
318	dump_stack();
319
320	doorbell = readl(&ioc->chip->Doorbell);
321	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
322		mpt2sas_base_fault_info(ioc, doorbell);
323	else {
324		writel(0xC0FFEE00, &ioc->chip->Doorbell);
325		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
326		    "timeout\n", ioc->name);
327	}
328
329	panic("panic in %s\n", __func__);
330}
331
332#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
333/**
334 * _base_sas_ioc_info - verbose translation of the ioc status
335 * @ioc: per adapter object
336 * @mpi_reply: reply mf payload returned from firmware
337 * @request_hdr: request mf
338 *
339 * Return nothing.
340 */
341static void
342_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
343     MPI2RequestHeader_t *request_hdr)
344{
345	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
346	    MPI2_IOCSTATUS_MASK;
347	char *desc = NULL;
348	u16 frame_sz;
349	char *func_str = NULL;
350
351	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
352	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
353	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
354	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
355		return;
356
357	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
358		return;
359
360	switch (ioc_status) {
361
362/****************************************************************************
363*  Common IOCStatus values for all replies
364****************************************************************************/
365
366	case MPI2_IOCSTATUS_INVALID_FUNCTION:
367		desc = "invalid function";
368		break;
369	case MPI2_IOCSTATUS_BUSY:
370		desc = "busy";
371		break;
372	case MPI2_IOCSTATUS_INVALID_SGL:
373		desc = "invalid sgl";
374		break;
375	case MPI2_IOCSTATUS_INTERNAL_ERROR:
376		desc = "internal error";
377		break;
378	case MPI2_IOCSTATUS_INVALID_VPID:
379		desc = "invalid vpid";
380		break;
381	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
382		desc = "insufficient resources";
383		break;
384	case MPI2_IOCSTATUS_INVALID_FIELD:
385		desc = "invalid field";
386		break;
387	case MPI2_IOCSTATUS_INVALID_STATE:
388		desc = "invalid state";
389		break;
390	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
391		desc = "op state not supported";
392		break;
393
394/****************************************************************************
395*  Config IOCStatus values
396****************************************************************************/
397
398	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
399		desc = "config invalid action";
400		break;
401	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
402		desc = "config invalid type";
403		break;
404	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
405		desc = "config invalid page";
406		break;
407	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
408		desc = "config invalid data";
409		break;
410	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
411		desc = "config no defaults";
412		break;
413	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
414		desc = "config cant commit";
415		break;
416
417/****************************************************************************
418*  SCSI IO Reply
419****************************************************************************/
420
421	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
422	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
423	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
424	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
425	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
426	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
427	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
428	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
429	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
430	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
431	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
432	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
433		break;
434
435/****************************************************************************
436*  For use by SCSI Initiator and SCSI Target end-to-end data protection
437****************************************************************************/
438
439	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
440		desc = "eedp guard error";
441		break;
442	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
443		desc = "eedp ref tag error";
444		break;
445	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
446		desc = "eedp app tag error";
447		break;
448
449/****************************************************************************
450*  SCSI Target values
451****************************************************************************/
452
453	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
454		desc = "target invalid io index";
455		break;
456	case MPI2_IOCSTATUS_TARGET_ABORTED:
457		desc = "target aborted";
458		break;
459	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
460		desc = "target no conn retryable";
461		break;
462	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
463		desc = "target no connection";
464		break;
465	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
466		desc = "target xfer count mismatch";
467		break;
468	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
469		desc = "target data offset error";
470		break;
471	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
472		desc = "target too much write data";
473		break;
474	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
475		desc = "target iu too short";
476		break;
477	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
478		desc = "target ack nak timeout";
479		break;
480	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
481		desc = "target nak received";
482		break;
483
484/****************************************************************************
485*  Serial Attached SCSI values
486****************************************************************************/
487
488	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
489		desc = "smp request failed";
490		break;
491	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
492		desc = "smp data overrun";
493		break;
494
495/****************************************************************************
496*  Diagnostic Buffer Post / Diagnostic Release values
497****************************************************************************/
498
499	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
500		desc = "diagnostic released";
501		break;
502	default:
503		break;
504	}
505
506	if (!desc)
507		return;
508
509	switch (request_hdr->Function) {
510	case MPI2_FUNCTION_CONFIG:
511		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
512		func_str = "config_page";
513		break;
514	case MPI2_FUNCTION_SCSI_TASK_MGMT:
515		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
516		func_str = "task_mgmt";
517		break;
518	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
519		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
520		func_str = "sas_iounit_ctl";
521		break;
522	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
523		frame_sz = sizeof(Mpi2SepRequest_t);
524		func_str = "enclosure";
525		break;
526	case MPI2_FUNCTION_IOC_INIT:
527		frame_sz = sizeof(Mpi2IOCInitRequest_t);
528		func_str = "ioc_init";
529		break;
530	case MPI2_FUNCTION_PORT_ENABLE:
531		frame_sz = sizeof(Mpi2PortEnableRequest_t);
532		func_str = "port_enable";
533		break;
534	case MPI2_FUNCTION_SMP_PASSTHROUGH:
535		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
536		func_str = "smp_passthru";
537		break;
538	default:
539		frame_sz = 32;
540		func_str = "unknown";
541		break;
542	}
543
544	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
545	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
546
547	_debug_dump_mf(request_hdr, frame_sz/4);
548}
549
550/**
551 * _base_display_event_data - verbose translation of firmware async events
552 * @ioc: per adapter object
553 * @mpi_reply: reply mf payload returned from firmware
554 *
555 * Return nothing.
556 */
557static void
558_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
559    Mpi2EventNotificationReply_t *mpi_reply)
560{
561	char *desc = NULL;
562	u16 event;
563
564	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
565		return;
566
567	event = le16_to_cpu(mpi_reply->Event);
568
569	switch (event) {
570	case MPI2_EVENT_LOG_DATA:
571		desc = "Log Data";
572		break;
573	case MPI2_EVENT_STATE_CHANGE:
574		desc = "Status Change";
575		break;
576	case MPI2_EVENT_HARD_RESET_RECEIVED:
577		desc = "Hard Reset Received";
578		break;
579	case MPI2_EVENT_EVENT_CHANGE:
580		desc = "Event Change";
581		break;
582	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
583		desc = "Device Status Change";
584		break;
585	case MPI2_EVENT_IR_OPERATION_STATUS:
586		if (!ioc->hide_ir_msg)
587			desc = "IR Operation Status";
588		break;
589	case MPI2_EVENT_SAS_DISCOVERY:
590	{
591		Mpi2EventDataSasDiscovery_t *event_data =
592		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
593		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
594		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
595		    "start" : "stop");
596		if (event_data->DiscoveryStatus)
597			printk("discovery_status(0x%08x)",
598			    le32_to_cpu(event_data->DiscoveryStatus));
599		printk("\n");
600		return;
601	}
602	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
603		desc = "SAS Broadcast Primitive";
604		break;
605	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
606		desc = "SAS Init Device Status Change";
607		break;
608	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
609		desc = "SAS Init Table Overflow";
610		break;
611	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
612		desc = "SAS Topology Change List";
613		break;
614	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
615		desc = "SAS Enclosure Device Status Change";
616		break;
617	case MPI2_EVENT_IR_VOLUME:
618		if (!ioc->hide_ir_msg)
619			desc = "IR Volume";
620		break;
621	case MPI2_EVENT_IR_PHYSICAL_DISK:
622		if (!ioc->hide_ir_msg)
623			desc = "IR Physical Disk";
624		break;
625	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
626		if (!ioc->hide_ir_msg)
627			desc = "IR Configuration Change List";
628		break;
629	case MPI2_EVENT_LOG_ENTRY_ADDED:
630		if (!ioc->hide_ir_msg)
631			desc = "Log Entry Added";
632		break;
633	}
634
635	if (!desc)
636		return;
637
638	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
639}
640#endif
641
642/**
643 * _base_sas_log_info - verbose translation of firmware log info
644 * @ioc: per adapter object
645 * @log_info: log info
646 *
647 * Return nothing.
648 */
649static void
650_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc, u32 log_info)
651{
652	union loginfo_type {
653		u32	loginfo;
654		struct {
655			u32	subcode:16;
656			u32	code:8;
657			u32	originator:4;
658			u32	bus_type:4;
659		} dw;
660	};
661	union loginfo_type sas_loginfo;
662	char *originator_str = NULL;
663
664	sas_loginfo.loginfo = log_info;
665	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
666		return;
667
668	/* each nexus loss loginfo */
669	if (log_info == 0x31170000)
670		return;
671
672	/* eat the loginfos associated with task aborts */
673	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
674	    0x31140000 || log_info == 0x31130000))
675		return;
676
677	switch (sas_loginfo.dw.originator) {
678	case 0:
679		originator_str = "IOP";
680		break;
681	case 1:
682		originator_str = "PL";
683		break;
684	case 2:
685		if (!ioc->hide_ir_msg)
686			originator_str = "IR";
687		else
688			originator_str = "WarpDrive";
689		break;
690	}
691
692	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
693	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
694	     originator_str, sas_loginfo.dw.code,
695	     sas_loginfo.dw.subcode);
696}
697
698/**
699 * _base_display_reply_info - verbose translation of a reply's ioc status and log info
700 * @ioc: per adapter object
701 * @smid: system request message index
702 * @msix_index: MSIX table index supplied by the OS
703 * @reply: reply message frame(lower 32bit addr)
704 *
705 * Return nothing.
706 */
707static void
708_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
709    u32 reply)
710{
711	MPI2DefaultReply_t *mpi_reply;
712	u16 ioc_status;
713
714	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
715	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
716#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
717	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
718	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
719		_base_sas_ioc_info(ioc, mpi_reply,
720		   mpt2sas_base_get_msg_frame(ioc, smid));
721	}
722#endif
723	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
724		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
725}
726
727/**
728 * mpt2sas_base_done - base internal command completion routine
729 * @ioc: per adapter object
730 * @smid: system request message index
731 * @msix_index: MSIX table index supplied by the OS
732 * @reply: reply message frame(lower 32bit addr)
733 *
734 * Return 1 meaning mf should be freed from _base_interrupt
735 *        0 means the mf is freed from this function.
736 */
737u8
738mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
739    u32 reply)
740{
741	MPI2DefaultReply_t *mpi_reply;
742
743	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
744	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
745		return 1;
746
747	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
748		return 1;
749
750	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
751	if (mpi_reply) {
752		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
753		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
754	}
755	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
756
757	complete(&ioc->base_cmds.done);
758	return 1;
759}
760
761/**
762 * _base_async_event - main callback handler for firmware async events
763 * @ioc: per adapter object
764 * @msix_index: MSIX table index supplied by the OS
765 * @reply: reply message frame(lower 32bit addr)
766 *
767 * Return 1 meaning mf should be freed from _base_interrupt
768 *        0 means the mf is freed from this function.
769 */
770static u8
771_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
772{
773	Mpi2EventNotificationReply_t *mpi_reply;
774	Mpi2EventAckRequest_t *ack_request;
775	u16 smid;
776
777	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
778	if (!mpi_reply)
779		return 1;
780	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
781		return 1;
782#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
783	_base_display_event_data(ioc, mpi_reply);
784#endif
785	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
786		goto out;
787	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
788	if (!smid) {
789		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
790		    ioc->name, __func__);
791		goto out;
792	}
793
794	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
795	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
796	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
797	ack_request->Event = mpi_reply->Event;
798	ack_request->EventContext = mpi_reply->EventContext;
799	ack_request->VF_ID = 0;  /* TODO */
800	ack_request->VP_ID = 0;
801	mpt2sas_base_put_smid_default(ioc, smid);
802
803 out:
804
805	/* scsih callback handler */
806	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
807
808	/* ctl callback handler */
809	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
810
811	return 1;
812}
813
814/**
815 * _base_get_cb_idx - obtain the callback index
816 * @ioc: per adapter object
817 * @smid: system request message index
818 *
819 * Return callback index.
820 */
821static u8
822_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
823{
824	int i;
825	u8 cb_idx;
826
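	/*
	 * smids are handed out from three consecutive regions:
	 * [1, hi_priority_smid) for SCSI IO requests,
	 * [hi_priority_smid, internal_smid) for high priority requests and
	 * [internal_smid, hba_queue_depth] for internal commands; map the
	 * smid back to the matching lookup table.
	 */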
827	if (smid < ioc->hi_priority_smid) {
828		i = smid - 1;
829		cb_idx = ioc->scsi_lookup[i].cb_idx;
830	} else if (smid < ioc->internal_smid) {
831		i = smid - ioc->hi_priority_smid;
832		cb_idx = ioc->hpr_lookup[i].cb_idx;
833	} else if (smid <= ioc->hba_queue_depth) {
834		i = smid - ioc->internal_smid;
835		cb_idx = ioc->internal_lookup[i].cb_idx;
836	} else
837		cb_idx = 0xFF;
838	return cb_idx;
839}
840
841/**
842 * _base_mask_interrupts - disable interrupts
843 * @ioc: per adapter object
844 *
845 * Disabling ResetIRQ, Reply and Doorbell Interrupts
846 *
847 * Return nothing.
848 */
849static void
850_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
851{
852	u32 him_register;
853
854	ioc->mask_interrupts = 1;
855	him_register = readl(&ioc->chip->HostInterruptMask);
856	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
857	writel(him_register, &ioc->chip->HostInterruptMask);
858	readl(&ioc->chip->HostInterruptMask);
859}
860
861/**
862 * _base_unmask_interrupts - enable interrupts
863 * @ioc: per adapter object
864 *
865 * Enabling only Reply Interrupts
866 *
867 * Return nothing.
868 */
869static void
870_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
871{
872	u32 him_register;
873
874	him_register = readl(&ioc->chip->HostInterruptMask);
875	him_register &= ~MPI2_HIM_RIM;
876	writel(him_register, &ioc->chip->HostInterruptMask);
877	ioc->mask_interrupts = 0;
878}
879
880union reply_descriptor {
881	u64 word;
882	struct {
883		u32 low;
884		u32 high;
885	} u;
886};
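
/*
 * Reply descriptors are posted by the firmware as 64 bit words; the union
 * above lets the interrupt handler examine the two 32 bit halves separately,
 * since an unused descriptor is left with both halves set to all ones.
 */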
887
888/**
889 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
890 * @irq: irq number (not used)
891 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue
892 *
893 * Return IRQ_HANDLED if processed, else IRQ_NONE.
895 */
896static irqreturn_t
897_base_interrupt(int irq, void *bus_id)
898{
899	struct adapter_reply_queue *reply_q = bus_id;
900	union reply_descriptor rd;
901	u32 completed_cmds;
902	u8 request_desript_type;
903	u16 smid;
904	u8 cb_idx;
905	u32 reply;
906	u8 msix_index = reply_q->msix_index;
907	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
908	Mpi2ReplyDescriptorsUnion_t *rpf;
909	u8 rc;
910
911	if (ioc->mask_interrupts)
912		return IRQ_NONE;
913
914	if (!atomic_add_unless(&reply_q->busy, 1, 1))
915		return IRQ_NONE;
916
917	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
918	request_desript_type = rpf->Default.ReplyFlags
919	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
920	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
921		atomic_dec(&reply_q->busy);
922		return IRQ_NONE;
923	}
924
925	completed_cmds = 0;
926	cb_idx = 0xFF;
927	do {
928		rd.word = le64_to_cpu(rpf->Words);
929		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
930			goto out;
931		reply = 0;
932		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
933		if (request_desript_type ==
934		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
935			reply = le32_to_cpu
936				(rpf->AddressReply.ReplyFrameAddress);
937			if (reply > ioc->reply_dma_max_address ||
938			    reply < ioc->reply_dma_min_address)
939				reply = 0;
940		} else if (request_desript_type ==
941		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
942			goto next;
943		else if (request_desript_type ==
944		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
945			goto next;
946		if (smid)
947			cb_idx = _base_get_cb_idx(ioc, smid);
948		if (smid && cb_idx != 0xFF) {
949			rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
950			    reply);
951			if (reply)
952				_base_display_reply_info(ioc, smid, msix_index,
953				    reply);
954			if (rc)
955				mpt2sas_base_free_smid(ioc, smid);
956		}
957		if (!smid)
958			_base_async_event(ioc, msix_index, reply);
959
960		/* reply free queue handling */
961		if (reply) {
962			ioc->reply_free_host_index =
963			    (ioc->reply_free_host_index ==
964			    (ioc->reply_free_queue_depth - 1)) ?
965			    0 : ioc->reply_free_host_index + 1;
966			ioc->reply_free[ioc->reply_free_host_index] =
967			    cpu_to_le32(reply);
968			wmb();
969			writel(ioc->reply_free_host_index,
970			    &ioc->chip->ReplyFreeHostIndex);
971		}
972
973 next:
974
975		rpf->Words = cpu_to_le64(ULLONG_MAX);
976		reply_q->reply_post_host_index =
977		    (reply_q->reply_post_host_index ==
978		    (ioc->reply_post_queue_depth - 1)) ? 0 :
979		    reply_q->reply_post_host_index + 1;
980		request_desript_type =
981		    reply_q->reply_post_free[reply_q->reply_post_host_index].
982		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
983		completed_cmds++;
984		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
985			goto out;
986		if (!reply_q->reply_post_host_index)
987			rpf = reply_q->reply_post_free;
988		else
989			rpf++;
990	} while (1);
991
992 out:
993
994	if (!completed_cmds) {
995		atomic_dec(&reply_q->busy);
996		return IRQ_NONE;
997	}
998	wmb();
999	if (ioc->is_warpdrive) {
1000		writel(reply_q->reply_post_host_index,
1001		ioc->reply_post_host_index[msix_index]);
1002		atomic_dec(&reply_q->busy);
1003		return IRQ_HANDLED;
1004	}
1005	writel(reply_q->reply_post_host_index | (msix_index <<
1006	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
1007	atomic_dec(&reply_q->busy);
1008	return IRQ_HANDLED;
1009}
1010
1011/**
1012 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1013 * @ioc: per adapter object
1014 *
1015 */
1016static inline int
1017_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
1018{
1019	return (ioc->facts.IOCCapabilities &
1020	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1021}
1022
1023/**
1024 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
1025 * @ioc: per adapter object
1026 * Context: interrupt (ISR) context
1027 *
1028 * Called when a Task Management request has completed. We want
1029 * to flush the other reply queues so that all outstanding IO has been
1030 * completed back to the OS before we process the TM completion.
1031 *
1032 * Return nothing.
1033 */
1034void
1035mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1036{
1037	struct adapter_reply_queue *reply_q;
1038
1039	/* If MSIX capability is turned off
1040	 * then multi-queues are not enabled
1041	 */
1042	if (!_base_is_controller_msix_enabled(ioc))
1043		return;
1044
1045	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1046		if (ioc->shost_recovery)
1047			return;
1048		/* TMs are on msix_index == 0 */
1049		if (reply_q->msix_index == 0)
1050			continue;
1051		_base_interrupt(reply_q->vector, (void *)reply_q);
1052	}
1053}
1054
1055/**
1056 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
1057 * @cb_idx: callback index
1058 *
1059 * Return nothing.
1060 */
1061void
1062mpt2sas_base_release_callback_handler(u8 cb_idx)
1063{
1064	mpt_callbacks[cb_idx] = NULL;
1065}
1066
1067/**
1068 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
1069 * @cb_func: callback function
1070 *
1071 * Returns the assigned callback index (cb_idx).
1072 */
1073u8
1074mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1075{
1076	u8 cb_idx;
1077
1078	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1079		if (mpt_callbacks[cb_idx] == NULL)
1080			break;
1081
1082	mpt_callbacks[cb_idx] = cb_func;
1083	return cb_idx;
1084}
1085
1086/**
1087 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
1088 *
1089 * Return nothing.
1090 */
1091void
1092mpt2sas_base_initialize_callback_handler(void)
1093{
1094	u8 cb_idx;
1095
1096	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1097		mpt2sas_base_release_callback_handler(cb_idx);
1098}
1099
1100/**
1101 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
1102 * @ioc: per adapter object
1103 * @paddr: virtual address for SGE
1104 *
1105 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1106 * something to use if the target device goes brain dead and tries
1107 * to send data even when none is asked for.
1108 *
1109 * Return nothing.
1110 */
1111void
1112mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
1113{
1114	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1115	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1116	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1117	    MPI2_SGE_FLAGS_SHIFT);
1118	ioc->base_add_sg_single(paddr, flags_length, -1);
1119}
1120
1121/**
1122 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1123 * @paddr: virtual address for SGE
1124 * @flags_length: SGE flags and data transfer length
1125 * @dma_addr: Physical address
1126 *
1127 * Return nothing.
1128 */
1129static void
1130_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1131{
1132	Mpi2SGESimple32_t *sgel = paddr;
1133
1134	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1135	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1136	sgel->FlagsLength = cpu_to_le32(flags_length);
1137	sgel->Address = cpu_to_le32(dma_addr);
1138}
1139
1140
1141/**
1142 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1143 * @paddr: virtual address for SGE
1144 * @flags_length: SGE flags and data transfer length
1145 * @dma_addr: Physical address
1146 *
1147 * Return nothing.
1148 */
1149static void
1150_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1151{
1152	Mpi2SGESimple64_t *sgel = paddr;
1153
1154	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1155	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1156	sgel->FlagsLength = cpu_to_le32(flags_length);
1157	sgel->Address = cpu_to_le64(dma_addr);
1158}
1159
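/* convert a page count (as reported by si_meminfo) to kilobytes */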
1160#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1161
1162/**
1163 * _base_config_dma_addressing - set dma addressing
1164 * @ioc: per adapter object
1165 * @pdev: PCI device struct
1166 *
1167 * Returns 0 for success, non-zero for failure.
1168 */
1169static int
1170_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1171{
1172	struct sysinfo s;
1173	char *desc = NULL;
1174
1175	if (sizeof(dma_addr_t) > 4) {
1176		const uint64_t required_mask =
1177		    dma_get_required_mask(&pdev->dev);
1178		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1179		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1180		    DMA_BIT_MASK(64))) {
1181			ioc->base_add_sg_single = &_base_add_sg_single_64;
1182			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1183			desc = "64";
1184			goto out;
1185		}
1186	}
1187
1188	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1189	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1190		ioc->base_add_sg_single = &_base_add_sg_single_32;
1191		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1192		desc = "32";
1193	} else
1194		return -ENODEV;
1195
1196 out:
1197	si_meminfo(&s);
1198	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1199	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1200
1201	return 0;
1202}
1203
1204/**
1205 * _base_check_enable_msix - checks MSIX capability.
1206 * @ioc: per adapter object
1207 *
1208 * Check to see if card is capable of MSIX, and set number
1209 * of available msix vectors
1210 */
1211static int
1212_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1213{
1214	int base;
1215	u16 message_control;
1216
1217
1218	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1219	if (!base) {
1220		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1221		    "supported\n", ioc->name));
1222		return -EINVAL;
1223	}
1224
1225	/* get msix vector count */
1226	/* NUMA_IO not supported for older controllers */
1227	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1228	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1229	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1230	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1231	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1232	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1233	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1234		ioc->msix_vector_count = 1;
1235	else {
1236		pci_read_config_word(ioc->pdev, base + 2, &message_control);
1237		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1238	}
1239	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1240	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1241
1242	return 0;
1243}
1244
1245/**
1246 * _base_free_irq - free irq
1247 * @ioc: per adapter object
1248 *
1249 * Freeing respective reply_queue from the list.
1250 */
1251static void
1252_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1253{
1254	struct adapter_reply_queue *reply_q, *next;
1255
1256	if (list_empty(&ioc->reply_queue_list))
1257		return;
1258
1259	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1260		list_del(&reply_q->list);
1261		synchronize_irq(reply_q->vector);
1262		free_irq(reply_q->vector, reply_q);
1263		kfree(reply_q);
1264	}
1265}
1266
1267/**
1268 * _base_request_irq - request irq
1269 * @ioc: per adapter object
1270 * @index: msix index into vector table
1271 * @vector: irq vector
1272 *
1273 * Inserting respective reply_queue into the list.
1274 */
1275static int
1276_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1277{
1278	struct adapter_reply_queue *reply_q;
1279	int r;
1280
1281	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1282	if (!reply_q) {
1283		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1284		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1285		return -ENOMEM;
1286	}
1287	reply_q->ioc = ioc;
1288	reply_q->msix_index = index;
1289	reply_q->vector = vector;
1290	atomic_set(&reply_q->busy, 0);
1291	if (ioc->msix_enable)
1292		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1293		    MPT2SAS_DRIVER_NAME, ioc->id, index);
1294	else
1295		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1296		    MPT2SAS_DRIVER_NAME, ioc->id);
1297	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1298	    reply_q);
1299	if (r) {
1300		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1301		    reply_q->name, vector);
1302		kfree(reply_q);
1303		return -EBUSY;
1304	}
1305
1306	INIT_LIST_HEAD(&reply_q->list);
1307	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1308	return 0;
1309}
1310
1311/**
1312 * _base_assign_reply_queues - assigning msix index for each cpu
1313 * @ioc: per adapter object
1314 *
1315 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
1316 *
1317 * It would be nice if we could call irq_set_affinity; however, it is not
1318 * an exported symbol
1319 */
1320static void
1321_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1322{
1323	struct adapter_reply_queue *reply_q;
1324	int cpu_id;
1325	int cpu_grouping, loop, grouping, grouping_mod;
1326
1327	if (!_base_is_controller_msix_enabled(ioc))
1328		return;
1329
1330	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1331	/* when there are more cpus than available msix vectors,
1332	 * then group cpus togeather on same irq
1333	 * then group cpus together on the same irq
1334	if (ioc->cpu_count > ioc->msix_vector_count) {
1335		grouping = ioc->cpu_count / ioc->msix_vector_count;
1336		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1337		if (grouping < 2 || (grouping == 2 && !grouping_mod))
1338			cpu_grouping = 2;
1339		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1340			cpu_grouping = 4;
1341		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1342			cpu_grouping = 8;
1343		else
1344			cpu_grouping = 16;
1345	} else
1346		cpu_grouping = 0;
1347
1348	loop = 0;
1349	reply_q = list_entry(ioc->reply_queue_list.next,
1350	     struct adapter_reply_queue, list);
1351	for_each_online_cpu(cpu_id) {
1352		if (!cpu_grouping) {
1353			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1354			reply_q = list_entry(reply_q->list.next,
1355			    struct adapter_reply_queue, list);
1356		} else {
1357			if (loop < cpu_grouping) {
1358				ioc->cpu_msix_table[cpu_id] =
1359					reply_q->msix_index;
1360				loop++;
1361			} else {
1362				reply_q = list_entry(reply_q->list.next,
1363				    struct adapter_reply_queue, list);
1364				ioc->cpu_msix_table[cpu_id] =
1365					reply_q->msix_index;
1366				loop = 1;
1367			}
1368		}
1369	}
1370}
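
/*
 * Illustration only (the IRQ number below is hypothetical): with the grouping
 * done above, an end user can still pin a particular reply queue's vector by
 * writing a cpu mask to procfs, e.g.
 *
 *	echo 0f > /proc/irq/42/smp_affinity
 *
 * which restricts that MSI-X vector to CPUs 0-3.
 */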
1371
1372/**
1373 * _base_disable_msix - disables msix
1374 * @ioc: per adapter object
1375 *
1376 */
1377static void
1378_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1379{
1380	if (ioc->msix_enable) {
1381		pci_disable_msix(ioc->pdev);
1382		ioc->msix_enable = 0;
1383	}
1384}
1385
1386/**
1387 * _base_enable_msix - enables msix, falling back to io_apic
1388 * @ioc: per adapter object
1389 *
1390 */
1391static int
1392_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1393{
1394	struct msix_entry *entries, *a;
1395	int r;
1396	int i;
1397	u8 try_msix = 0;
1398
1399	INIT_LIST_HEAD(&ioc->reply_queue_list);
1400
1401	if (msix_disable == -1 || msix_disable == 0)
1402		try_msix = 1;
1403
1404	if (!try_msix)
1405		goto try_ioapic;
1406
1407	if (_base_check_enable_msix(ioc) != 0)
1408		goto try_ioapic;
1409
1410	ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
1411	    ioc->msix_vector_count);
1412
1413	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1414	    GFP_KERNEL);
1415	if (!entries) {
1416		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
1417		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
1418		    __LINE__, __func__));
1419		goto try_ioapic;
1420	}
1421
1422	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1423		a->entry = i;
1424
1425	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1426	if (r) {
1427		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1428		    "failed (r=%d) !!!\n", ioc->name, r));
1429		kfree(entries);
1430		goto try_ioapic;
1431	}
1432
1433	ioc->msix_enable = 1;
1434	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1435		r = _base_request_irq(ioc, i, a->vector);
1436		if (r) {
1437			_base_free_irq(ioc);
1438			_base_disable_msix(ioc);
1439			kfree(entries);
1440			goto try_ioapic;
1441		}
1442	}
1443
1444	kfree(entries);
1445	return 0;
1446
1447/* fall back to io_apic interrupt routing */
1448 try_ioapic:
1449
1450	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1451
1452	return r;
1453}
1454
1455/**
1456 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1457 * @ioc: per adapter object
1458 *
1459 * Returns 0 for success, non-zero for failure.
1460 */
1461int
1462mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1463{
1464	struct pci_dev *pdev = ioc->pdev;
1465	u32 memap_sz;
1466	u32 pio_sz;
1467	int i, r = 0;
1468	u64 pio_chip = 0;
1469	u64 chip_phys = 0;
1470	struct adapter_reply_queue *reply_q;
1471
1472	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1473	    ioc->name, __func__));
1474
1475	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1476	if (pci_enable_device_mem(pdev)) {
1477		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1478		    "failed\n", ioc->name);
1479		return -ENODEV;
1480	}
1481
1482
1483	if (pci_request_selected_regions(pdev, ioc->bars,
1484	    MPT2SAS_DRIVER_NAME)) {
1485		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1486		    "failed\n", ioc->name);
1487		r = -ENODEV;
1488		goto out_fail;
1489	}
1490
1491	/* AER (Advanced Error Reporting) hooks */
1492	pci_enable_pcie_error_reporting(pdev);
1493
1494	pci_set_master(pdev);
1495
1496	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1497		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1498		    ioc->name, pci_name(pdev));
1499		r = -ENODEV;
1500		goto out_fail;
1501	}
1502
1503	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1504		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1505			if (pio_sz)
1506				continue;
1507			pio_chip = (u64)pci_resource_start(pdev, i);
1508			pio_sz = pci_resource_len(pdev, i);
1509		} else {
1510			if (memap_sz)
1511				continue;
1512			/* verify memory resource is valid before using */
1513			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1514				ioc->chip_phys = pci_resource_start(pdev, i);
1515				chip_phys = (u64)ioc->chip_phys;
1516				memap_sz = pci_resource_len(pdev, i);
1517				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1518				if (ioc->chip == NULL) {
1519					printk(MPT2SAS_ERR_FMT "unable to map "
1520					    "adapter memory!\n", ioc->name);
1521					r = -EINVAL;
1522					goto out_fail;
1523				}
1524			}
1525		}
1526	}
1527
1528	_base_mask_interrupts(ioc);
1529	r = _base_enable_msix(ioc);
1530	if (r)
1531		goto out_fail;
1532
1533	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1534		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1535		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1536		    "IO-APIC enabled"), reply_q->vector);
1537
1538	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1539	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1540	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1541	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1542
1543	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1544	pci_save_state(pdev);
1545
1546	return 0;
1547
1548 out_fail:
1549	if (ioc->chip_phys)
1550		iounmap(ioc->chip);
1551	ioc->chip_phys = 0;
1552	pci_release_selected_regions(ioc->pdev, ioc->bars);
1553	pci_disable_pcie_error_reporting(pdev);
1554	pci_disable_device(pdev);
1555	return r;
1556}
1557
1558/**
1559 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1560 * @ioc: per adapter object
1561 * @smid: system request message index(smid zero is invalid)
1562 *
1563 * Returns virt pointer to message frame.
1564 */
1565void *
1566mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1567{
1568	return (void *)(ioc->request + (smid * ioc->request_sz));
1569}
1570
1571/**
1572 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1573 * @ioc: per adapter object
1574 * @smid: system request message index
1575 *
1576 * Returns virt pointer to sense buffer.
1577 */
1578void *
1579mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1580{
1581	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1582}
1583
1584/**
1585 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1586 * @ioc: per adapter object
1587 * @smid: system request message index
1588 *
1589 * Returns phys pointer to the low 32bit address of the sense buffer.
1590 */
1591__le32
1592mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1593{
1594	return cpu_to_le32(ioc->sense_dma +
1595			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1596}
1597
1598/**
1599 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1600 * @ioc: per adapter object
1601 * @phys_addr: lower 32 physical addr of the reply
1602 *
1603 * Converts 32bit lower physical addr into a virt address.
1604 */
1605void *
1606mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1607{
1608	if (!phys_addr)
1609		return NULL;
1610	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1611}
1612
1613/**
1614 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1615 * @ioc: per adapter object
1616 * @cb_idx: callback index
1617 *
1618 * Returns smid (zero is invalid)
1619 */
1620u16
1621mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1622{
1623	unsigned long flags;
1624	struct request_tracker *request;
1625	u16 smid;
1626
1627	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1628	if (list_empty(&ioc->internal_free_list)) {
1629		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1630		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1631		    ioc->name, __func__);
1632		return 0;
1633	}
1634
1635	request = list_entry(ioc->internal_free_list.next,
1636	    struct request_tracker, tracker_list);
1637	request->cb_idx = cb_idx;
1638	smid = request->smid;
1639	list_del(&request->tracker_list);
1640	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1641	return smid;
1642}
1643
1644/**
1645 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1646 * @ioc: per adapter object
1647 * @cb_idx: callback index
1648 * @scmd: pointer to scsi command object
1649 *
1650 * Returns smid (zero is invalid)
1651 */
1652u16
1653mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1654    struct scsi_cmnd *scmd)
1655{
1656	unsigned long flags;
1657	struct scsiio_tracker *request;
1658	u16 smid;
1659
1660	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1661	if (list_empty(&ioc->free_list)) {
1662		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1663		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1664		    ioc->name, __func__);
1665		return 0;
1666	}
1667
1668	request = list_entry(ioc->free_list.next,
1669	    struct scsiio_tracker, tracker_list);
1670	request->scmd = scmd;
1671	request->cb_idx = cb_idx;
1672	smid = request->smid;
1673	list_del(&request->tracker_list);
1674	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1675	return smid;
1676}
1677
1678/**
1679 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1680 * @ioc: per adapter object
1681 * @cb_idx: callback index
1682 *
1683 * Returns smid (zero is invalid)
1684 */
1685u16
1686mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1687{
1688	unsigned long flags;
1689	struct request_tracker *request;
1690	u16 smid;
1691
1692	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1693	if (list_empty(&ioc->hpr_free_list)) {
1694		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1695		return 0;
1696	}
1697
1698	request = list_entry(ioc->hpr_free_list.next,
1699	    struct request_tracker, tracker_list);
1700	request->cb_idx = cb_idx;
1701	smid = request->smid;
1702	list_del(&request->tracker_list);
1703	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1704	return smid;
1705}
1706
1707
1708/**
1709 * mpt2sas_base_free_smid - put smid back on free_list
1710 * @ioc: per adapter object
1711 * @smid: system request message index
1712 *
1713 * Return nothing.
1714 */
1715void
1716mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1717{
1718	unsigned long flags;
1719	int i;
1720	struct chain_tracker *chain_req, *next;
1721
1722	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1723	if (smid < ioc->hi_priority_smid) {
1724		/* scsiio queue */
1725		i = smid - 1;
1726		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1727			list_for_each_entry_safe(chain_req, next,
1728			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
1729				list_del_init(&chain_req->tracker_list);
1730				list_add_tail(&chain_req->tracker_list,
1731				    &ioc->free_chain_list);
1732			}
1733		}
1734		ioc->scsi_lookup[i].cb_idx = 0xFF;
1735		ioc->scsi_lookup[i].scmd = NULL;
1736		ioc->scsi_lookup[i].direct_io = 0;
1737		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1738		    &ioc->free_list);
1739		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1740
1741		/*
1742		 * See _wait_for_commands_to_complete() call with regards
1743		 * to this code.
1744		 */
1745		if (ioc->shost_recovery && ioc->pending_io_count) {
1746			if (ioc->pending_io_count == 1)
1747				wake_up(&ioc->reset_wq);
1748			ioc->pending_io_count--;
1749		}
1750		return;
1751	} else if (smid < ioc->internal_smid) {
1752		/* hi-priority */
1753		i = smid - ioc->hi_priority_smid;
1754		ioc->hpr_lookup[i].cb_idx = 0xFF;
1755		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1756		    &ioc->hpr_free_list);
1757	} else if (smid <= ioc->hba_queue_depth) {
1758		/* internal queue */
1759		i = smid - ioc->internal_smid;
1760		ioc->internal_lookup[i].cb_idx = 0xFF;
1761		list_add_tail(&ioc->internal_lookup[i].tracker_list,
1762		    &ioc->internal_free_list);
1763	}
1764	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1765}
1766
1767/**
1768 * _base_writeq - 64 bit write to MMIO
1769 * @ioc: per adapter object
1770 * @b: data payload
1771 * @addr: address in MMIO space
1772 * @writeq_lock: spin lock
1773 *
1774 * Glue for handling an atomic 64 bit word write to MMIO. This special
1775 * handling takes care of 32 bit environments where it is not guaranteed
1776 * that the entire word is sent in one transfer.
1777 */
1778#ifndef writeq
1779static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1780    spinlock_t *writeq_lock)
1781{
1782	unsigned long flags;
1783	__u64 data_out = cpu_to_le64(b);
1784
1785	spin_lock_irqsave(writeq_lock, flags);
1786	writel((u32)(data_out), addr);
1787	writel((u32)(data_out >> 32), (addr + 4));
1788	spin_unlock_irqrestore(writeq_lock, flags);
1789}
1790#else
1791static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1792    spinlock_t *writeq_lock)
1793{
1794	writeq(cpu_to_le64(b), addr);
1795}
1796#endif
1797
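/**
 * _base_get_msix_index - msix index for the current cpu
 * @ioc: per adapter object
 *
 * Returns the reply queue msix index that _base_assign_reply_queues()
 * assigned to the calling cpu.
 */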
1798static inline u8
1799_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1800{
1801	return ioc->cpu_msix_table[smp_processor_id()];
1802}
1803
1804/**
1805 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1806 * @ioc: per adapter object
1807 * @smid: system request message index
1808 * @handle: device handle
1809 *
1810 * Return nothing.
1811 */
1812void
1813mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1814{
1815	Mpi2RequestDescriptorUnion_t descriptor;
1816	u64 *request = (u64 *)&descriptor;
1817
1818
1819	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1820	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
1821	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1822	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1823	descriptor.SCSIIO.LMID = 0;
1824	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1825	    &ioc->scsi_lookup_lock);
1826}
1827
1828
1829/**
1830 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1831 * @ioc: per adapter object
1832 * @smid: system request message index
1833 *
1834 * Return nothing.
1835 */
1836void
1837mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1838{
1839	Mpi2RequestDescriptorUnion_t descriptor;
1840	u64 *request = (u64 *)&descriptor;
1841
1842	descriptor.HighPriority.RequestFlags =
1843	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1844	descriptor.HighPriority.MSIxIndex =  0;
1845	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1846	descriptor.HighPriority.LMID = 0;
1847	descriptor.HighPriority.Reserved1 = 0;
1848	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1849	    &ioc->scsi_lookup_lock);
1850}
1851
1852/**
1853 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1854 * @ioc: per adapter object
1855 * @smid: system request message index
1856 *
1857 * Return nothing.
1858 */
1859void
1860mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1861{
1862	Mpi2RequestDescriptorUnion_t descriptor;
1863	u64 *request = (u64 *)&descriptor;
1864
1865	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1866	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
1867	descriptor.Default.SMID = cpu_to_le16(smid);
1868	descriptor.Default.LMID = 0;
1869	descriptor.Default.DescriptorTypeDependent = 0;
1870	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1871	    &ioc->scsi_lookup_lock);
1872}
1873
1874/**
1875 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1876 * @ioc: per adapter object
1877 * @smid: system request message index
1878 * @io_index: value used to track the IO
1879 *
1880 * Return nothing.
1881 */
1882void
1883mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1884    u16 io_index)
1885{
1886	Mpi2RequestDescriptorUnion_t descriptor;
1887	u64 *request = (u64 *)&descriptor;
1888
1889	descriptor.SCSITarget.RequestFlags =
1890	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1891	descriptor.SCSITarget.MSIxIndex =  _base_get_msix_index(ioc);
1892	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1893	descriptor.SCSITarget.LMID = 0;
1894	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1895	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1896	    &ioc->scsi_lookup_lock);
1897}
1898
1899/**
 * _base_display_dell_branding - Display branding string
1901 * @ioc: per adapter object
1902 *
1903 * Return nothing.
1904 */
1905static void
1906_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1907{
1908	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1909
1910	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1911		return;
1912
1913	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1914	switch (ioc->pdev->subsystem_device) {
1915	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1916		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1917		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1918		break;
1919	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1920		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1921		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1922		break;
1923	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1924		strncpy(dell_branding,
1925		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1926		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1927		break;
1928	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1929		strncpy(dell_branding,
1930		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1931		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1932		break;
1933	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
1934		strncpy(dell_branding,
1935		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
1936		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1937		break;
1938	case MPT2SAS_DELL_PERC_H200_SSDID:
1939		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
1940		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1941		break;
1942	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
1943		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
1944		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1945		break;
1946	default:
1947		sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
1948		break;
1949	}
1950
1951	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
1952	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
1953	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
1954	    ioc->pdev->subsystem_device);
1955}
1956
1957/**
1958 * _base_display_intel_branding - Display branding string
1959 * @ioc: per adapter object
1960 *
1961 * Return nothing.
1962 */
1963static void
1964_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1965{
1966	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1967		return;
1968
1969	switch (ioc->pdev->device) {
1970	case MPI2_MFGPAGE_DEVID_SAS2008:
1971		switch (ioc->pdev->subsystem_device) {
1972		case MPT2SAS_INTEL_RMS2LL080_SSDID:
1973			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1974			    MPT2SAS_INTEL_RMS2LL080_BRANDING);
1975			break;
1976		case MPT2SAS_INTEL_RMS2LL040_SSDID:
1977			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1978			    MPT2SAS_INTEL_RMS2LL040_BRANDING);
1979			break;
1980		case MPT2SAS_INTEL_RAMSDALE_SSDID:
1981			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1982			    MPT2SAS_INTEL_RAMSDALE_BRANDING);
1983			break;
1984		default:
1985			break;
1986		}
1987	case MPI2_MFGPAGE_DEVID_SAS2308_2:
1988		switch (ioc->pdev->subsystem_device) {
1989		case MPT2SAS_INTEL_RS25GB008_SSDID:
1990			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1991			    MPT2SAS_INTEL_RS25GB008_BRANDING);
1992			break;
1993		case MPT2SAS_INTEL_RMS25JB080_SSDID:
1994			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1995			    MPT2SAS_INTEL_RMS25JB080_BRANDING);
1996			break;
1997		case MPT2SAS_INTEL_RMS25JB040_SSDID:
1998			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1999			    MPT2SAS_INTEL_RMS25JB040_BRANDING);
2000			break;
2001		case MPT2SAS_INTEL_RMS25KB080_SSDID:
2002			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2003			    MPT2SAS_INTEL_RMS25KB080_BRANDING);
2004			break;
2005		case MPT2SAS_INTEL_RMS25KB040_SSDID:
2006			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2007			    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2008			break;
2009		default:
2010			break;
2011		}
2012	default:
2013		break;
2014	}
2015}
2016
2017/**
2018 * _base_display_hp_branding - Display branding string
2019 * @ioc: per adapter object
2020 *
2021 * Return nothing.
2022 */
2023static void
2024_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
2025{
2026	if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
2027		return;
2028
2029	switch (ioc->pdev->device) {
2030	case MPI2_MFGPAGE_DEVID_SAS2004:
2031		switch (ioc->pdev->subsystem_device) {
2032		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2033			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2034			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2035			break;
2036		default:
2037			break;
2038		}
2039	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2040		switch (ioc->pdev->subsystem_device) {
2041		case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2042			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2043			    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2044			break;
2045		case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2046			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2047			    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2048			break;
2049		case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2050			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2051			    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2052			break;
2053		case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2054			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2055			    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2056			break;
2057		default:
2058			break;
2059		}
2060	default:
2061		break;
2062	}
2063}
2064
2065/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
2067 * @ioc: per adapter object
2068 *
2069 * Return nothing.
2070 */
2071static void
2072_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2073{
2074	int i = 0;
2075	char desc[16];
2076	u8 revision;
2077	u32 iounit_pg1_flags;
2078	u32 bios_version;
2079
2080	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2081	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
2082	strncpy(desc, ioc->manu_pg0.ChipName, 16);
2083	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
2084	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2085	    ioc->name, desc,
2086	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2087	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2088	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2089	   ioc->facts.FWVersion.Word & 0x000000FF,
2090	   revision,
2091	   (bios_version & 0xFF000000) >> 24,
2092	   (bios_version & 0x00FF0000) >> 16,
2093	   (bios_version & 0x0000FF00) >> 8,
2094	    bios_version & 0x000000FF);
2095
2096	_base_display_dell_branding(ioc);
2097	_base_display_intel_branding(ioc);
2098	_base_display_hp_branding(ioc);
2099
2100	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
2101
2102	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2103		printk("Initiator");
2104		i++;
2105	}
2106
2107	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2108		printk("%sTarget", i ? "," : "");
2109		i++;
2110	}
2111
2112	i = 0;
2113	printk("), ");
2114	printk("Capabilities=(");
2115
2116	if (!ioc->hide_ir_msg) {
2117		if (ioc->facts.IOCCapabilities &
2118		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2119			printk("Raid");
2120			i++;
2121		}
2122	}
2123
2124	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2125		printk("%sTLR", i ? "," : "");
2126		i++;
2127	}
2128
2129	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2130		printk("%sMulticast", i ? "," : "");
2131		i++;
2132	}
2133
2134	if (ioc->facts.IOCCapabilities &
2135	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2136		printk("%sBIDI Target", i ? "," : "");
2137		i++;
2138	}
2139
2140	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2141		printk("%sEEDP", i ? "," : "");
2142		i++;
2143	}
2144
2145	if (ioc->facts.IOCCapabilities &
2146	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2147		printk("%sSnapshot Buffer", i ? "," : "");
2148		i++;
2149	}
2150
2151	if (ioc->facts.IOCCapabilities &
2152	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2153		printk("%sDiag Trace Buffer", i ? "," : "");
2154		i++;
2155	}
2156
2157	if (ioc->facts.IOCCapabilities &
2158	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
		printk("%sDiag Extended Buffer", i ? "," : "");
2160		i++;
2161	}
2162
2163	if (ioc->facts.IOCCapabilities &
2164	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2165		printk("%sTask Set Full", i ? "," : "");
2166		i++;
2167	}
2168
2169	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2170	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2171		printk("%sNCQ", i ? "," : "");
2172		i++;
2173	}
2174
2175	printk(")\n");
2176}
2177
2178/**
2179 * _base_update_missing_delay - change the missing delay timers
2180 * @ioc: per adapter object
 * @device_missing_delay: amount of time until a device is reported missing
 * @io_missing_delay: interval before IO is returned when a device is missing
2183 *
2184 * Return nothing.
2185 *
 * The delays are passed in on the command line. This function modifies the
 * device missing delay as well as the io missing delay, and should be called
 * at driver load time.
2189 */
2190static void
2191_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2192	u16 device_missing_delay, u8 io_missing_delay)
2193{
	u16 dmd, dmd_new, dmd_original;
2195	u8 io_missing_delay_original;
2196	u16 sz;
2197	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2198	Mpi2ConfigReply_t mpi_reply;
2199	u8 num_phys = 0;
2200	u16 ioc_status;
2201
2202	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
2203	if (!num_phys)
2204		return;
2205
2206	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2207	    sizeof(Mpi2SasIOUnit1PhyData_t));
2208	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2209	if (!sas_iounit_pg1) {
2210		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2211		    ioc->name, __FILE__, __LINE__, __func__);
2212		goto out;
2213	}
2214	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2215	    sas_iounit_pg1, sz))) {
2216		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2217		    ioc->name, __FILE__, __LINE__, __func__);
2218		goto out;
2219	}
2220	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2221	    MPI2_IOCSTATUS_MASK;
2222	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2223		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2224		    ioc->name, __FILE__, __LINE__, __func__);
2225		goto out;
2226	}
2227
2228	/* device missing delay */
2229	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2230	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2231		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2232	else
2233		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
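	/*
	 * ReportDeviceMissingDelay holds a 7 bit count; when the UNIT_16 flag
	 * is set the count is in units of 16 seconds.  A requested delay of
	 * 300 seconds, for example, is stored below as 300/16 = 18 units with
	 * UNIT_16 set, i.e. an effective delay of 288 seconds.
	 */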
	dmd_original = dmd;
2235	if (device_missing_delay > 0x7F) {
2236		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2237		    device_missing_delay;
2238		dmd = dmd / 16;
2239		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2240	} else
2241		dmd = device_missing_delay;
2242	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2243
2244	/* io missing delay */
2245	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2246	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2247
2248	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2249	    sz)) {
2250		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2251			dmd_new = (dmd &
2252			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2253		else
2254			dmd_new =
2255		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2256		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
2257		    "new(%d)\n", ioc->name, dmd_orignal, dmd_new);
2258		printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
2259		    "new(%d)\n", ioc->name, io_missing_delay_original,
2260		    io_missing_delay);
2261		ioc->device_missing_delay = dmd_new;
2262		ioc->io_missing_delay = io_missing_delay;
2263	}
2264
2265out:
2266	kfree(sas_iounit_pg1);
2267}
2268
2269/**
2270 * _base_static_config_pages - static start of day config pages
2271 * @ioc: per adapter object
2272 *
2273 * Return nothing.
2274 */
2275static void
2276_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
2277{
2278	Mpi2ConfigReply_t mpi_reply;
2279	u32 iounit_pg1_flags;
2280
2281	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2282	if (ioc->ir_firmware)
2283		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2284		    &ioc->manu_pg10);
2285	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2286	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2287	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2288	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2289	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2290	_base_display_ioc_capabilities(ioc);
2291
2292	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that it is supported.
2295	 */
2296	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2297	if ((ioc->facts.IOCCapabilities &
2298	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2299		iounit_pg1_flags &=
2300		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2301	else
2302		iounit_pg1_flags |=
2303		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2304	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2305	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2306
2307}
2308
2309/**
2310 * _base_release_memory_pools - release memory
2311 * @ioc: per adapter object
2312 *
2313 * Free memory allocated from _base_allocate_memory_pools.
2314 *
2315 * Return nothing.
2316 */
2317static void
2318_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2319{
2320	int i;
2321
2322	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2323	    __func__));
2324
2325	if (ioc->request) {
2326		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2327		    ioc->request,  ioc->request_dma);
2328		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
2329		    ": free\n", ioc->name, ioc->request));
2330		ioc->request = NULL;
2331	}
2332
2333	if (ioc->sense) {
2334		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2335		if (ioc->sense_dma_pool)
2336			pci_pool_destroy(ioc->sense_dma_pool);
2337		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
2338		    ": free\n", ioc->name, ioc->sense));
2339		ioc->sense = NULL;
2340	}
2341
2342	if (ioc->reply) {
2343		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2344		if (ioc->reply_dma_pool)
2345			pci_pool_destroy(ioc->reply_dma_pool);
2346		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
2347		     ": free\n", ioc->name, ioc->reply));
2348		ioc->reply = NULL;
2349	}
2350
2351	if (ioc->reply_free) {
2352		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2353		    ioc->reply_free_dma);
2354		if (ioc->reply_free_dma_pool)
2355			pci_pool_destroy(ioc->reply_free_dma_pool);
2356		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
2357		    "(0x%p): free\n", ioc->name, ioc->reply_free));
2358		ioc->reply_free = NULL;
2359	}
2360
2361	if (ioc->reply_post_free) {
2362		pci_pool_free(ioc->reply_post_free_dma_pool,
2363		    ioc->reply_post_free, ioc->reply_post_free_dma);
2364		if (ioc->reply_post_free_dma_pool)
2365			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2366		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2367		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2368		    ioc->reply_post_free));
2369		ioc->reply_post_free = NULL;
2370	}
2371
2372	if (ioc->config_page) {
2373		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2374		    "config_page(0x%p): free\n", ioc->name,
2375		    ioc->config_page));
2376		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2377		    ioc->config_page, ioc->config_page_dma);
2378	}
2379
2380	if (ioc->scsi_lookup) {
2381		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2382		ioc->scsi_lookup = NULL;
2383	}
2384	kfree(ioc->hpr_lookup);
2385	kfree(ioc->internal_lookup);
2386	if (ioc->chain_lookup) {
2387		for (i = 0; i < ioc->chain_depth; i++) {
2388			if (ioc->chain_lookup[i].chain_buffer)
2389				pci_pool_free(ioc->chain_dma_pool,
2390				    ioc->chain_lookup[i].chain_buffer,
2391				    ioc->chain_lookup[i].chain_buffer_dma);
2392		}
2393		if (ioc->chain_dma_pool)
2394			pci_pool_destroy(ioc->chain_dma_pool);
2395		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2396		ioc->chain_lookup = NULL;
2397	}
2398}
2399
2400
2401/**
2402 * _base_allocate_memory_pools - allocate start of day memory pools
2403 * @ioc: per adapter object
2404 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2405 *
 * Returns 0 for success, anything else error.
2407 */
2408static int
2409_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
2410{
2411	struct mpt2sas_facts *facts;
2412	u16 max_sge_elements;
2413	u16 chains_needed_per_io;
2414	u32 sz, total_sz, reply_post_free_sz;
2415	u32 retry_sz;
2416	u16 max_request_credit;
2417	int i;
2418
2419	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2420	    __func__));
2421
2422	retry_sz = 0;
2423	facts = &ioc->facts;
2424
2425	/* command line tunables  for max sgl entries */
2426	if (max_sgl_entries != -1) {
2427		ioc->shost->sg_tablesize = (max_sgl_entries <
2428		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
2429		    MPT2SAS_SG_DEPTH;
2430	} else {
2431		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
2432	}
2433
2434	/* command line tunables  for max controller queue depth */
2435	if (max_queue_depth != -1)
2436		max_request_credit = (max_queue_depth < facts->RequestCredit)
2437		    ? max_queue_depth : facts->RequestCredit;
2438	else
2439		max_request_credit = min_t(u16, facts->RequestCredit,
2440		    MAX_HBA_QUEUE_DEPTH);
2441
2442	ioc->hba_queue_depth = max_request_credit;
2443	ioc->hi_priority_depth = facts->HighPriorityCredit;
2444	ioc->internal_depth = ioc->hi_priority_depth + 5;
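	/*
	 * The extra 5 frames presumably cover driver-internal requests such
	 * as config page access and base/transport commands.
	 */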
2445
2446	/* request frame size */
2447	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2448
2449	/* reply frame size */
2450	ioc->reply_sz = facts->ReplyFrameSize * 4;
2451
2452 retry_allocation:
2453	total_sz = 0;
2454	/* calculate number of sg elements left over in the 1st frame */
2455	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2456	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
2457	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
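	/*
	 * One SGE worth of space (the extra ioc->sge_size above) is kept back
	 * from the main frame, apparently so there is always room for the
	 * chain element that links to the first chain buffer.
	 */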
2458
2459	/* now do the same for a chain buffer */
2460	max_sge_elements = ioc->request_sz - ioc->sge_size;
2461	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
2462
2463	ioc->chain_offset_value_for_main_message =
2464	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
2465	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
2466
2467	/*
2468	 *  MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2469	 */
2470	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2471	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2472	    + 1;
2473	if (chains_needed_per_io > facts->MaxChainDepth) {
2474		chains_needed_per_io = facts->MaxChainDepth;
2475		ioc->shost->sg_tablesize = min_t(u16,
2476		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2477		* chains_needed_per_io), ioc->shost->sg_tablesize);
2478	}
2479	ioc->chains_needed_per_io = chains_needed_per_io;
2480
	/* reply free queue sizing - taking into account 64 extra frames for FW events */
2482	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2483
2484	/* align the reply post queue on the next 16 count boundary */
	if (!(ioc->reply_free_queue_depth % 16))
2486		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2487	else
2488		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2489				32 - (ioc->reply_free_queue_depth % 16);
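	/*
	 * Example: hba_queue_depth = 1000 gives reply_free_queue_depth = 1064;
	 * 1064 % 16 = 8, so reply_post_queue_depth = 1064 + 32 - 8 = 1088,
	 * which is again a multiple of 16.
	 */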
2490	if (ioc->reply_post_queue_depth >
2491	    facts->MaxReplyDescriptorPostQueueDepth) {
2492		ioc->reply_post_queue_depth = min_t(u16,
2493		    (facts->MaxReplyDescriptorPostQueueDepth -
2494		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2495		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2496		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2497		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2498	}
2499
2500
2501	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2502	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2503	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2504	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2505	    ioc->chains_needed_per_io));
2506
2507	ioc->scsiio_depth = ioc->hba_queue_depth -
2508	    ioc->hi_priority_depth - ioc->internal_depth;
2509
2510	/* set the scsi host can_queue depth
2511	 * with some internal commands that could be outstanding
2512	 */
2513	ioc->shost->can_queue = ioc->scsiio_depth - (2);
2514	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2515	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2516
	/* contiguous pool for request and chains, 16 byte align, one extra
	 * frame for smid=0
2519	 */
2520	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2521	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2522
2523	/* hi-priority queue */
2524	sz += (ioc->hi_priority_depth * ioc->request_sz);
2525
2526	/* internal queue */
2527	sz += (ioc->internal_depth * ioc->request_sz);
2528
2529	ioc->request_dma_sz = sz;
2530	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2531	if (!ioc->request) {
2532		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2533		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2534		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2535		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2536		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2537			goto out;
2538		retry_sz += 64;
2539		ioc->hba_queue_depth = max_request_credit - retry_sz;
2540		goto retry_allocation;
2541	}
2542
2543	if (retry_sz)
2544		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2545		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2546		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2547		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2548
2549
2550	/* hi-priority queue */
2551	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2552	    ioc->request_sz);
2553	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2554	    ioc->request_sz);
2555
2556	/* internal queue */
2557	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2558	    ioc->request_sz);
2559	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2560	    ioc->request_sz);
2561
2562
2563	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2564	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2565	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2566	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2567	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2568	    ioc->name, (unsigned long long) ioc->request_dma));
2569	total_sz += sz;
2570
2571	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2572	ioc->scsi_lookup_pages = get_order(sz);
2573	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2574	    GFP_KERNEL, ioc->scsi_lookup_pages);
2575	if (!ioc->scsi_lookup) {
2576		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
2577		    "sz(%d)\n", ioc->name, (int)sz);
2578		goto out;
2579	}
2580
2581	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2582	    "depth(%d)\n", ioc->name, ioc->request,
2583	    ioc->scsiio_depth));
2584
2585	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2586	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2587	ioc->chain_pages = get_order(sz);
2588
2589	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2590	    GFP_KERNEL, ioc->chain_pages);
2591	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2592	    ioc->request_sz, 16, 0);
2593	if (!ioc->chain_dma_pool) {
2594		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2595		    "failed\n", ioc->name);
2596		goto out;
2597	}
2598	for (i = 0; i < ioc->chain_depth; i++) {
2599		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2600		    ioc->chain_dma_pool , GFP_KERNEL,
2601		    &ioc->chain_lookup[i].chain_buffer_dma);
2602		if (!ioc->chain_lookup[i].chain_buffer) {
2603			ioc->chain_depth = i;
2604			goto chain_done;
2605		}
2606		total_sz += ioc->request_sz;
2607	}
2608chain_done:
2609	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2610	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2611	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2612	    ioc->request_sz))/1024));
2613
2614	/* initialize hi-priority queue smid's */
2615	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2616	    sizeof(struct request_tracker), GFP_KERNEL);
2617	if (!ioc->hpr_lookup) {
2618		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2619		    ioc->name);
2620		goto out;
2621	}
2622	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2623	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2624	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2625	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2626
2627	/* initialize internal queue smid's */
2628	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2629	    sizeof(struct request_tracker), GFP_KERNEL);
2630	if (!ioc->internal_lookup) {
2631		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2632		    ioc->name);
2633		goto out;
2634	}
2635	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
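	/*
	 * SMID layout: 1..scsiio_depth are SCSI IO trackers, the next
	 * hi_priority_depth SMIDs belong to the hi-priority queue and the
	 * last internal_depth SMIDs to the internal queue; the get/free smid
	 * helpers earlier in this file rely on exactly this ordering.
	 */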
2636	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2637	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2638	     ioc->internal_depth, ioc->internal_smid));
2639
2640	/* sense buffers, 4 byte align */
2641	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2642	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2643	    0);
2644	if (!ioc->sense_dma_pool) {
2645		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2646		    ioc->name);
2647		goto out;
2648	}
2649	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2650	    &ioc->sense_dma);
2651	if (!ioc->sense) {
2652		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2653		    ioc->name);
2654		goto out;
2655	}
2656	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2657	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2658	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2659	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2660	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2661	    ioc->name, (unsigned long long)ioc->sense_dma));
2662	total_sz += sz;
2663
2664	/* reply pool, 4 byte align */
2665	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2666	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2667	    0);
2668	if (!ioc->reply_dma_pool) {
2669		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2670		    ioc->name);
2671		goto out;
2672	}
2673	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2674	    &ioc->reply_dma);
2675	if (!ioc->reply) {
2676		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2677		    ioc->name);
2678		goto out;
2679	}
2680	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2681	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2682	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2683	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2684	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2685	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2686	    ioc->name, (unsigned long long)ioc->reply_dma));
2687	total_sz += sz;
2688
2689	/* reply free queue, 16 byte align */
2690	sz = ioc->reply_free_queue_depth * 4;
2691	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2692	    ioc->pdev, sz, 16, 0);
2693	if (!ioc->reply_free_dma_pool) {
2694		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2695		    "failed\n", ioc->name);
2696		goto out;
2697	}
2698	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2699	    &ioc->reply_free_dma);
2700	if (!ioc->reply_free) {
2701		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2702		    "failed\n", ioc->name);
2703		goto out;
2704	}
2705	memset(ioc->reply_free, 0, sz);
2706	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2707	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2708	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2709	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2710	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2711	total_sz += sz;
2712
2713	/* reply post queue, 16 byte align */
2714	reply_post_free_sz = ioc->reply_post_queue_depth *
2715	    sizeof(Mpi2DefaultReplyDescriptor_t);
2716	if (_base_is_controller_msix_enabled(ioc))
2717		sz = reply_post_free_sz * ioc->reply_queue_count;
2718	else
2719		sz = reply_post_free_sz;
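	/*
	 * With MSI-X enabled each reply queue appears to get its own reply
	 * post ring carved out of this single allocation, one
	 * reply_post_free_sz sized slice per queue.
	 */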
2720	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2721	    ioc->pdev, sz, 16, 0);
2722	if (!ioc->reply_post_free_dma_pool) {
2723		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2724		    "failed\n", ioc->name);
2725		goto out;
2726	}
2727	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2728	    GFP_KERNEL, &ioc->reply_post_free_dma);
2729	if (!ioc->reply_post_free) {
2730		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2731		    "failed\n", ioc->name);
2732		goto out;
2733	}
2734	memset(ioc->reply_post_free, 0, sz);
2735	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2736	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2737	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2738	    sz/1024));
2739	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2740	    "(0x%llx)\n", ioc->name, (unsigned long long)
2741	    ioc->reply_post_free_dma));
2742	total_sz += sz;
2743
2744	ioc->config_page_sz = 512;
2745	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2746	    ioc->config_page_sz, &ioc->config_page_dma);
2747	if (!ioc->config_page) {
2748		printk(MPT2SAS_ERR_FMT "config page: pci_pool_alloc "
2749		    "failed\n", ioc->name);
2750		goto out;
2751	}
2752	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2753	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2754	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2755	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2756	total_sz += ioc->config_page_sz;
2757
2758	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2759	    ioc->name, total_sz/1024);
2760	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2761	    "Max Controller Queue Depth(%d)\n",
2762	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2763	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2764	    ioc->name, ioc->shost->sg_tablesize);
2765	return 0;
2766
2767 out:
2768	return -ENOMEM;
2769}
2770
2771
2772/**
2773 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: Pointer to MPT2SAS_ADAPTER structure
2775 * @cooked: Request raw or cooked IOC state
2776 *
2777 * Returns all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI2_IOC_STATE_MASK.
2779 */
2780u32
2781mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2782{
2783	u32 s, sc;
2784
2785	s = readl(&ioc->chip->Doorbell);
2786	sc = s & MPI2_IOC_STATE_MASK;
2787	return cooked ? sc : s;
2788}
2789
2790/**
2791 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
2794 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2795 *
2796 * Returns 0 for success, non-zero for failure.
2797 */
2798static int
2799_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2800    int sleep_flag)
2801{
2802	u32 count, cntdn;
2803	u32 current_state;
2804
2805	count = 0;
2806	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
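	/* roughly 1 ms per iteration when sleeping and 0.5 ms when busy
	 * waiting, so both loops approximate the requested timeout in seconds
	 */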
2807	do {
2808		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2809		if (current_state == ioc_state)
2810			return 0;
2811		if (count && current_state == MPI2_IOC_STATE_FAULT)
2812			break;
2813		if (sleep_flag == CAN_SLEEP)
2814			msleep(1);
2815		else
2816			udelay(500);
2817		count++;
2818	} while (--cntdn);
2819
2820	return current_state;
2821}
2822
2823/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
 * a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
2828 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2829 *
2830 * Returns 0 for success, non-zero for failure.
2831 *
2832 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2833 */
2834static int
2835_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2836    int sleep_flag)
2837{
2838	u32 cntdn, count;
2839	u32 int_status;
2840
2841	count = 0;
2842	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2843	do {
2844		int_status = readl(&ioc->chip->HostInterruptStatus);
2845		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2846			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2847			    "successful count(%d), timeout(%d)\n", ioc->name,
2848			    __func__, count, timeout));
2849			return 0;
2850		}
2851		if (sleep_flag == CAN_SLEEP)
2852			msleep(1);
2853		else
2854			udelay(500);
2855		count++;
2856	} while (--cntdn);
2857
2858	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2859	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2860	return -EFAULT;
2861}
2862
2863/**
2864 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2865 * @ioc: per adapter object
 * @timeout: timeout in seconds
2867 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2868 *
2869 * Returns 0 for success, non-zero for failure.
2870 *
2871 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2872 * doorbell.
2873 */
2874static int
2875_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2876    int sleep_flag)
2877{
2878	u32 cntdn, count;
2879	u32 int_status;
2880	u32 doorbell;
2881
2882	count = 0;
2883	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2884	do {
2885		int_status = readl(&ioc->chip->HostInterruptStatus);
2886		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2887			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2888			    "successful count(%d), timeout(%d)\n", ioc->name,
2889			    __func__, count, timeout));
2890			return 0;
2891		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2892			doorbell = readl(&ioc->chip->Doorbell);
2893			if ((doorbell & MPI2_IOC_STATE_MASK) ==
2894			    MPI2_IOC_STATE_FAULT) {
2895				mpt2sas_base_fault_info(ioc , doorbell);
2896				return -EFAULT;
2897			}
2898		} else if (int_status == 0xFFFFFFFF)
2899			goto out;
2900
2901		if (sleep_flag == CAN_SLEEP)
2902			msleep(1);
2903		else
2904			udelay(500);
2905		count++;
2906	} while (--cntdn);
2907
2908 out:
2909	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2910	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2911	return -EFAULT;
2912}
2913
2914/**
2915 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2916 * @ioc: per adapter object
 * @timeout: timeout in seconds
2918 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2919 *
2920 * Returns 0 for success, non-zero for failure.
2921 *
2922 */
2923static int
2924_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2925    int sleep_flag)
2926{
2927	u32 cntdn, count;
2928	u32 doorbell_reg;
2929
2930	count = 0;
2931	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2932	do {
2933		doorbell_reg = readl(&ioc->chip->Doorbell);
2934		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2935			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2936			    "successful count(%d), timeout(%d)\n", ioc->name,
2937			    __func__, count, timeout));
2938			return 0;
2939		}
2940		if (sleep_flag == CAN_SLEEP)
2941			msleep(1);
2942		else
2943			udelay(500);
2944		count++;
2945	} while (--cntdn);
2946
2947	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2948	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2949	return -EFAULT;
2950}
2951
2952/**
2953 * _base_send_ioc_reset - send doorbell reset
2954 * @ioc: per adapter object
2955 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
2957 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2958 *
2959 * Returns 0 for success, non-zero for failure.
2960 */
2961static int
2962_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2963    int sleep_flag)
2964{
2965	u32 ioc_state;
2966	int r = 0;
2967
2968	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2969		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2970		    ioc->name, __func__);
2971		return -EFAULT;
2972	}
2973
2974	if (!(ioc->facts.IOCCapabilities &
2975	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
2976		return -EFAULT;
2977
2978	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
2979
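	/*
	 * A message unit reset is requested by writing the function code into
	 * the doorbell function field; no request frame or payload follows.
	 */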
2980	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
2981	    &ioc->chip->Doorbell);
2982	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
2983		r = -EFAULT;
2984		goto out;
2985	}
2986	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
2987	    timeout, sleep_flag);
2988	if (ioc_state) {
2989		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
2990		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2991		r = -EFAULT;
2992		goto out;
2993	}
2994 out:
2995	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
2996	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
2997	return r;
2998}
2999
3000/**
 * _base_handshake_req_reply_wait - send request through the doorbell interface
3002 * @ioc: per adapter object
3003 * @request_bytes: request length
3004 * @request: pointer having request payload
3005 * @reply_bytes: reply length
3006 * @reply: pointer to reply payload
 * @timeout: timeout in seconds
3008 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3009 *
3010 * Returns 0 for success, non-zero for failure.
3011 */
3012static int
3013_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
3014    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3015{
3016	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3017	int i;
3018	u8 failed;
3019	u16 dummy;
3020	__le32 *mfp;
3021
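	/*
	 * Doorbell handshake in brief: post the HANDSHAKE function and the
	 * request length to the doorbell, wait for the IOC to acknowledge,
	 * stream the request one dword at a time (each write acknowledged),
	 * then pull the reply back one 16 bit word per doorbell interrupt.
	 */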
3022	/* make sure doorbell is not in use */
3023	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3024		printk(MPT2SAS_ERR_FMT "doorbell is in use "
3025		    " (line=%d)\n", ioc->name, __LINE__);
3026		return -EFAULT;
3027	}
3028
3029	/* clear pending doorbell interrupts from previous state changes */
3030	if (readl(&ioc->chip->HostInterruptStatus) &
3031	    MPI2_HIS_IOC2SYS_DB_STATUS)
3032		writel(0, &ioc->chip->HostInterruptStatus);
3033
3034	/* send message to ioc */
3035	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3036	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3037	    &ioc->chip->Doorbell);
3038
3039	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3040		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3041		   "int failed (line=%d)\n", ioc->name, __LINE__);
3042		return -EFAULT;
3043	}
3044	writel(0, &ioc->chip->HostInterruptStatus);
3045
3046	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3047		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3048		    "ack failed (line=%d)\n", ioc->name, __LINE__);
3049		return -EFAULT;
3050	}
3051
3052	/* send message 32-bits at a time */
3053	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3054		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3055		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3056			failed = 1;
3057	}
3058
3059	if (failed) {
3060		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3061		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
3062		return -EFAULT;
3063	}
3064
3065	/* now wait for the reply */
3066	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3067		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3068		   "int failed (line=%d)\n", ioc->name, __LINE__);
3069		return -EFAULT;
3070	}
3071
	/* read the first two 16-bit words; they give the total length of the reply */
3073	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3074	    & MPI2_DOORBELL_DATA_MASK);
3075	writel(0, &ioc->chip->HostInterruptStatus);
3076	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3077		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3078		   "int failed (line=%d)\n", ioc->name, __LINE__);
3079		return -EFAULT;
3080	}
3081	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3082	    & MPI2_DOORBELL_DATA_MASK);
3083	writel(0, &ioc->chip->HostInterruptStatus);
3084
3085	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3086		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3087			printk(MPT2SAS_ERR_FMT "doorbell "
3088			    "handshake int failed (line=%d)\n", ioc->name,
3089			    __LINE__);
3090			return -EFAULT;
3091		}
3092		if (i >=  reply_bytes/2) /* overflow case */
3093			dummy = readl(&ioc->chip->Doorbell);
3094		else
3095			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3096			    & MPI2_DOORBELL_DATA_MASK);
3097		writel(0, &ioc->chip->HostInterruptStatus);
3098	}
3099
3100	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3101	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3102		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
3103		    " (line=%d)\n", ioc->name, __LINE__));
3104	}
3105	writel(0, &ioc->chip->HostInterruptStatus);
3106
3107	if (ioc->logging_level & MPT_DEBUG_INIT) {
3108		mfp = (__le32 *)reply;
3109		printk(KERN_INFO "\toffset:data\n");
3110		for (i = 0; i < reply_bytes/4; i++)
3111			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3112			    le32_to_cpu(mfp[i]));
3113	}
3114	return 0;
3115}
3116
3117/**
3118 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
3119 * @ioc: per adapter object
3120 * @mpi_reply: the reply payload from FW
3121 * @mpi_request: the request payload sent to FW
3122 *
 * The SAS IO Unit Control Request message allows the host to perform low-level
 * operations, such as resets on the PHYs of the IO Unit. It also allows the
 * host to obtain the IOC-assigned device handle for a device, given other
 * identifying information about the device, and to remove IOC resources
 * associated with the device.
3128 *
3129 * Returns 0 for success, non-zero for failure.
3130 */
3131int
3132mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
3133    Mpi2SasIoUnitControlReply_t *mpi_reply,
3134    Mpi2SasIoUnitControlRequest_t *mpi_request)
3135{
3136	u16 smid;
3137	u32 ioc_state;
3138	unsigned long timeleft;
3139	u8 issue_reset;
3140	int rc;
3141	void *request;
3142	u16 wait_state_count;
3143
3144	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3145	    __func__));
3146
3147	mutex_lock(&ioc->base_cmds.mutex);
3148
3149	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3150		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3151		    ioc->name, __func__);
3152		rc = -EAGAIN;
3153		goto out;
3154	}
3155
3156	wait_state_count = 0;
3157	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3158	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3159		if (wait_state_count++ == 10) {
3160			printk(MPT2SAS_ERR_FMT
3161			    "%s: failed due to ioc not operational\n",
3162			    ioc->name, __func__);
3163			rc = -EFAULT;
3164			goto out;
3165		}
3166		ssleep(1);
3167		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3168		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3169		    "operational state(count=%d)\n", ioc->name,
3170		    __func__, wait_state_count);
3171	}
3172
3173	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3174	if (!smid) {
3175		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3176		    ioc->name, __func__);
3177		rc = -EAGAIN;
3178		goto out;
3179	}
3180
3181	rc = 0;
3182	ioc->base_cmds.status = MPT2_CMD_PENDING;
3183	request = mpt2sas_base_get_msg_frame(ioc, smid);
3184	ioc->base_cmds.smid = smid;
3185	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3186	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3187	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3188		ioc->ioc_link_reset_in_progress = 1;
3189	init_completion(&ioc->base_cmds.done);
3190	mpt2sas_base_put_smid_default(ioc, smid);
3191	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3192	    msecs_to_jiffies(10000));
3193	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3194	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3195	    ioc->ioc_link_reset_in_progress)
3196		ioc->ioc_link_reset_in_progress = 0;
3197	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3198		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3199		    ioc->name, __func__);
3200		_debug_dump_mf(mpi_request,
3201		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3202		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3203			issue_reset = 1;
3204		goto issue_host_reset;
3205	}
3206	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3207		memcpy(mpi_reply, ioc->base_cmds.reply,
3208		    sizeof(Mpi2SasIoUnitControlReply_t));
3209	else
3210		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3211	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3212	goto out;
3213
3214 issue_host_reset:
3215	if (issue_reset)
3216		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3217		    FORCE_BIG_HAMMER);
3218	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3219	rc = -EFAULT;
3220 out:
3221	mutex_unlock(&ioc->base_cmds.mutex);
3222	return rc;
3223}
3224
3225
3226/**
3227 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
3228 * @ioc: per adapter object
3229 * @mpi_reply: the reply payload from FW
3230 * @mpi_request: the request payload sent to FW
3231 *
3232 * The SCSI Enclosure Processor request message causes the IOC to
3233 * communicate with SES devices to control LED status signals.
3234 *
3235 * Returns 0 for success, non-zero for failure.
3236 */
3237int
3238mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
3239    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3240{
3241	u16 smid;
3242	u32 ioc_state;
3243	unsigned long timeleft;
3244	u8 issue_reset;
3245	int rc;
3246	void *request;
3247	u16 wait_state_count;
3248
3249	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3250	    __func__));
3251
3252	mutex_lock(&ioc->base_cmds.mutex);
3253
3254	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3255		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3256		    ioc->name, __func__);
3257		rc = -EAGAIN;
3258		goto out;
3259	}
3260
3261	wait_state_count = 0;
3262	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3263	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3264		if (wait_state_count++ == 10) {
3265			printk(MPT2SAS_ERR_FMT
3266			    "%s: failed due to ioc not operational\n",
3267			    ioc->name, __func__);
3268			rc = -EFAULT;
3269			goto out;
3270		}
3271		ssleep(1);
3272		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3273		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3274		    "operational state(count=%d)\n", ioc->name,
3275		    __func__, wait_state_count);
3276	}
3277
3278	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3279	if (!smid) {
3280		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3281		    ioc->name, __func__);
3282		rc = -EAGAIN;
3283		goto out;
3284	}
3285
3286	rc = 0;
3287	ioc->base_cmds.status = MPT2_CMD_PENDING;
3288	request = mpt2sas_base_get_msg_frame(ioc, smid);
3289	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
3291	init_completion(&ioc->base_cmds.done);
3292	mpt2sas_base_put_smid_default(ioc, smid);
3293	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3294	    msecs_to_jiffies(10000));
3295	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3296		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3297		    ioc->name, __func__);
3298		_debug_dump_mf(mpi_request,
3299		    sizeof(Mpi2SepRequest_t)/4);
3300		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3301			issue_reset = 1;
3302		goto issue_host_reset;
3303	}
3304	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3305		memcpy(mpi_reply, ioc->base_cmds.reply,
3306		    sizeof(Mpi2SepReply_t));
3307	else
3308		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3309	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3310	goto out;
3311
3312 issue_host_reset:
3313	if (issue_reset)
3314		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3315		    FORCE_BIG_HAMMER);
3316	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3317	rc = -EFAULT;
3318 out:
3319	mutex_unlock(&ioc->base_cmds.mutex);
3320	return rc;
3321}
3322
3323/**
3324 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3327 *
3328 * Returns 0 for success, non-zero for failure.
3329 */
3330static int
3331_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3332{
3333	Mpi2PortFactsRequest_t mpi_request;
3334	Mpi2PortFactsReply_t mpi_reply;
3335	struct mpt2sas_port_facts *pfacts;
3336	int mpi_reply_sz, mpi_request_sz, r;
3337
3338	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3339	    __func__));
3340
3341	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3342	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3343	memset(&mpi_request, 0, mpi_request_sz);
3344	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3345	mpi_request.PortNumber = port;
3346	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3347	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3348
3349	if (r != 0) {
3350		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3351		    ioc->name, __func__, r);
3352		return r;
3353	}
3354
3355	pfacts = &ioc->pfacts[port];
3356	memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
3357	pfacts->PortNumber = mpi_reply.PortNumber;
3358	pfacts->VP_ID = mpi_reply.VP_ID;
3359	pfacts->VF_ID = mpi_reply.VF_ID;
3360	pfacts->MaxPostedCmdBuffers =
3361	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3362
3363	return 0;
3364}
3365
3366/**
3367 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3368 * @ioc: per adapter object
3369 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3370 *
3371 * Returns 0 for success, non-zero for failure.
3372 */
3373static int
3374_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3375{
3376	Mpi2IOCFactsRequest_t mpi_request;
3377	Mpi2IOCFactsReply_t mpi_reply;
3378	struct mpt2sas_facts *facts;
3379	int mpi_reply_sz, mpi_request_sz, r;
3380
3381	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3382	    __func__));
3383
3384	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3385	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3386	memset(&mpi_request, 0, mpi_request_sz);
3387	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3388	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3389	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3390
3391	if (r != 0) {
3392		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3393		    ioc->name, __func__, r);
3394		return r;
3395	}
3396
3397	facts = &ioc->facts;
3398	memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
3399	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3400	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3401	facts->VP_ID = mpi_reply.VP_ID;
3402	facts->VF_ID = mpi_reply.VF_ID;
3403	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3404	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3405	facts->WhoInit = mpi_reply.WhoInit;
3406	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3407	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3408	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3409	facts->MaxReplyDescriptorPostQueueDepth =
3410	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3411	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3412	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3413	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3414		ioc->ir_firmware = 1;
3415	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3416	facts->IOCRequestFrameSize =
3417	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3418	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3419	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3420	ioc->shost->max_id = -1;
3421	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3422	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3423	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3424	facts->HighPriorityCredit =
3425	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3426	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3427	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3428
3429	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
3430	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
3431	    facts->MaxChainDepth));
3432	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
3433	    "reply frame size(%d)\n", ioc->name,
3434	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3435	return 0;
3436}
3437
3438/**
3439 * _base_send_ioc_init - send ioc_init to firmware
3440 * @ioc: per adapter object
3441 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3442 *
3443 * Returns 0 for success, non-zero for failure.
3444 */
3445static int
3446_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3447{
3448	Mpi2IOCInitRequest_t mpi_request;
3449	Mpi2IOCInitReply_t mpi_reply;
3450	int r;
3451	struct timeval current_time;
3452	u16 ioc_status;
3453
3454	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3455	    __func__));
3456
3457	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3458	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3459	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3460	mpi_request.VF_ID = 0; /* TODO */
3461	mpi_request.VP_ID = 0;
3462	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3463	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3464
3465	if (_base_is_controller_msix_enabled(ioc))
3466		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3467	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3468	mpi_request.ReplyDescriptorPostQueueDepth =
3469	    cpu_to_le16(ioc->reply_post_queue_depth);
3470	mpi_request.ReplyFreeQueueDepth =
3471	    cpu_to_le16(ioc->reply_free_queue_depth);
3472
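	/*
	 * Only the upper 32 bits of the sense and reply base addresses are
	 * handed over here; the low 32 bits are supplied per request/reply,
	 * so each pool is presumably expected to live within a single 4GB
	 * region.
	 */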
3473	mpi_request.SenseBufferAddressHigh =
3474	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3475	mpi_request.SystemReplyAddressHigh =
3476	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3477	mpi_request.SystemRequestFrameBaseAddress =
3478	    cpu_to_le64((u64)ioc->request_dma);
3479	mpi_request.ReplyFreeQueueAddress =
3480	    cpu_to_le64((u64)ioc->reply_free_dma);
3481	mpi_request.ReplyDescriptorPostQueueAddress =
3482	    cpu_to_le64((u64)ioc->reply_post_free_dma);
3483
3484
3485	/* This time stamp specifies number of milliseconds
3486	 * since epoch ~ midnight January 1, 1970.
3487	 */
3488	do_gettimeofday(&current_time);
3489	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3490	    (current_time.tv_usec / 1000));
3491
3492	if (ioc->logging_level & MPT_DEBUG_INIT) {
3493		__le32 *mfp;
3494		int i;
3495
3496		mfp = (__le32 *)&mpi_request;
3497		printk(KERN_INFO "\toffset:data\n");
3498		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3499			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3500			    le32_to_cpu(mfp[i]));
3501	}
3502
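	/* IOC_INIT is sent through the doorbell handshake because the message
	 * queues being configured here are not usable until it completes
	 */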
3503	r = _base_handshake_req_reply_wait(ioc,
3504	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3505	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3506	    sleep_flag);
3507
3508	if (r != 0) {
3509		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3510		    ioc->name, __func__, r);
3511		return r;
3512	}
3513
3514	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3515	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3516	    mpi_reply.IOCLogInfo) {
3517		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3518		r = -EIO;
3519	}
3520
3521	return r;
3522}
3523
3524/**
3525 * mpt2sas_port_enable_done - command completion routine for port enable
3526 * @ioc: per adapter object
3527 * @smid: system request message index
3528 * @msix_index: MSIX table index supplied by the OS
3529 * @reply: reply message frame(lower 32bit addr)
3530 *
3531 * Return 1 meaning mf should be freed from _base_interrupt
3532 *        0 means the mf is freed from this function.
3533 */
3534u8
3535mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3536	u32 reply)
3537{
3538	MPI2DefaultReply_t *mpi_reply;
3539	u16 ioc_status;
3540
3541	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3542	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
3543		return 1;
3544
3545	if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
3546		return 1;
3547
3548	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
3549	if (mpi_reply) {
3550		ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
3551		memcpy(ioc->port_enable_cmds.reply, mpi_reply,
3552		    mpi_reply->MsgLength*4);
3553	}
3554	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
3555
3556	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3557
3558	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3559		ioc->port_enable_failed = 1;
3560
3561	if (ioc->is_driver_loading) {
3562		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3563			mpt2sas_port_enable_complete(ioc);
3564			return 1;
3565		} else {
3566			ioc->start_scan_failed = ioc_status;
3567			ioc->start_scan = 0;
3568			return 1;
3569		}
3570	}
3571	complete(&ioc->port_enable_cmds.done);
3572	return 1;
3573}
3574
3575
3576/**
3577 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3578 * @ioc: per adapter object
3579 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3580 *
3581 * Returns 0 for success, non-zero for failure.
3582 */
3583static int
3584_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3585{
3586	Mpi2PortEnableRequest_t *mpi_request;
3587	Mpi2PortEnableReply_t *mpi_reply;
3588	unsigned long timeleft;
3589	int r = 0;
3590	u16 smid;
3591	u16 ioc_status;
3592
3593	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3594
3595	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3596		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3597		    ioc->name, __func__);
3598		return -EAGAIN;
3599	}
3600
3601	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3602	if (!smid) {
3603		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3604		    ioc->name, __func__);
3605		return -EAGAIN;
3606	}
3607
3608	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3609	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3610	ioc->port_enable_cmds.smid = smid;
3611	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3612	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3613
3614	init_completion(&ioc->port_enable_cmds.done);
3615	mpt2sas_base_put_smid_default(ioc, smid);
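	/* port enable completes only after firmware discovery finishes, which
	 * can take minutes on a large topology, hence the 300 second wait
	 */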
3616	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3617	    300*HZ);
3618	if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
3619		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3620		    ioc->name, __func__);
3621		_debug_dump_mf(mpi_request,
3622		    sizeof(Mpi2PortEnableRequest_t)/4);
3623		if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
3624			r = -EFAULT;
3625		else
3626			r = -ETIME;
3627		goto out;
3628	}
3629	mpi_reply = ioc->port_enable_cmds.reply;
3630
3631	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3632	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3633		printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
3634		    ioc->name, __func__, ioc_status);
3635		r = -EFAULT;
3636		goto out;
3637	}
3638 out:
3639	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
3640	printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3641	    "SUCCESS" : "FAILED"));
3642	return r;
3643}
3644
3645/**
3646 * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
3647 * @ioc: per adapter object
3648 *
3649 * Returns 0 for success, non-zero for failure.
3650 */
3651int
3652mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
3653{
3654	Mpi2PortEnableRequest_t *mpi_request;
3655	u16 smid;
3656
3657	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3658
3659	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3660		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3661		    ioc->name, __func__);
3662		return -EAGAIN;
3663	}
3664
3665	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3666	if (!smid) {
3667		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3668		    ioc->name, __func__);
3669		return -EAGAIN;
3670	}
3671
3672	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3673	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3674	ioc->port_enable_cmds.smid = smid;
3675	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3676	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3677
3678	mpt2sas_base_put_smid_default(ioc, smid);
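	/* completion is reported asynchronously via
	 * mpt2sas_port_enable_done()
	 */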
3679	return 0;
3680}
3681
3682/**
3683 * _base_determine_wait_on_discovery - decide whether to wait for discovery
3684 * @ioc: per adapter object
3685 *
3686 * Decide whether to wait on discovery to complete. Used to either
3687 * locate boot device, or report volumes ahead of physical devices.
3688 *
3689 * Returns 1 for wait, 0 for don't wait
3690 */
3691static int
3692_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
3693{
3694	/* We wait for discovery to complete if IR firmware is loaded.
3695	 * The SAS topology events arrive before PD events, so we need time to
3696	 * turn on the bit in ioc->pd_handles that marks a physical disk.
3697	 * Also, it may be required to report volumes ahead of physical
3698	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3699	 */
3700	if (ioc->ir_firmware)
3701		return 1;
3702
3703	/* if no Bios, then we don't need to wait */
3704	if (!ioc->bios_pg3.BiosVersion)
3705		return 0;
3706
3707	/* The BIOS is present, so we drop down here.
3708	 *
3709	 * If there are any boot device entries in BIOS Page 2, then we wait
3710	 * for discovery to complete.
3711	 */
3712
3713	/* Current Boot Device */
3714	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3715	    MPI2_BIOSPAGE2_FORM_MASK) ==
3716	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3717	/* Request Boot Device */
3718	   (ioc->bios_pg2.ReqBootDeviceForm &
3719	    MPI2_BIOSPAGE2_FORM_MASK) ==
3720	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3721	/* Alternate Request Boot Device */
3722	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3723	    MPI2_BIOSPAGE2_FORM_MASK) ==
3724	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3725		return 0;
3726
3727	return 1;
3728}
3729
3730
3731/**
3732 * _base_unmask_events - turn on notification for this event
3733 * @ioc: per adapter object
3734 * @event: firmware event
3735 *
3736 * The mask is stored in ioc->event_masks.
3737 */
3738static void
3739_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3740{
3741	u32 desired_event;
3742
3743	if (event >= 128)
3744		return;
3745
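	/* ioc->event_masks[] is four 32-bit words covering events 0..127; a
	 * cleared bit means the IOC will post notifications for that event
	 */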
3746	desired_event = (1 << (event % 32));
3747
3748	if (event < 32)
3749		ioc->event_masks[0] &= ~desired_event;
3750	else if (event < 64)
3751		ioc->event_masks[1] &= ~desired_event;
3752	else if (event < 96)
3753		ioc->event_masks[2] &= ~desired_event;
3754	else if (event < 128)
3755		ioc->event_masks[3] &= ~desired_event;
3756}
3757
3758/**
3759 * _base_event_notification - send event notification
3760 * @ioc: per adapter object
3761 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3762 *
3763 * Returns 0 for success, non-zero for failure.
3764 */
3765static int
3766_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3767{
3768	Mpi2EventNotificationRequest_t *mpi_request;
3769	unsigned long timeleft;
3770	u16 smid;
3771	int r = 0;
3772	int i;
3773
3774	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3775	    __func__));
3776
3777	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3778		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3779		    ioc->name, __func__);
3780		return -EAGAIN;
3781	}
3782
3783	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3784	if (!smid) {
3785		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3786		    ioc->name, __func__);
3787		return -EAGAIN;
3788	}
3789	ioc->base_cmds.status = MPT2_CMD_PENDING;
3790	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3791	ioc->base_cmds.smid = smid;
3792	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3793	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3794	mpi_request->VF_ID = 0; /* TODO */
3795	mpi_request->VP_ID = 0;
3796	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3797		mpi_request->EventMasks[i] =
3798		    cpu_to_le32(ioc->event_masks[i]);
3799	init_completion(&ioc->base_cmds.done);
3800	mpt2sas_base_put_smid_default(ioc, smid);
3801	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3802	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3803		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3804		    ioc->name, __func__);
3805		_debug_dump_mf(mpi_request,
3806		    sizeof(Mpi2EventNotificationRequest_t)/4);
3807		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3808			r = -EFAULT;
3809		else
3810			r = -ETIME;
3811	} else
3812		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3813		    ioc->name, __func__));
3814	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3815	return r;
3816}
3817
3818/**
3819 * mpt2sas_base_validate_event_type - validate event types
3820 * @ioc: per adapter object
3821 * @event_type: firmware event type bitmap requested by the application
3822 *
3823 * This will turn on firmware event notification when an application
3824 * asks for an event. Events that are already enabled are left unmasked.
3825 */
3826void
3827mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
3828{
3829	int i, j;
3830	u32 event_mask, desired_event;
3831	u8 send_update_to_fw;
3832
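	/* event_type[] has a bit set for each event the application wants
	 * enabled; invert it so it can be compared against ioc->event_masks[],
	 * where a set bit means the event is masked (disabled)
	 */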
3833	for (i = 0, send_update_to_fw = 0; i <
3834	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3835		event_mask = ~event_type[i];
3836		desired_event = 1;
3837		for (j = 0; j < 32; j++) {
3838			if (!(event_mask & desired_event) &&
3839			    (ioc->event_masks[i] & desired_event)) {
3840				ioc->event_masks[i] &= ~desired_event;
3841				send_update_to_fw = 1;
3842			}
3843			desired_event = (desired_event << 1);
3844		}
3845	}
3846
3847	if (!send_update_to_fw)
3848		return;
3849
3850	mutex_lock(&ioc->base_cmds.mutex);
3851	_base_event_notification(ioc, CAN_SLEEP);
3852	mutex_unlock(&ioc->base_cmds.mutex);
3853}
3854
3855/**
3856 * _base_diag_reset - the "big hammer" start of day reset
3857 * @ioc: per adapter object
3858 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3859 *
3860 * Returns 0 for success, non-zero for failure.
3861 */
3862static int
3863_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3864{
3865	u32 host_diagnostic;
3866	u32 ioc_state;
3867	u32 count;
3868	u32 hcb_size;
3869
3870	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3871	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3872	    ioc->name));
3873
3874	count = 0;
3875	do {
3876		/* Write magic sequence to WriteSequence register
3877		 * Loop until in diagnostic mode
3878		 */
3879		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
3880		    "sequence\n", ioc->name));
3881		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3882		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
3883		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
3884		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
3885		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
3886		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
3887		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
3888
3889		/* wait 100 msec */
3890		if (sleep_flag == CAN_SLEEP)
3891			msleep(100);
3892		else
3893			mdelay(100);
3894
3895		if (count++ > 20)
3896			goto out;
3897
3898		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3899		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
3900		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
3901		    ioc->name, count, host_diagnostic));
3902
3903	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
3904
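	/* snapshot the HCB Size register; it is rewritten with the enable bit
	 * below if the IOC comes back up in MPI2_DIAG_HCB_MODE
	 */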
3905	hcb_size = readl(&ioc->chip->HCBSize);
3906
3907	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
3908	    ioc->name));
3909	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3910	     &ioc->chip->HostDiagnostic);
3911
3912	/* don't access any registers for 50 milliseconds */
3913	msleep(50);
3914
3915	/* 300 second max wait */
3916	for (count = 0; count < 3000000 ; count++) {
3917
3918		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3919
3920		if (host_diagnostic == 0xFFFFFFFF)
3921			goto out;
3922		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
3923			break;
3924
3925		/* wait 1 msec */
3926		if (sleep_flag == CAN_SLEEP)
3927			msleep(1);
3928		else
3929			mdelay(1);
3930	}
3931
3932	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3933
3934		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
3935		    "assuming the HCB Address points to good F/W\n",
3936		    ioc->name));
3937		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3938		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3939		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3940
3941		drsprintk(ioc, printk(MPT2SAS_INFO_FMT
3942		    "re-enable the HCDW\n", ioc->name));
3943		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3944		    &ioc->chip->HCBSize);
3945	}
3946
3947	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
3948	    ioc->name));
3949	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3950	    &ioc->chip->HostDiagnostic);
3951
3952	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
3953	    "diagnostic register\n", ioc->name));
3954	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3955
3956	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
3957	    "READY state\n", ioc->name));
3958	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3959	    sleep_flag);
3960	if (ioc_state) {
3961		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3962		    "(ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3963		goto out;
3964	}
3965
3966	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
3967	return 0;
3968
3969 out:
3970	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
3971	return -EFAULT;
3972}
3973
3974/**
3975 * _base_make_ioc_ready - put controller in READY state
3976 * @ioc: per adapter object
3977 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3978 * @type: FORCE_BIG_HAMMER or SOFT_RESET
3979 *
3980 * Returns 0 for success, non-zero for failure.
3981 */
3982static int
3983_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3984    enum reset_type type)
3985{
3986	u32 ioc_state;
3987	int rc;
3988
3989	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3990	    __func__));
3991
3992	if (ioc->pci_error_recovery)
3993		return 0;
3994
3995	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3996	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
3997	    ioc->name, __func__, ioc_state));
3998
3999	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4000		return 0;
4001
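	/* not READY: a doorbell left in use or a FAULT state requires a
	 * diagnostic reset, whereas an OPERATIONAL IOC is first asked to
	 * reset itself with a message unit reset
	 */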
4002	if (ioc_state & MPI2_DOORBELL_USED) {
4003		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
4004		    "active!\n", ioc->name));
4005		goto issue_diag_reset;
4006	}
4007
4008	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4009		mpt2sas_base_fault_info(ioc, ioc_state &
4010		    MPI2_DOORBELL_DATA_MASK);
4011		goto issue_diag_reset;
4012	}
4013
4014	if (type == FORCE_BIG_HAMMER)
4015		goto issue_diag_reset;
4016
4017	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4018		if (!(_base_send_ioc_reset(ioc,
4019		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4020			ioc->ioc_reset_count++;
4021			return 0;
4022		}
4023
4024 issue_diag_reset:
4025	rc = _base_diag_reset(ioc, CAN_SLEEP);
4026	ioc->ioc_reset_count++;
4027	return rc;
4028}
4029
4030/**
4031 * _base_make_ioc_operational - put controller in OPERATIONAL state
4032 * @ioc: per adapter object
4033 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4034 *
4035 * Returns 0 for success, non-zero for failure.
4036 */
4037static int
4038_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4039{
4040	int r, i;
4041	unsigned long	flags;
4042	u32 reply_address;
4043	u16 smid;
4044	struct _tr_list *delayed_tr, *delayed_tr_next;
4045	u8 hide_flag;
4046	struct adapter_reply_queue *reply_q;
4047	long reply_post_free;
4048	u32 reply_post_free_sz;
4049
4050	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4051	    __func__));
4052
4053	/* clean the delayed target reset list */
4054	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4055	    &ioc->delayed_tr_list, list) {
4056		list_del(&delayed_tr->list);
4057		kfree(delayed_tr);
4058	}
4059
4060	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4061	    &ioc->delayed_tr_volume_list, list) {
4062		list_del(&delayed_tr->list);
4063		kfree(delayed_tr);
4064	}
4065
4066	/* initialize the scsi lookup free list */
4067	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4068	INIT_LIST_HEAD(&ioc->free_list);
4069	smid = 1;
4070	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4071		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4072		ioc->scsi_lookup[i].cb_idx = 0xFF;
4073		ioc->scsi_lookup[i].smid = smid;
4074		ioc->scsi_lookup[i].scmd = NULL;
4075		ioc->scsi_lookup[i].direct_io = 0;
4076		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4077		    &ioc->free_list);
4078	}
4079
4080	/* hi-priority queue */
4081	INIT_LIST_HEAD(&ioc->hpr_free_list);
4082	smid = ioc->hi_priority_smid;
4083	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4084		ioc->hpr_lookup[i].cb_idx = 0xFF;
4085		ioc->hpr_lookup[i].smid = smid;
4086		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4087		    &ioc->hpr_free_list);
4088	}
4089
4090	/* internal queue */
4091	INIT_LIST_HEAD(&ioc->internal_free_list);
4092	smid = ioc->internal_smid;
4093	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4094		ioc->internal_lookup[i].cb_idx = 0xFF;
4095		ioc->internal_lookup[i].smid = smid;
4096		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4097		    &ioc->internal_free_list);
4098	}
4099
4100	/* chain pool */
4101	INIT_LIST_HEAD(&ioc->free_chain_list);
4102	for (i = 0; i < ioc->chain_depth; i++)
4103		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4104		    &ioc->free_chain_list);
4105
4106	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4107
4108	/* initialize Reply Free Queue */
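	/* each entry holds the low 32 bits of the DMA address of a free reply
	 * frame for the firmware to use when posting a reply
	 */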
4109	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4110	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4111	    ioc->reply_sz)
4112		ioc->reply_free[i] = cpu_to_le32(reply_address);
4113
4114	/* initialize reply queues */
4115	_base_assign_reply_queues(ioc);
4116
4117	/* initialize Reply Post Free Queue */
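	/* with MSI-X enabled each reply queue gets its own slice of the post
	 * pool; descriptors are pre-filled with ULLONG_MAX to mark them as
	 * unused
	 */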
4118	reply_post_free = (long)ioc->reply_post_free;
4119	reply_post_free_sz = ioc->reply_post_queue_depth *
4120	    sizeof(Mpi2DefaultReplyDescriptor_t);
4121	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4122		reply_q->reply_post_host_index = 0;
4123		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4124		    reply_post_free;
4125		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4126			reply_q->reply_post_free[i].Words =
4127							cpu_to_le64(ULLONG_MAX);
4128		if (!_base_is_controller_msix_enabled(ioc))
4129			goto skip_init_reply_post_free_queue;
4130		reply_post_free += reply_post_free_sz;
4131	}
4132 skip_init_reply_post_free_queue:
4133
4134	r = _base_send_ioc_init(ioc, sleep_flag);
4135	if (r)
4136		return r;
4137
4138	/* initialize reply free host index */
4139	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4140	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4141
4142	/* initialize reply post host index */
4143	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4144		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4145		    &ioc->chip->ReplyPostHostIndex);
4146		if (!_base_is_controller_msix_enabled(ioc))
4147			goto skip_init_reply_post_host_index;
4148	}
4149
4150 skip_init_reply_post_host_index:
4151
4152	_base_unmask_interrupts(ioc);
4153
4154	r = _base_event_notification(ioc, sleep_flag);
4155	if (r)
4156		return r;
4157
4158	if (sleep_flag == CAN_SLEEP)
4159		_base_static_config_pages(ioc);
4160
4161
4162	if (ioc->is_driver_loading) {
4166		ioc->wait_for_discovery_to_complete =
4167		    _base_determine_wait_on_discovery(ioc);
4168		return r; /* scan_start and scan_finished support */
4169	}
4170
4171
4172	if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
4173		if (ioc->manu_pg10.OEMIdentifier  == 0x80) {
4174			hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
4175			    MFG_PAGE10_HIDE_SSDS_MASK);
4176			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4177				ioc->mfg_pg10_hide_flag = hide_flag;
4178		}
4179	}
4180
4181	r = _base_send_port_enable(ioc, sleep_flag);
4184
4185	return r;
4186}
4187
4188/**
4189 * mpt2sas_base_free_resources - free controller resources (io/irq/memmap)
4190 * @ioc: per adapter object
4191 *
4192 * Return nothing.
4193 */
4194void
4195mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4196{
4197	struct pci_dev *pdev = ioc->pdev;
4198
4199	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4200	    __func__));
4201
4202	_base_mask_interrupts(ioc);
4203	ioc->shost_recovery = 1;
4204	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4205	ioc->shost_recovery = 0;
4206	_base_free_irq(ioc);
4207	_base_disable_msix(ioc);
4208	if (ioc->chip_phys)
4209		iounmap(ioc->chip);
4210	ioc->chip_phys = 0;
4211	pci_release_selected_regions(ioc->pdev, ioc->bars);
4212	pci_disable_pcie_error_reporting(pdev);
4213	pci_disable_device(pdev);
4214	return;
4215}
4216
4217/**
4218 * mpt2sas_base_attach - attach controller instance
4219 * @ioc: per adapter object
4220 *
4221 * Returns 0 for success, non-zero for failure.
4222 */
4223int
4224mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4225{
4226	int r, i;
4227	int cpu_id, last_cpu_id = 0;
4228
4229	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4230	    __func__));
4231
4232	/* setup cpu_msix_table */
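	/* cpu_msix_table maps each online CPU id to an MSI-X reply queue
	 * index and is sized by the highest online CPU id plus one
	 */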
4233	ioc->cpu_count = num_online_cpus();
4234	for_each_online_cpu(cpu_id)
4235		last_cpu_id = cpu_id;
4236	ioc->cpu_msix_table_sz = last_cpu_id + 1;
4237	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4238	ioc->reply_queue_count = 1;
4239	if (!ioc->cpu_msix_table) {
4240		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
4241		    "cpu_msix_table failed!!!\n", ioc->name));
4242		r = -ENOMEM;
4243		goto out_free_resources;
4244	}
4245
4246	if (ioc->is_warpdrive) {
4247		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
4248		    sizeof(resource_size_t *), GFP_KERNEL);
4249		if (!ioc->reply_post_host_index) {
4250			dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
4251				"for reply_post_host_index failed!!!\n", ioc->name));
4252			r = -ENOMEM;
4253			goto out_free_resources;
4254		}
4255	}
4256
4257	r = mpt2sas_base_map_resources(ioc);
4258	if (r)
4259		return r;
4260
4261	if (ioc->is_warpdrive) {
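		/* index 0 uses the standard ReplyPostHostIndex register; the
		 * remaining MSI-X vectors use a register bank starting 0x4000
		 * past the Doorbell register
		 */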
4262		ioc->reply_post_host_index[0] =
4263		    (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
4264
4265		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
4266			ioc->reply_post_host_index[i] = (resource_size_t *)
4267			((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
4268			* 4)));
4269	}
4270
4271	pci_set_drvdata(ioc->pdev, ioc->shost);
4272	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4273	if (r)
4274		goto out_free_resources;
4275
4276	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4277	if (r)
4278		goto out_free_resources;
4279
4280	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4281	    sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
4282	if (!ioc->pfacts) {
4283		r = -ENOMEM;
4284		goto out_free_resources;
4285	}
4286
4287	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4288		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4289		if (r)
4290			goto out_free_resources;
4291	}
4292
4293	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4294	if (r)
4295		goto out_free_resources;
4296
4297	init_waitqueue_head(&ioc->reset_wq);
4298
4299	/* allocate memory pd handle bitmask list */
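	/* one bit per firmware device handle, rounded up to a whole byte */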
4300	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4301	if (ioc->facts.MaxDevHandle % 8)
4302		ioc->pd_handles_sz++;
4303	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4304	    GFP_KERNEL);
4305	if (!ioc->pd_handles) {
4306		r = -ENOMEM;
4307		goto out_free_resources;
4308	}
4309
4310	ioc->fwfault_debug = mpt2sas_fwfault_debug;
4311
4312	/* base internal command bits */
4313	mutex_init(&ioc->base_cmds.mutex);
4314	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4315	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
4316
4317	/* port_enable command bits */
4318	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4319	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
4320
4321	/* transport internal command bits */
4322	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4323	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
4324	mutex_init(&ioc->transport_cmds.mutex);
4325
4326	/* scsih internal command bits */
4327	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4328	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4329	mutex_init(&ioc->scsih_cmds.mutex);
4330
4331	/* task management internal command bits */
4332	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4333	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4334	mutex_init(&ioc->tm_cmds.mutex);
4335
4336	/* config page internal command bits */
4337	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4338	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
4339	mutex_init(&ioc->config_cmds.mutex);
4340
4341	/* ctl module internal command bits */
4342	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4343	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4344	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
4345	mutex_init(&ioc->ctl_cmds.mutex);
4346
4347	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4348	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4349	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4350	    !ioc->ctl_cmds.sense) {
4351		r = -ENOMEM;
4352		goto out_free_resources;
4353	}
4354
4362	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4363		ioc->event_masks[i] = -1;
4364
4365	/* here we enable the events we care about */
4366	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4367	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4368	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4369	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4370	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4371	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4372	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4373	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4374	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4375	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4376	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4377	if (r)
4378		goto out_free_resources;
4379
4380	if (missing_delay[0] != -1 && missing_delay[1] != -1)
4381		_base_update_missing_delay(ioc, missing_delay[0],
4382		    missing_delay[1]);
4383
4384	return 0;
4385
4386 out_free_resources:
4387
4388	ioc->remove_host = 1;
4389	mpt2sas_base_free_resources(ioc);
4390	_base_release_memory_pools(ioc);
4391	pci_set_drvdata(ioc->pdev, NULL);
4392	kfree(ioc->cpu_msix_table);
4393	if (ioc->is_warpdrive)
4394		kfree(ioc->reply_post_host_index);
4395	kfree(ioc->pd_handles);
4396	kfree(ioc->tm_cmds.reply);
4397	kfree(ioc->transport_cmds.reply);
4398	kfree(ioc->scsih_cmds.reply);
4399	kfree(ioc->config_cmds.reply);
4400	kfree(ioc->base_cmds.reply);
4401	kfree(ioc->port_enable_cmds.reply);
4402	kfree(ioc->ctl_cmds.reply);
4403	kfree(ioc->ctl_cmds.sense);
4404	kfree(ioc->pfacts);
4405	ioc->ctl_cmds.reply = NULL;
4406	ioc->base_cmds.reply = NULL;
4407	ioc->tm_cmds.reply = NULL;
4408	ioc->scsih_cmds.reply = NULL;
4409	ioc->transport_cmds.reply = NULL;
4410	ioc->config_cmds.reply = NULL;
4411	ioc->pfacts = NULL;
4412	return r;
4413}
4414
4415
4416/**
4417 * mpt2sas_base_detach - remove controller instance
4418 * @ioc: per adapter object
4419 *
4420 * Return nothing.
4421 */
4422void
4423mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4424{
4425
4426	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4427	    __func__));
4428
4429	mpt2sas_base_stop_watchdog(ioc);
4430	mpt2sas_base_free_resources(ioc);
4431	_base_release_memory_pools(ioc);
4432	pci_set_drvdata(ioc->pdev, NULL);
4433	kfree(ioc->cpu_msix_table);
4434	if (ioc->is_warpdrive)
4435		kfree(ioc->reply_post_host_index);
4436	kfree(ioc->pd_handles);
4437	kfree(ioc->pfacts);
4438	kfree(ioc->ctl_cmds.reply);
4439	kfree(ioc->ctl_cmds.sense);
4440	kfree(ioc->base_cmds.reply);
4441	kfree(ioc->port_enable_cmds.reply);
4442	kfree(ioc->tm_cmds.reply);
4443	kfree(ioc->transport_cmds.reply);
4444	kfree(ioc->scsih_cmds.reply);
4445	kfree(ioc->config_cmds.reply);
4446}
4447
4448/**
4449 * _base_reset_handler - reset callback handler (for base)
4450 * @ioc: per adapter object
4451 * @reset_phase: phase
4452 *
4453 * The handler for doing any required cleanup or initialization.
4454 *
4455 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
4456 * MPT2_IOC_DONE_RESET
4457 *
4458 * Return nothing.
4459 */
4460static void
4461_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4462{
4463	mpt2sas_scsih_reset_handler(ioc, reset_phase);
4464	mpt2sas_ctl_reset_handler(ioc, reset_phase);
4465	switch (reset_phase) {
4466	case MPT2_IOC_PRE_RESET:
4467		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4468		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
4469		break;
4470	case MPT2_IOC_AFTER_RESET:
4471		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4472		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
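		/* any internal command still pending was killed by the reset:
		 * flag it with MPT2_CMD_RESET, release its smid and complete
		 * it so the waiter wakes up and sees the error
		 */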
4473		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
4474			ioc->transport_cmds.status |= MPT2_CMD_RESET;
4475			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4476			complete(&ioc->transport_cmds.done);
4477		}
4478		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
4479			ioc->base_cmds.status |= MPT2_CMD_RESET;
4480			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
4481			complete(&ioc->base_cmds.done);
4482		}
4483		if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
4484			ioc->port_enable_failed = 1;
4485			ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
4486			mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4487			if (ioc->is_driver_loading) {
4488				ioc->start_scan_failed =
4489				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4490				ioc->start_scan = 0;
4491				ioc->port_enable_cmds.status =
4492						MPT2_CMD_NOT_USED;
4493			} else
4494				complete(&ioc->port_enable_cmds.done);
4495
4496		}
4497		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
4498			ioc->config_cmds.status |= MPT2_CMD_RESET;
4499			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
4500			ioc->config_cmds.smid = USHRT_MAX;
4501			complete(&ioc->config_cmds.done);
4502		}
4503		break;
4504	case MPT2_IOC_DONE_RESET:
4505		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4506		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
4507		break;
4508	}
4509}
4510
4511/**
4512 * _wait_for_commands_to_complete - wait for pending commands to complete
4513 * @ioc: Pointer to MPT_ADAPTER structure
4514 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4515 *
4516 * This function waits (up to 10 seconds) for all pending commands to
4517 * complete prior to putting the controller into reset.
4518 */
4519static void
4520_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4521{
4522	u32 ioc_state;
4523	unsigned long flags;
4524	u16 i;
4525
4526	ioc->pending_io_count = 0;
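	/* cannot block in NO_SLEEP context, so skip draining entirely */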
4527	if (sleep_flag != CAN_SLEEP)
4528		return;
4529
4530	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4531	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4532		return;
4533
4534	/* pending command count */
4535	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4536	for (i = 0; i < ioc->scsiio_depth; i++)
4537		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4538			ioc->pending_io_count++;
4539	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4540
4541	if (!ioc->pending_io_count)
4542		return;
4543
4544	/* wait for pending commands to complete */
4545	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4546}
4547
4548/**
4549 * mpt2sas_base_hard_reset_handler - reset controller
4550 * @ioc: Pointer to MPT_ADAPTER structure
4551 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4552 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4553 *
4554 * Returns 0 for success, non-zero for failure.
4555 */
4556int
4557mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4558    enum reset_type type)
4559{
4560	int r;
4561	unsigned long flags;
4562
4563	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4564	    __func__));
4565
4566	if (ioc->pci_error_recovery) {
4567		printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
4568		    ioc->name, __func__);
4569		r = 0;
4570		goto out;
4571	}
4572
4573	if (mpt2sas_fwfault_debug)
4574		mpt2sas_halt_firmware(ioc);
4575
4576	/* TODO - What we really should be doing is pulling
4577	 * out all the code associated with NO_SLEEP; it's never used.
4578	 * That is legacy code from the mpt fusion driver, ported over.
4579	 * I will leave this BUG_ON here for now until it's been resolved.
4580	 */
4581	BUG_ON(sleep_flag == NO_SLEEP);
4582
4583	/* wait for an active reset in progress to complete */
4584	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4585		do {
4586			ssleep(1);
4587		} while (ioc->shost_recovery == 1);
4588		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4589		    __func__));
4590		return ioc->ioc_reset_in_progress_status;
4591	}
4592
4593	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4594	ioc->shost_recovery = 1;
4595	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4596
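	/* quiesce outstanding I/O, reset the IOC, then bring it back to the
	 * OPERATIONAL state; consumers are notified at each reset phase
	 */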
4597	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
4598	_wait_for_commands_to_complete(ioc, sleep_flag);
4599	_base_mask_interrupts(ioc);
4600	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4601	if (r)
4602		goto out;
4603	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4604
4605	/* If this hard reset is called while port enable is active, then
4606	 * there is no reason to call make_ioc_operational
4607	 */
4608	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4609		ioc->remove_host = 1;
4610		r = -EFAULT;
4611		goto out;
4612	}
4613	r = _base_make_ioc_operational(ioc, sleep_flag);
4614	if (!r)
4615		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4616 out:
4617	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
4618	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4619
4620	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4621	ioc->ioc_reset_in_progress_status = r;
4622	ioc->shost_recovery = 0;
4623	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4624	mutex_unlock(&ioc->reset_in_progress_mutex);
4625
4626	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4627	    __func__));
4628	return r;
4629}
4630