mpt2sas_base.c revision b0df96a0068daee4f9c2189c29b9053eb6e46b17
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2012  LSI Corporation
7 *  (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42 * USA.
43 */
44
45#include <linux/kernel.h>
46#include <linux/module.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/types.h>
51#include <linux/pci.h>
52#include <linux/kdev_t.h>
53#include <linux/blkdev.h>
54#include <linux/delay.h>
55#include <linux/interrupt.h>
56#include <linux/dma-mapping.h>
57#include <linux/sort.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/kthread.h>
61#include <linux/aer.h>
62
63#include "mpt2sas_base.h"
64
65static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
66
67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
68
69#define MAX_HBA_QUEUE_DEPTH	30000
70#define MAX_CHAIN_DEPTH		100000
71static int max_queue_depth = -1;
72module_param(max_queue_depth, int, 0);
73MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
74
75static int max_sgl_entries = -1;
76module_param(max_sgl_entries, int, 0);
77MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
78
79static int msix_disable = -1;
80module_param(msix_disable, int, 0);
81MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
82
83static int mpt2sas_fwfault_debug;
84MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
85	"and halt firmware - (default=0)");
86
87static int disable_discovery = -1;
88module_param(disable_discovery, int, 0);
89MODULE_PARM_DESC(disable_discovery, " disable discovery ");
90
91/**
92 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
93 *
94 */
95static int
96_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
97{
98	int ret = param_set_int(val, kp);
99	struct MPT2SAS_ADAPTER *ioc;
100
101	if (ret)
102		return ret;
103
104	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
105	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
106		ioc->fwfault_debug = mpt2sas_fwfault_debug;
107	return 0;
108}
109
110module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
111    param_get_int, &mpt2sas_fwfault_debug, 0644);
112
113/**
114 *  mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
115 * @arg: input argument, used to derive ioc
116 *
117 * Return 0 if controller is removed from pci subsystem.
118 * Return -1 for other case.
119 */
120static int mpt2sas_remove_dead_ioc_func(void *arg)
121{
122		struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
123		struct pci_dev *pdev;
124
125		if ((ioc == NULL))
126			return -1;
127
128		pdev = ioc->pdev;
129		if ((pdev == NULL))
130			return -1;
131		pci_stop_and_remove_bus_device(pdev);
132		return 0;
133}
134
135
136/**
137 * _base_fault_reset_work - workq handling ioc fault conditions
138 * @work: input argument, used to derive ioc
139 * Context: sleep.
140 *
141 * Return nothing.
142 */
143static void
144_base_fault_reset_work(struct work_struct *work)
145{
146	struct MPT2SAS_ADAPTER *ioc =
147	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
148	unsigned long	 flags;
149	u32 doorbell;
150	int rc;
151	struct task_struct *p;
152
153	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
154	if (ioc->shost_recovery || ioc->pci_error_recovery)
155		goto rearm_timer;
156	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
157
158	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
159	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
160		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
161			ioc->name, __func__);
162
163		/* It may be possible that EEH recovery can resolve some of
164		 * pci bus failure issues rather removing the dead ioc function
165		 * by considering controller is in a non-operational state. So
166		 * here priority is given to the EEH recovery. If it doesn't
167		 * not resolve this issue, mpt2sas driver will consider this
168		 * controller to non-operational state and remove the dead ioc
169		 * function.
170		 */
171		if (ioc->non_operational_loop++ < 5) {
172			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
173							 flags);
174			goto rearm_timer;
175		}
176
177		/*
178		 * Call _scsih_flush_pending_cmds callback so that we flush all
179		 * pending commands back to OS. This call is required to aovid
180		 * deadlock at block layer. Dead IOC will fail to do diag reset,
181		 * and this call is safe since dead ioc will never return any
182		 * command back from HW.
183		 */
184		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
185		/*
186		 * Set remove_host flag early since kernel thread will
187		 * take some time to execute.
188		 */
189		ioc->remove_host = 1;
190		/*Remove the Dead Host */
191		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
192		    "mpt2sas_dead_ioc_%d", ioc->id);
193		if (IS_ERR(p)) {
194			printk(MPT2SAS_ERR_FMT
195			"%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
196			ioc->name, __func__);
197		} else {
198		    printk(MPT2SAS_ERR_FMT
199			"%s: Running mpt2sas_dead_ioc thread success !!!!\n",
200			ioc->name, __func__);
201		}
202
203		return; /* don't rearm timer */
204	}
205
206	ioc->non_operational_loop = 0;
207
208	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
209		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
210		    FORCE_BIG_HAMMER);
211		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
212		    __func__, (rc == 0) ? "success" : "failed");
213		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
214		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
215			mpt2sas_base_fault_info(ioc, doorbell &
216			    MPI2_DOORBELL_DATA_MASK);
217	}
218
219	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
220 rearm_timer:
221	if (ioc->fault_reset_work_q)
222		queue_delayed_work(ioc->fault_reset_work_q,
223		    &ioc->fault_reset_work,
224		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
225	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
226}
227
228/**
229 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
230 * @ioc: per adapter object
231 * Context: sleep.
232 *
233 * Return nothing.
234 */
235void
236mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
237{
238	unsigned long	 flags;
239
240	if (ioc->fault_reset_work_q)
241		return;
242
243	/* initialize fault polling */
244	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
245	snprintf(ioc->fault_reset_work_q_name,
246	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
247	ioc->fault_reset_work_q =
248		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
249	if (!ioc->fault_reset_work_q) {
250		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
251		    ioc->name, __func__, __LINE__);
252			return;
253	}
254	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
255	if (ioc->fault_reset_work_q)
256		queue_delayed_work(ioc->fault_reset_work_q,
257		    &ioc->fault_reset_work,
258		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
259	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
260}
261
262/**
263 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
264 * @ioc: per adapter object
265 * Context: sleep.
266 *
267 * Return nothing.
268 */
269void
270mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
271{
272	unsigned long	 flags;
273	struct workqueue_struct *wq;
274
275	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
276	wq = ioc->fault_reset_work_q;
277	ioc->fault_reset_work_q = NULL;
278	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
279	if (wq) {
280		if (!cancel_delayed_work(&ioc->fault_reset_work))
281			flush_workqueue(wq);
282		destroy_workqueue(wq);
283	}
284}
285
286/**
287 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
288 * @ioc: per adapter object
289 * @fault_code: fault code
290 *
291 * Return nothing.
292 */
293void
294mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
295{
296	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
297	    ioc->name, fault_code);
298}
299
300/**
301 * mpt2sas_halt_firmware - halt's mpt controller firmware
302 * @ioc: per adapter object
303 *
304 * For debugging timeout related issues.  Writing 0xCOFFEE00
305 * to the doorbell register will halt controller firmware. With
306 * the purpose to stop both driver and firmware, the enduser can
307 * obtain a ring buffer from controller UART.
308 */
309void
310mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
311{
312	u32 doorbell;
313
314	if (!ioc->fwfault_debug)
315		return;
316
317	dump_stack();
318
319	doorbell = readl(&ioc->chip->Doorbell);
320	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
321		mpt2sas_base_fault_info(ioc , doorbell);
322	else {
323		writel(0xC0FFEE00, &ioc->chip->Doorbell);
324		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
325		    "timeout\n", ioc->name);
326	}
327
328	panic("panic in %s\n", __func__);
329}
330
331#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
332/**
333 * _base_sas_ioc_info - verbose translation of the ioc status
334 * @ioc: per adapter object
335 * @mpi_reply: reply mf payload returned from firmware
336 * @request_hdr: request mf
337 *
338 * Return nothing.
339 */
340static void
341_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
342     MPI2RequestHeader_t *request_hdr)
343{
344	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
345	    MPI2_IOCSTATUS_MASK;
346	char *desc = NULL;
347	u16 frame_sz;
348	char *func_str = NULL;
349
350	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
351	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
352	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
353	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
354		return;
355
356	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
357		return;
358
359	switch (ioc_status) {
360
361/****************************************************************************
362*  Common IOCStatus values for all replies
363****************************************************************************/
364
365	case MPI2_IOCSTATUS_INVALID_FUNCTION:
366		desc = "invalid function";
367		break;
368	case MPI2_IOCSTATUS_BUSY:
369		desc = "busy";
370		break;
371	case MPI2_IOCSTATUS_INVALID_SGL:
372		desc = "invalid sgl";
373		break;
374	case MPI2_IOCSTATUS_INTERNAL_ERROR:
375		desc = "internal error";
376		break;
377	case MPI2_IOCSTATUS_INVALID_VPID:
378		desc = "invalid vpid";
379		break;
380	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
381		desc = "insufficient resources";
382		break;
383	case MPI2_IOCSTATUS_INVALID_FIELD:
384		desc = "invalid field";
385		break;
386	case MPI2_IOCSTATUS_INVALID_STATE:
387		desc = "invalid state";
388		break;
389	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
390		desc = "op state not supported";
391		break;
392
393/****************************************************************************
394*  Config IOCStatus values
395****************************************************************************/
396
397	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
398		desc = "config invalid action";
399		break;
400	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
401		desc = "config invalid type";
402		break;
403	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
404		desc = "config invalid page";
405		break;
406	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
407		desc = "config invalid data";
408		break;
409	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
410		desc = "config no defaults";
411		break;
412	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
413		desc = "config cant commit";
414		break;
415
416/****************************************************************************
417*  SCSI IO Reply
418****************************************************************************/
419
420	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
421	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
422	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
423	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
424	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
425	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
426	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
427	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
428	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
429	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
430	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
431	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
432		break;
433
434/****************************************************************************
435*  For use by SCSI Initiator and SCSI Target end-to-end data protection
436****************************************************************************/
437
438	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
439		desc = "eedp guard error";
440		break;
441	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
442		desc = "eedp ref tag error";
443		break;
444	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
445		desc = "eedp app tag error";
446		break;
447
448/****************************************************************************
449*  SCSI Target values
450****************************************************************************/
451
452	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
453		desc = "target invalid io index";
454		break;
455	case MPI2_IOCSTATUS_TARGET_ABORTED:
456		desc = "target aborted";
457		break;
458	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
459		desc = "target no conn retryable";
460		break;
461	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
462		desc = "target no connection";
463		break;
464	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
465		desc = "target xfer count mismatch";
466		break;
467	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
468		desc = "target data offset error";
469		break;
470	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
471		desc = "target too much write data";
472		break;
473	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
474		desc = "target iu too short";
475		break;
476	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
477		desc = "target ack nak timeout";
478		break;
479	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
480		desc = "target nak received";
481		break;
482
483/****************************************************************************
484*  Serial Attached SCSI values
485****************************************************************************/
486
487	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
488		desc = "smp request failed";
489		break;
490	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
491		desc = "smp data overrun";
492		break;
493
494/****************************************************************************
495*  Diagnostic Buffer Post / Diagnostic Release values
496****************************************************************************/
497
498	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
499		desc = "diagnostic released";
500		break;
501	default:
502		break;
503	}
504
505	if (!desc)
506		return;
507
508	switch (request_hdr->Function) {
509	case MPI2_FUNCTION_CONFIG:
510		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
511		func_str = "config_page";
512		break;
513	case MPI2_FUNCTION_SCSI_TASK_MGMT:
514		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
515		func_str = "task_mgmt";
516		break;
517	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
518		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
519		func_str = "sas_iounit_ctl";
520		break;
521	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
522		frame_sz = sizeof(Mpi2SepRequest_t);
523		func_str = "enclosure";
524		break;
525	case MPI2_FUNCTION_IOC_INIT:
526		frame_sz = sizeof(Mpi2IOCInitRequest_t);
527		func_str = "ioc_init";
528		break;
529	case MPI2_FUNCTION_PORT_ENABLE:
530		frame_sz = sizeof(Mpi2PortEnableRequest_t);
531		func_str = "port_enable";
532		break;
533	case MPI2_FUNCTION_SMP_PASSTHROUGH:
534		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
535		func_str = "smp_passthru";
536		break;
537	default:
538		frame_sz = 32;
539		func_str = "unknown";
540		break;
541	}
542
543	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
544	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
545
546	_debug_dump_mf(request_hdr, frame_sz/4);
547}
548
549/**
550 * _base_display_event_data - verbose translation of firmware asyn events
551 * @ioc: per adapter object
552 * @mpi_reply: reply mf payload returned from firmware
553 *
554 * Return nothing.
555 */
556static void
557_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
558    Mpi2EventNotificationReply_t *mpi_reply)
559{
560	char *desc = NULL;
561	u16 event;
562
563	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
564		return;
565
566	event = le16_to_cpu(mpi_reply->Event);
567
568	switch (event) {
569	case MPI2_EVENT_LOG_DATA:
570		desc = "Log Data";
571		break;
572	case MPI2_EVENT_STATE_CHANGE:
573		desc = "Status Change";
574		break;
575	case MPI2_EVENT_HARD_RESET_RECEIVED:
576		desc = "Hard Reset Received";
577		break;
578	case MPI2_EVENT_EVENT_CHANGE:
579		desc = "Event Change";
580		break;
581	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
582		desc = "Device Status Change";
583		break;
584	case MPI2_EVENT_IR_OPERATION_STATUS:
585		if (!ioc->hide_ir_msg)
586			desc = "IR Operation Status";
587		break;
588	case MPI2_EVENT_SAS_DISCOVERY:
589	{
590		Mpi2EventDataSasDiscovery_t *event_data =
591		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
592		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
593		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
594		    "start" : "stop");
595		if (event_data->DiscoveryStatus)
596			printk("discovery_status(0x%08x)",
597			    le32_to_cpu(event_data->DiscoveryStatus));
598		printk("\n");
599		return;
600	}
601	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
602		desc = "SAS Broadcast Primitive";
603		break;
604	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
605		desc = "SAS Init Device Status Change";
606		break;
607	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
608		desc = "SAS Init Table Overflow";
609		break;
610	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
611		desc = "SAS Topology Change List";
612		break;
613	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
614		desc = "SAS Enclosure Device Status Change";
615		break;
616	case MPI2_EVENT_IR_VOLUME:
617		if (!ioc->hide_ir_msg)
618			desc = "IR Volume";
619		break;
620	case MPI2_EVENT_IR_PHYSICAL_DISK:
621		if (!ioc->hide_ir_msg)
622			desc = "IR Physical Disk";
623		break;
624	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
625		if (!ioc->hide_ir_msg)
626			desc = "IR Configuration Change List";
627		break;
628	case MPI2_EVENT_LOG_ENTRY_ADDED:
629		if (!ioc->hide_ir_msg)
630			desc = "Log Entry Added";
631		break;
632	}
633
634	if (!desc)
635		return;
636
637	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
638}
639#endif
640
641/**
642 * _base_sas_log_info - verbose translation of firmware log info
643 * @ioc: per adapter object
644 * @log_info: log info
645 *
646 * Return nothing.
647 */
648static void
649_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
650{
651	union loginfo_type {
652		u32	loginfo;
653		struct {
654			u32	subcode:16;
655			u32	code:8;
656			u32	originator:4;
657			u32	bus_type:4;
658		} dw;
659	};
660	union loginfo_type sas_loginfo;
661	char *originator_str = NULL;
662
663	sas_loginfo.loginfo = log_info;
664	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
665		return;
666
667	/* each nexus loss loginfo */
668	if (log_info == 0x31170000)
669		return;
670
671	/* eat the loginfos associated with task aborts */
672	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
673	    0x31140000 || log_info == 0x31130000))
674		return;
675
676	switch (sas_loginfo.dw.originator) {
677	case 0:
678		originator_str = "IOP";
679		break;
680	case 1:
681		originator_str = "PL";
682		break;
683	case 2:
684		if (!ioc->hide_ir_msg)
685			originator_str = "IR";
686		else
687			originator_str = "WarpDrive";
688		break;
689	}
690
691	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
692	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
693	     originator_str, sas_loginfo.dw.code,
694	     sas_loginfo.dw.subcode);
695}
696
697/**
698 * _base_display_reply_info -
699 * @ioc: per adapter object
700 * @smid: system request message index
701 * @msix_index: MSIX table index supplied by the OS
702 * @reply: reply message frame(lower 32bit addr)
703 *
704 * Return nothing.
705 */
706static void
707_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
708    u32 reply)
709{
710	MPI2DefaultReply_t *mpi_reply;
711	u16 ioc_status;
712
713	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
714	if (unlikely(!mpi_reply)) {
715		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
716			ioc->name, __FILE__, __LINE__, __func__);
717		return;
718	}
719	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
720#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
721	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
722	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
723		_base_sas_ioc_info(ioc , mpi_reply,
724		   mpt2sas_base_get_msg_frame(ioc, smid));
725	}
726#endif
727	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
728		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
729}
730
731/**
732 * mpt2sas_base_done - base internal command completion routine
733 * @ioc: per adapter object
734 * @smid: system request message index
735 * @msix_index: MSIX table index supplied by the OS
736 * @reply: reply message frame(lower 32bit addr)
737 *
738 * Return 1 meaning mf should be freed from _base_interrupt
739 *        0 means the mf is freed from this function.
740 */
741u8
742mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
743    u32 reply)
744{
745	MPI2DefaultReply_t *mpi_reply;
746
747	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
748	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
749		return 1;
750
751	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
752		return 1;
753
754	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
755	if (mpi_reply) {
756		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
757		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
758	}
759	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
760
761	complete(&ioc->base_cmds.done);
762	return 1;
763}
764
765/**
766 * _base_async_event - main callback handler for firmware asyn events
767 * @ioc: per adapter object
768 * @msix_index: MSIX table index supplied by the OS
769 * @reply: reply message frame(lower 32bit addr)
770 *
771 * Return 1 meaning mf should be freed from _base_interrupt
772 *        0 means the mf is freed from this function.
773 */
774static u8
775_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
776{
777	Mpi2EventNotificationReply_t *mpi_reply;
778	Mpi2EventAckRequest_t *ack_request;
779	u16 smid;
780
781	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
782	if (!mpi_reply)
783		return 1;
784	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
785		return 1;
786#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
787	_base_display_event_data(ioc, mpi_reply);
788#endif
789	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
790		goto out;
791	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
792	if (!smid) {
793		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
794		    ioc->name, __func__);
795		goto out;
796	}
797
798	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
799	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
800	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
801	ack_request->Event = mpi_reply->Event;
802	ack_request->EventContext = mpi_reply->EventContext;
803	ack_request->VF_ID = 0;  /* TODO */
804	ack_request->VP_ID = 0;
805	mpt2sas_base_put_smid_default(ioc, smid);
806
807 out:
808
809	/* scsih callback handler */
810	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
811
812	/* ctl callback handler */
813	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
814
815	return 1;
816}
817
818/**
819 * _base_get_cb_idx - obtain the callback index
820 * @ioc: per adapter object
821 * @smid: system request message index
822 *
823 * Return callback index.
824 */
825static u8
826_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
827{
828	int i;
829	u8 cb_idx;
830
831	if (smid < ioc->hi_priority_smid) {
832		i = smid - 1;
833		cb_idx = ioc->scsi_lookup[i].cb_idx;
834	} else if (smid < ioc->internal_smid) {
835		i = smid - ioc->hi_priority_smid;
836		cb_idx = ioc->hpr_lookup[i].cb_idx;
837	} else if (smid <= ioc->hba_queue_depth) {
838		i = smid - ioc->internal_smid;
839		cb_idx = ioc->internal_lookup[i].cb_idx;
840	} else
841		cb_idx = 0xFF;
842	return cb_idx;
843}
844
845/**
846 * _base_mask_interrupts - disable interrupts
847 * @ioc: per adapter object
848 *
849 * Disabling ResetIRQ, Reply and Doorbell Interrupts
850 *
851 * Return nothing.
852 */
853static void
854_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
855{
856	u32 him_register;
857
858	ioc->mask_interrupts = 1;
859	him_register = readl(&ioc->chip->HostInterruptMask);
860	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
861	writel(him_register, &ioc->chip->HostInterruptMask);
862	readl(&ioc->chip->HostInterruptMask);
863}
864
865/**
866 * _base_unmask_interrupts - enable interrupts
867 * @ioc: per adapter object
868 *
869 * Enabling only Reply Interrupts
870 *
871 * Return nothing.
872 */
873static void
874_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
875{
876	u32 him_register;
877
878	him_register = readl(&ioc->chip->HostInterruptMask);
879	him_register &= ~MPI2_HIM_RIM;
880	writel(him_register, &ioc->chip->HostInterruptMask);
881	ioc->mask_interrupts = 0;
882}
883
884union reply_descriptor {
885	u64 word;
886	struct {
887		u32 low;
888		u32 high;
889	} u;
890};
891
892/**
893 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
894 * @irq: irq number (not used)
895 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
896 * @r: pt_regs pointer (not used)
897 *
898 * Return IRQ_HANDLE if processed, else IRQ_NONE.
899 */
900static irqreturn_t
901_base_interrupt(int irq, void *bus_id)
902{
903	struct adapter_reply_queue *reply_q = bus_id;
904	union reply_descriptor rd;
905	u32 completed_cmds;
906	u8 request_desript_type;
907	u16 smid;
908	u8 cb_idx;
909	u32 reply;
910	u8 msix_index = reply_q->msix_index;
911	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
912	Mpi2ReplyDescriptorsUnion_t *rpf;
913	u8 rc;
914
915	if (ioc->mask_interrupts)
916		return IRQ_NONE;
917
918	if (!atomic_add_unless(&reply_q->busy, 1, 1))
919		return IRQ_NONE;
920
921	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
922	request_desript_type = rpf->Default.ReplyFlags
923	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
924	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
925		atomic_dec(&reply_q->busy);
926		return IRQ_NONE;
927	}
928
929	completed_cmds = 0;
930	cb_idx = 0xFF;
931	do {
932		rd.word = le64_to_cpu(rpf->Words);
933		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
934			goto out;
935		reply = 0;
936		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
937		if (request_desript_type ==
938		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
939			reply = le32_to_cpu
940				(rpf->AddressReply.ReplyFrameAddress);
941			if (reply > ioc->reply_dma_max_address ||
942			    reply < ioc->reply_dma_min_address)
943				reply = 0;
944		} else if (request_desript_type ==
945		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
946			goto next;
947		else if (request_desript_type ==
948		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
949			goto next;
950		if (smid) {
951			cb_idx = _base_get_cb_idx(ioc, smid);
952		if ((likely(cb_idx < MPT_MAX_CALLBACKS))
953			    && (likely(mpt_callbacks[cb_idx] != NULL))) {
954				rc = mpt_callbacks[cb_idx](ioc, smid,
955				    msix_index, reply);
956			if (reply)
957				_base_display_reply_info(ioc, smid,
958				    msix_index, reply);
959			if (rc)
960				mpt2sas_base_free_smid(ioc, smid);
961			}
962		}
963		if (!smid)
964			_base_async_event(ioc, msix_index, reply);
965
966		/* reply free queue handling */
967		if (reply) {
968			ioc->reply_free_host_index =
969			    (ioc->reply_free_host_index ==
970			    (ioc->reply_free_queue_depth - 1)) ?
971			    0 : ioc->reply_free_host_index + 1;
972			ioc->reply_free[ioc->reply_free_host_index] =
973			    cpu_to_le32(reply);
974			wmb();
975			writel(ioc->reply_free_host_index,
976			    &ioc->chip->ReplyFreeHostIndex);
977		}
978
979 next:
980
981		rpf->Words = cpu_to_le64(ULLONG_MAX);
982		reply_q->reply_post_host_index =
983		    (reply_q->reply_post_host_index ==
984		    (ioc->reply_post_queue_depth - 1)) ? 0 :
985		    reply_q->reply_post_host_index + 1;
986		request_desript_type =
987		    reply_q->reply_post_free[reply_q->reply_post_host_index].
988		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
989		completed_cmds++;
990		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
991			goto out;
992		if (!reply_q->reply_post_host_index)
993			rpf = reply_q->reply_post_free;
994		else
995			rpf++;
996	} while (1);
997
998 out:
999
1000	if (!completed_cmds) {
1001		atomic_dec(&reply_q->busy);
1002		return IRQ_NONE;
1003	}
1004	wmb();
1005	if (ioc->is_warpdrive) {
1006		writel(reply_q->reply_post_host_index,
1007		ioc->reply_post_host_index[msix_index]);
1008		atomic_dec(&reply_q->busy);
1009		return IRQ_HANDLED;
1010	}
1011	writel(reply_q->reply_post_host_index | (msix_index <<
1012	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
1013	atomic_dec(&reply_q->busy);
1014	return IRQ_HANDLED;
1015}
1016
1017/**
1018 * _base_is_controller_msix_enabled - is controller support muli-reply queues
1019 * @ioc: per adapter object
1020 *
1021 */
1022static inline int
1023_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
1024{
1025	return (ioc->facts.IOCCapabilities &
1026	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1027}
1028
1029/**
1030 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
1031 * @ioc: per adapter object
1032 * Context: ISR conext
1033 *
1034 * Called when a Task Management request has completed. We want
1035 * to flush the other reply queues so all the outstanding IO has been
1036 * completed back to OS before we process the TM completetion.
1037 *
1038 * Return nothing.
1039 */
1040void
1041mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1042{
1043	struct adapter_reply_queue *reply_q;
1044
1045	/* If MSIX capability is turned off
1046	 * then multi-queues are not enabled
1047	 */
1048	if (!_base_is_controller_msix_enabled(ioc))
1049		return;
1050
1051	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1052		if (ioc->shost_recovery)
1053			return;
1054		/* TMs are on msix_index == 0 */
1055		if (reply_q->msix_index == 0)
1056			continue;
1057		_base_interrupt(reply_q->vector, (void *)reply_q);
1058	}
1059}
1060
1061/**
1062 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
1063 * @cb_idx: callback index
1064 *
1065 * Return nothing.
1066 */
1067void
1068mpt2sas_base_release_callback_handler(u8 cb_idx)
1069{
1070	mpt_callbacks[cb_idx] = NULL;
1071}
1072
1073/**
1074 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
1075 * @cb_func: callback function
1076 *
1077 * Returns cb_func.
1078 */
1079u8
1080mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1081{
1082	u8 cb_idx;
1083
1084	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1085		if (mpt_callbacks[cb_idx] == NULL)
1086			break;
1087
1088	mpt_callbacks[cb_idx] = cb_func;
1089	return cb_idx;
1090}
1091
1092/**
1093 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
1094 *
1095 * Return nothing.
1096 */
1097void
1098mpt2sas_base_initialize_callback_handler(void)
1099{
1100	u8 cb_idx;
1101
1102	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1103		mpt2sas_base_release_callback_handler(cb_idx);
1104}
1105
1106/**
1107 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
1108 * @ioc: per adapter object
1109 * @paddr: virtual address for SGE
1110 *
1111 * Create a zero length scatter gather entry to insure the IOCs hardware has
1112 * something to use if the target device goes brain dead and tries
1113 * to send data even when none is asked for.
1114 *
1115 * Return nothing.
1116 */
1117void
1118mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
1119{
1120	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1121	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1122	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1123	    MPI2_SGE_FLAGS_SHIFT);
1124	ioc->base_add_sg_single(paddr, flags_length, -1);
1125}
1126
1127/**
1128 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1129 * @paddr: virtual address for SGE
1130 * @flags_length: SGE flags and data transfer length
1131 * @dma_addr: Physical address
1132 *
1133 * Return nothing.
1134 */
1135static void
1136_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1137{
1138	Mpi2SGESimple32_t *sgel = paddr;
1139
1140	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1141	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1142	sgel->FlagsLength = cpu_to_le32(flags_length);
1143	sgel->Address = cpu_to_le32(dma_addr);
1144}
1145
1146
1147/**
1148 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1149 * @paddr: virtual address for SGE
1150 * @flags_length: SGE flags and data transfer length
1151 * @dma_addr: Physical address
1152 *
1153 * Return nothing.
1154 */
1155static void
1156_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1157{
1158	Mpi2SGESimple64_t *sgel = paddr;
1159
1160	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1161	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1162	sgel->FlagsLength = cpu_to_le32(flags_length);
1163	sgel->Address = cpu_to_le64(dma_addr);
1164}
1165
1166#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1167
1168/**
1169 * _base_config_dma_addressing - set dma addressing
1170 * @ioc: per adapter object
1171 * @pdev: PCI device struct
1172 *
1173 * Returns 0 for success, non-zero for failure.
1174 */
1175static int
1176_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1177{
1178	struct sysinfo s;
1179	char *desc = NULL;
1180
1181	if (sizeof(dma_addr_t) > 4) {
1182		const uint64_t required_mask =
1183		    dma_get_required_mask(&pdev->dev);
1184		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1185		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1186		    DMA_BIT_MASK(64))) {
1187			ioc->base_add_sg_single = &_base_add_sg_single_64;
1188			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1189			desc = "64";
1190			goto out;
1191		}
1192	}
1193
1194	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1195	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1196		ioc->base_add_sg_single = &_base_add_sg_single_32;
1197		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1198		desc = "32";
1199	} else
1200		return -ENODEV;
1201
1202 out:
1203	si_meminfo(&s);
1204	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1205	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1206
1207	return 0;
1208}
1209
1210/**
1211 * _base_check_enable_msix - checks MSIX capabable.
1212 * @ioc: per adapter object
1213 *
1214 * Check to see if card is capable of MSIX, and set number
1215 * of available msix vectors
1216 */
1217static int
1218_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1219{
1220	int base;
1221	u16 message_control;
1222
1223
1224	/* Check whether controller SAS2008 B0 controller,
1225	   if it is SAS2008 B0 controller use IO-APIC instead of MSIX */
1226	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1227	    ioc->pdev->revision == 0x01) {
1228		return -EINVAL;
1229	}
1230
1231	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1232	if (!base) {
1233		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1234		    "supported\n", ioc->name));
1235		return -EINVAL;
1236	}
1237
1238	/* get msix vector count */
1239	/* NUMA_IO not supported for older controllers */
1240	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1241	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1242	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1243	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1244	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1245	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1246	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1247		ioc->msix_vector_count = 1;
1248	else {
1249		pci_read_config_word(ioc->pdev, base + 2, &message_control);
1250		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1251	}
1252	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1253	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1254
1255	return 0;
1256}
1257
1258/**
1259 * _base_free_irq - free irq
1260 * @ioc: per adapter object
1261 *
1262 * Freeing respective reply_queue from the list.
1263 */
1264static void
1265_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1266{
1267	struct adapter_reply_queue *reply_q, *next;
1268
1269	if (list_empty(&ioc->reply_queue_list))
1270		return;
1271
1272	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1273		list_del(&reply_q->list);
1274		synchronize_irq(reply_q->vector);
1275		free_irq(reply_q->vector, reply_q);
1276		kfree(reply_q);
1277	}
1278}
1279
1280/**
1281 * _base_request_irq - request irq
1282 * @ioc: per adapter object
1283 * @index: msix index into vector table
1284 * @vector: irq vector
1285 *
1286 * Inserting respective reply_queue into the list.
1287 */
1288static int
1289_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1290{
1291	struct adapter_reply_queue *reply_q;
1292	int r;
1293
1294	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1295	if (!reply_q) {
1296		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1297		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1298		return -ENOMEM;
1299	}
1300	reply_q->ioc = ioc;
1301	reply_q->msix_index = index;
1302	reply_q->vector = vector;
1303	atomic_set(&reply_q->busy, 0);
1304	if (ioc->msix_enable)
1305		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1306		    MPT2SAS_DRIVER_NAME, ioc->id, index);
1307	else
1308		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1309		    MPT2SAS_DRIVER_NAME, ioc->id);
1310	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1311	    reply_q);
1312	if (r) {
1313		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1314		    reply_q->name, vector);
1315		kfree(reply_q);
1316		return -EBUSY;
1317	}
1318
1319	INIT_LIST_HEAD(&reply_q->list);
1320	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1321	return 0;
1322}
1323
1324/**
1325 * _base_assign_reply_queues - assigning msix index for each cpu
1326 * @ioc: per adapter object
1327 *
1328 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
1329 *
1330 * It would nice if we could call irq_set_affinity, however it is not
1331 * an exported symbol
1332 */
1333static void
1334_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1335{
1336	struct adapter_reply_queue *reply_q;
1337	int cpu_id;
1338	int cpu_grouping, loop, grouping, grouping_mod;
1339
1340	if (!_base_is_controller_msix_enabled(ioc))
1341		return;
1342
1343	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1344	/* when there are more cpus than available msix vectors,
1345	 * then group cpus togeather on same irq
1346	 */
1347	if (ioc->cpu_count > ioc->msix_vector_count) {
1348		grouping = ioc->cpu_count / ioc->msix_vector_count;
1349		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1350		if (grouping < 2 || (grouping == 2 && !grouping_mod))
1351			cpu_grouping = 2;
1352		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1353			cpu_grouping = 4;
1354		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1355			cpu_grouping = 8;
1356		else
1357			cpu_grouping = 16;
1358	} else
1359		cpu_grouping = 0;
1360
1361	loop = 0;
1362	reply_q = list_entry(ioc->reply_queue_list.next,
1363	     struct adapter_reply_queue, list);
1364	for_each_online_cpu(cpu_id) {
1365		if (!cpu_grouping) {
1366			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1367			reply_q = list_entry(reply_q->list.next,
1368			    struct adapter_reply_queue, list);
1369		} else {
1370			if (loop < cpu_grouping) {
1371				ioc->cpu_msix_table[cpu_id] =
1372					reply_q->msix_index;
1373				loop++;
1374			} else {
1375				reply_q = list_entry(reply_q->list.next,
1376				    struct adapter_reply_queue, list);
1377				ioc->cpu_msix_table[cpu_id] =
1378					reply_q->msix_index;
1379				loop = 1;
1380			}
1381		}
1382	}
1383}
1384
1385/**
1386 * _base_disable_msix - disables msix
1387 * @ioc: per adapter object
1388 *
1389 */
1390static void
1391_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1392{
1393	if (ioc->msix_enable) {
1394		pci_disable_msix(ioc->pdev);
1395		ioc->msix_enable = 0;
1396	}
1397}
1398
1399/**
1400 * _base_enable_msix - enables msix, failback to io_apic
1401 * @ioc: per adapter object
1402 *
1403 */
1404static int
1405_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1406{
1407	struct msix_entry *entries, *a;
1408	int r;
1409	int i;
1410	u8 try_msix = 0;
1411
1412	INIT_LIST_HEAD(&ioc->reply_queue_list);
1413
1414	if (msix_disable == -1 || msix_disable == 0)
1415		try_msix = 1;
1416
1417	if (!try_msix)
1418		goto try_ioapic;
1419
1420	if (_base_check_enable_msix(ioc) != 0)
1421		goto try_ioapic;
1422
1423	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1424	    ioc->msix_vector_count);
1425
1426	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1427	    GFP_KERNEL);
1428	if (!entries) {
1429		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
1430		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
1431		    __LINE__, __func__));
1432		goto try_ioapic;
1433	}
1434
1435	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1436		a->entry = i;
1437
1438	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1439	if (r) {
1440		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1441		    "failed (r=%d) !!!\n", ioc->name, r));
1442		kfree(entries);
1443		goto try_ioapic;
1444	}
1445
1446	ioc->msix_enable = 1;
1447	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1448		r = _base_request_irq(ioc, i, a->vector);
1449		if (r) {
1450			_base_free_irq(ioc);
1451			_base_disable_msix(ioc);
1452			kfree(entries);
1453			goto try_ioapic;
1454		}
1455	}
1456
1457	kfree(entries);
1458	return 0;
1459
1460/* failback to io_apic interrupt routing */
1461 try_ioapic:
1462
1463	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1464
1465	return r;
1466}
1467
1468/**
1469 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1470 * @ioc: per adapter object
1471 *
1472 * Returns 0 for success, non-zero for failure.
1473 */
1474int
1475mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1476{
1477	struct pci_dev *pdev = ioc->pdev;
1478	u32 memap_sz;
1479	u32 pio_sz;
1480	int i, r = 0;
1481	u64 pio_chip = 0;
1482	u64 chip_phys = 0;
1483	struct adapter_reply_queue *reply_q;
1484
1485	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1486	    ioc->name, __func__));
1487
1488	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1489	if (pci_enable_device_mem(pdev)) {
1490		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1491		    "failed\n", ioc->name);
1492		return -ENODEV;
1493	}
1494
1495
1496	if (pci_request_selected_regions(pdev, ioc->bars,
1497	    MPT2SAS_DRIVER_NAME)) {
1498		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1499		    "failed\n", ioc->name);
1500		r = -ENODEV;
1501		goto out_fail;
1502	}
1503
1504	/* AER (Advanced Error Reporting) hooks */
1505	pci_enable_pcie_error_reporting(pdev);
1506
1507	pci_set_master(pdev);
1508
1509	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1510		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1511		    ioc->name, pci_name(pdev));
1512		r = -ENODEV;
1513		goto out_fail;
1514	}
1515
1516	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1517		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1518			if (pio_sz)
1519				continue;
1520			pio_chip = (u64)pci_resource_start(pdev, i);
1521			pio_sz = pci_resource_len(pdev, i);
1522		} else {
1523			if (memap_sz)
1524				continue;
1525			/* verify memory resource is valid before using */
1526			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1527				ioc->chip_phys = pci_resource_start(pdev, i);
1528				chip_phys = (u64)ioc->chip_phys;
1529				memap_sz = pci_resource_len(pdev, i);
1530				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1531				if (ioc->chip == NULL) {
1532					printk(MPT2SAS_ERR_FMT "unable to map "
1533					    "adapter memory!\n", ioc->name);
1534					r = -EINVAL;
1535					goto out_fail;
1536				}
1537			}
1538		}
1539	}
1540
1541	_base_mask_interrupts(ioc);
1542	r = _base_enable_msix(ioc);
1543	if (r)
1544		goto out_fail;
1545
1546	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1547		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1548		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1549		    "IO-APIC enabled"), reply_q->vector);
1550
1551	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1552	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1553	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1554	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1555
1556	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1557	pci_save_state(pdev);
1558
1559	return 0;
1560
1561 out_fail:
1562	if (ioc->chip_phys)
1563		iounmap(ioc->chip);
1564	ioc->chip_phys = 0;
1565	pci_release_selected_regions(ioc->pdev, ioc->bars);
1566	pci_disable_pcie_error_reporting(pdev);
1567	pci_disable_device(pdev);
1568	return r;
1569}
1570
1571/**
1572 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1573 * @ioc: per adapter object
1574 * @smid: system request message index(smid zero is invalid)
1575 *
1576 * Returns virt pointer to message frame.
1577 */
1578void *
1579mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1580{
1581	return (void *)(ioc->request + (smid * ioc->request_sz));
1582}
1583
1584/**
1585 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1586 * @ioc: per adapter object
1587 * @smid: system request message index
1588 *
1589 * Returns virt pointer to sense buffer.
1590 */
1591void *
1592mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1593{
1594	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1595}
1596
1597/**
1598 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1599 * @ioc: per adapter object
1600 * @smid: system request message index
1601 *
1602 * Returns phys pointer to the low 32bit address of the sense buffer.
1603 */
1604__le32
1605mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1606{
1607	return cpu_to_le32(ioc->sense_dma +
1608			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1609}
1610
1611/**
1612 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1613 * @ioc: per adapter object
1614 * @phys_addr: lower 32 physical addr of the reply
1615 *
1616 * Converts 32bit lower physical addr into a virt address.
1617 */
1618void *
1619mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1620{
1621	if (!phys_addr)
1622		return NULL;
1623	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1624}
1625
1626/**
1627 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1628 * @ioc: per adapter object
1629 * @cb_idx: callback index
1630 *
1631 * Returns smid (zero is invalid)
1632 */
1633u16
1634mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1635{
1636	unsigned long flags;
1637	struct request_tracker *request;
1638	u16 smid;
1639
1640	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1641	if (list_empty(&ioc->internal_free_list)) {
1642		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1643		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1644		    ioc->name, __func__);
1645		return 0;
1646	}
1647
1648	request = list_entry(ioc->internal_free_list.next,
1649	    struct request_tracker, tracker_list);
1650	request->cb_idx = cb_idx;
1651	smid = request->smid;
1652	list_del(&request->tracker_list);
1653	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1654	return smid;
1655}
1656
1657/**
1658 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1659 * @ioc: per adapter object
1660 * @cb_idx: callback index
1661 * @scmd: pointer to scsi command object
1662 *
1663 * Returns smid (zero is invalid)
1664 */
1665u16
1666mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1667    struct scsi_cmnd *scmd)
1668{
1669	unsigned long flags;
1670	struct scsiio_tracker *request;
1671	u16 smid;
1672
1673	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1674	if (list_empty(&ioc->free_list)) {
1675		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1676		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1677		    ioc->name, __func__);
1678		return 0;
1679	}
1680
1681	request = list_entry(ioc->free_list.next,
1682	    struct scsiio_tracker, tracker_list);
1683	request->scmd = scmd;
1684	request->cb_idx = cb_idx;
1685	smid = request->smid;
1686	list_del(&request->tracker_list);
1687	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1688	return smid;
1689}
1690
1691/**
1692 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1693 * @ioc: per adapter object
1694 * @cb_idx: callback index
1695 *
1696 * Returns smid (zero is invalid)
1697 */
1698u16
1699mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1700{
1701	unsigned long flags;
1702	struct request_tracker *request;
1703	u16 smid;
1704
1705	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1706	if (list_empty(&ioc->hpr_free_list)) {
1707		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1708		return 0;
1709	}
1710
1711	request = list_entry(ioc->hpr_free_list.next,
1712	    struct request_tracker, tracker_list);
1713	request->cb_idx = cb_idx;
1714	smid = request->smid;
1715	list_del(&request->tracker_list);
1716	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1717	return smid;
1718}
1719
1720
1721/**
1722 * mpt2sas_base_free_smid - put smid back on free_list
1723 * @ioc: per adapter object
1724 * @smid: system request message index
1725 *
1726 * Return nothing.
1727 */
1728void
1729mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1730{
1731	unsigned long flags;
1732	int i;
1733	struct chain_tracker *chain_req, *next;
1734
1735	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1736	if (smid < ioc->hi_priority_smid) {
1737		/* scsiio queue */
1738		i = smid - 1;
1739		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1740			list_for_each_entry_safe(chain_req, next,
1741			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
1742				list_del_init(&chain_req->tracker_list);
1743				list_add_tail(&chain_req->tracker_list,
1744				    &ioc->free_chain_list);
1745			}
1746		}
1747		ioc->scsi_lookup[i].cb_idx = 0xFF;
1748		ioc->scsi_lookup[i].scmd = NULL;
1749		ioc->scsi_lookup[i].direct_io = 0;
1750		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1751		    &ioc->free_list);
1752		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1753
1754		/*
1755		 * See _wait_for_commands_to_complete() call with regards
1756		 * to this code.
1757		 */
1758		if (ioc->shost_recovery && ioc->pending_io_count) {
1759			if (ioc->pending_io_count == 1)
1760				wake_up(&ioc->reset_wq);
1761			ioc->pending_io_count--;
1762		}
1763		return;
1764	} else if (smid < ioc->internal_smid) {
1765		/* hi-priority */
1766		i = smid - ioc->hi_priority_smid;
1767		ioc->hpr_lookup[i].cb_idx = 0xFF;
1768		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1769		    &ioc->hpr_free_list);
1770	} else if (smid <= ioc->hba_queue_depth) {
1771		/* internal queue */
1772		i = smid - ioc->internal_smid;
1773		ioc->internal_lookup[i].cb_idx = 0xFF;
1774		list_add_tail(&ioc->internal_lookup[i].tracker_list,
1775		    &ioc->internal_free_list);
1776	}
1777	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1778}
1779
1780/**
1781 * _base_writeq - 64 bit write to MMIO
1782 * @ioc: per adapter object
1783 * @b: data payload
1784 * @addr: address in MMIO space
1785 * @writeq_lock: spin lock
1786 *
1787 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
1788 * care of 32 bit environment where its not quarenteed to send the entire word
1789 * in one transfer.
1790 */
1791#ifndef writeq
1792static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1793    spinlock_t *writeq_lock)
1794{
1795	unsigned long flags;
1796	__u64 data_out = cpu_to_le64(b);
1797
1798	spin_lock_irqsave(writeq_lock, flags);
1799	writel((u32)(data_out), addr);
1800	writel((u32)(data_out >> 32), (addr + 4));
1801	spin_unlock_irqrestore(writeq_lock, flags);
1802}
1803#else
1804static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1805    spinlock_t *writeq_lock)
1806{
1807	writeq(cpu_to_le64(b), addr);
1808}
1809#endif
1810
1811static inline u8
1812_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1813{
1814	return ioc->cpu_msix_table[raw_smp_processor_id()];
1815}
1816
1817/**
1818 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1819 * @ioc: per adapter object
1820 * @smid: system request message index
1821 * @handle: device handle
1822 *
1823 * Return nothing.
1824 */
1825void
1826mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1827{
1828	Mpi2RequestDescriptorUnion_t descriptor;
1829	u64 *request = (u64 *)&descriptor;
1830
1831
1832	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1833	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
1834	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1835	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1836	descriptor.SCSIIO.LMID = 0;
1837	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1838	    &ioc->scsi_lookup_lock);
1839}
1840
1841
1842/**
1843 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1844 * @ioc: per adapter object
1845 * @smid: system request message index
1846 *
1847 * Return nothing.
1848 */
1849void
1850mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1851{
1852	Mpi2RequestDescriptorUnion_t descriptor;
1853	u64 *request = (u64 *)&descriptor;
1854
1855	descriptor.HighPriority.RequestFlags =
1856	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1857	descriptor.HighPriority.MSIxIndex =  0;
1858	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1859	descriptor.HighPriority.LMID = 0;
1860	descriptor.HighPriority.Reserved1 = 0;
1861	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1862	    &ioc->scsi_lookup_lock);
1863}
1864
1865/**
1866 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1867 * @ioc: per adapter object
1868 * @smid: system request message index
1869 *
1870 * Return nothing.
1871 */
1872void
1873mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1874{
1875	Mpi2RequestDescriptorUnion_t descriptor;
1876	u64 *request = (u64 *)&descriptor;
1877
1878	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1879	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
1880	descriptor.Default.SMID = cpu_to_le16(smid);
1881	descriptor.Default.LMID = 0;
1882	descriptor.Default.DescriptorTypeDependent = 0;
1883	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1884	    &ioc->scsi_lookup_lock);
1885}
1886
1887/**
1888 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1889 * @ioc: per adapter object
1890 * @smid: system request message index
1891 * @io_index: value used to track the IO
1892 *
1893 * Return nothing.
1894 */
1895void
1896mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1897    u16 io_index)
1898{
1899	Mpi2RequestDescriptorUnion_t descriptor;
1900	u64 *request = (u64 *)&descriptor;
1901
1902	descriptor.SCSITarget.RequestFlags =
1903	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1904	descriptor.SCSITarget.MSIxIndex =  _base_get_msix_index(ioc);
1905	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1906	descriptor.SCSITarget.LMID = 0;
1907	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1908	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1909	    &ioc->scsi_lookup_lock);
1910}
1911
1912/**
1913 * _base_display_dell_branding - Disply branding string
1914 * @ioc: per adapter object
1915 *
1916 * Return nothing.
1917 */
1918static void
1919_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1920{
1921	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1922
1923	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1924		return;
1925
1926	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1927	switch (ioc->pdev->subsystem_device) {
1928	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1929		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1930		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1931		break;
1932	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1933		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1934		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1935		break;
1936	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1937		strncpy(dell_branding,
1938		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1939		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1940		break;
1941	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1942		strncpy(dell_branding,
1943		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1944		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1945		break;
1946	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
1947		strncpy(dell_branding,
1948		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
1949		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1950		break;
1951	case MPT2SAS_DELL_PERC_H200_SSDID:
1952		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
1953		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1954		break;
1955	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
1956		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
1957		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1958		break;
1959	default:
1960		sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
1961		break;
1962	}
1963
1964	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
1965	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
1966	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
1967	    ioc->pdev->subsystem_device);
1968}
1969
1970/**
1971 * _base_display_intel_branding - Display branding string
1972 * @ioc: per adapter object
1973 *
1974 * Return nothing.
1975 */
1976static void
1977_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1978{
1979	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1980		return;
1981
1982	switch (ioc->pdev->device) {
1983	case MPI2_MFGPAGE_DEVID_SAS2008:
1984		switch (ioc->pdev->subsystem_device) {
1985		case MPT2SAS_INTEL_RMS2LL080_SSDID:
1986			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1987			    MPT2SAS_INTEL_RMS2LL080_BRANDING);
1988			break;
1989		case MPT2SAS_INTEL_RMS2LL040_SSDID:
1990			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1991			    MPT2SAS_INTEL_RMS2LL040_BRANDING);
1992			break;
1993		case MPT2SAS_INTEL_SSD910_SSDID:
1994			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1995			    MPT2SAS_INTEL_SSD910_BRANDING);
1996			break;
1997		default:
1998			break;
1999		}
2000	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2001		switch (ioc->pdev->subsystem_device) {
2002		case MPT2SAS_INTEL_RS25GB008_SSDID:
2003			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2004			    MPT2SAS_INTEL_RS25GB008_BRANDING);
2005			break;
2006		case MPT2SAS_INTEL_RMS25JB080_SSDID:
2007			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2008			    MPT2SAS_INTEL_RMS25JB080_BRANDING);
2009			break;
2010		case MPT2SAS_INTEL_RMS25JB040_SSDID:
2011			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2012			    MPT2SAS_INTEL_RMS25JB040_BRANDING);
2013			break;
2014		case MPT2SAS_INTEL_RMS25KB080_SSDID:
2015			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2016			    MPT2SAS_INTEL_RMS25KB080_BRANDING);
2017			break;
2018		case MPT2SAS_INTEL_RMS25KB040_SSDID:
2019			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2020			    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2021			break;
2022		case MPT2SAS_INTEL_RMS25LB040_SSDID:
2023			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2024			    MPT2SAS_INTEL_RMS25LB040_BRANDING);
2025			break;
2026		case MPT2SAS_INTEL_RMS25LB080_SSDID:
2027			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2028			    MPT2SAS_INTEL_RMS25LB080_BRANDING);
2029			break;
2030		default:
2031			break;
2032		}
2033	default:
2034		break;
2035	}
2036}
2037
2038/**
2039 * _base_display_hp_branding - Display branding string
2040 * @ioc: per adapter object
2041 *
2042 * Return nothing.
2043 */
2044static void
2045_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
2046{
2047	if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
2048		return;
2049
2050	switch (ioc->pdev->device) {
2051	case MPI2_MFGPAGE_DEVID_SAS2004:
2052		switch (ioc->pdev->subsystem_device) {
2053		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2054			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2055			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2056			break;
2057		default:
2058			break;
2059		}
2060	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2061		switch (ioc->pdev->subsystem_device) {
2062		case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2063			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2064			    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2065			break;
2066		case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2067			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2068			    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2069			break;
2070		case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2071			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2072			    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2073			break;
2074		case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2075			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2076			    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2077			break;
2078		default:
2079			break;
2080		}
2081	default:
2082		break;
2083	}
2084}
2085
2086/**
2087 * _base_display_ioc_capabilities - Disply IOC's capabilities.
2088 * @ioc: per adapter object
2089 *
2090 * Return nothing.
2091 */
2092static void
2093_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2094{
2095	int i = 0;
2096	char desc[16];
2097	u32 iounit_pg1_flags;
2098	u32 bios_version;
2099
2100	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2101	strncpy(desc, ioc->manu_pg0.ChipName, 16);
2102	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
2103	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2104	    ioc->name, desc,
2105	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2106	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2107	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2108	   ioc->facts.FWVersion.Word & 0x000000FF,
2109	   ioc->pdev->revision,
2110	   (bios_version & 0xFF000000) >> 24,
2111	   (bios_version & 0x00FF0000) >> 16,
2112	   (bios_version & 0x0000FF00) >> 8,
2113	    bios_version & 0x000000FF);
2114
2115	_base_display_dell_branding(ioc);
2116	_base_display_intel_branding(ioc);
2117	_base_display_hp_branding(ioc);
2118
2119	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
2120
2121	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2122		printk("Initiator");
2123		i++;
2124	}
2125
2126	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2127		printk("%sTarget", i ? "," : "");
2128		i++;
2129	}
2130
2131	i = 0;
2132	printk("), ");
2133	printk("Capabilities=(");
2134
2135	if (!ioc->hide_ir_msg) {
2136		if (ioc->facts.IOCCapabilities &
2137		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2138			printk("Raid");
2139			i++;
2140		}
2141	}
2142
2143	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2144		printk("%sTLR", i ? "," : "");
2145		i++;
2146	}
2147
2148	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2149		printk("%sMulticast", i ? "," : "");
2150		i++;
2151	}
2152
2153	if (ioc->facts.IOCCapabilities &
2154	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2155		printk("%sBIDI Target", i ? "," : "");
2156		i++;
2157	}
2158
2159	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2160		printk("%sEEDP", i ? "," : "");
2161		i++;
2162	}
2163
2164	if (ioc->facts.IOCCapabilities &
2165	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2166		printk("%sSnapshot Buffer", i ? "," : "");
2167		i++;
2168	}
2169
2170	if (ioc->facts.IOCCapabilities &
2171	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2172		printk("%sDiag Trace Buffer", i ? "," : "");
2173		i++;
2174	}
2175
2176	if (ioc->facts.IOCCapabilities &
2177	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2178		printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : "");
2179		i++;
2180	}
2181
2182	if (ioc->facts.IOCCapabilities &
2183	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2184		printk("%sTask Set Full", i ? "," : "");
2185		i++;
2186	}
2187
2188	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2189	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2190		printk("%sNCQ", i ? "," : "");
2191		i++;
2192	}
2193
2194	printk(")\n");
2195}
2196
2197/**
2198 * mpt2sas_base_update_missing_delay - change the missing delay timers
2199 * @ioc: per adapter object
2200 * @device_missing_delay: amount of time till device is reported missing
2201 * @io_missing_delay: interval IO is returned when there is a missing device
2202 *
2203 * Return nothing.
2204 *
2205 * Passed on the command line, this function will modify the device missing
2206 * delay, as well as the io missing delay. This should be called at driver
2207 * load time.
2208 */
2209void
2210mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2211	u16 device_missing_delay, u8 io_missing_delay)
2212{
2213	u16 dmd, dmd_new, dmd_orignal;
2214	u8 io_missing_delay_original;
2215	u16 sz;
2216	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2217	Mpi2ConfigReply_t mpi_reply;
2218	u8 num_phys = 0;
2219	u16 ioc_status;
2220
2221	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
2222	if (!num_phys)
2223		return;
2224
2225	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2226	    sizeof(Mpi2SasIOUnit1PhyData_t));
2227	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2228	if (!sas_iounit_pg1) {
2229		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2230		    ioc->name, __FILE__, __LINE__, __func__);
2231		goto out;
2232	}
2233	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2234	    sas_iounit_pg1, sz))) {
2235		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2236		    ioc->name, __FILE__, __LINE__, __func__);
2237		goto out;
2238	}
2239	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2240	    MPI2_IOCSTATUS_MASK;
2241	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2242		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2243		    ioc->name, __FILE__, __LINE__, __func__);
2244		goto out;
2245	}
2246
2247	/* device missing delay */
2248	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2249	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2250		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2251	else
2252		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2253	dmd_orignal = dmd;
2254	if (device_missing_delay > 0x7F) {
2255		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2256		    device_missing_delay;
2257		dmd = dmd / 16;
2258		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2259	} else
2260		dmd = device_missing_delay;
2261	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2262
2263	/* io missing delay */
2264	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2265	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2266
2267	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2268	    sz)) {
2269		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2270			dmd_new = (dmd &
2271			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2272		else
2273			dmd_new =
2274		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2275		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
2276		    "new(%d)\n", ioc->name, dmd_orignal, dmd_new);
2277		printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), "
2278		    "new(%d)\n", ioc->name, io_missing_delay_original,
2279		    io_missing_delay);
2280		ioc->device_missing_delay = dmd_new;
2281		ioc->io_missing_delay = io_missing_delay;
2282	}
2283
2284out:
2285	kfree(sas_iounit_pg1);
2286}
2287
2288/**
2289 * _base_static_config_pages - static start of day config pages
2290 * @ioc: per adapter object
2291 *
2292 * Return nothing.
2293 */
2294static void
2295_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
2296{
2297	Mpi2ConfigReply_t mpi_reply;
2298	u32 iounit_pg1_flags;
2299
2300	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2301	if (ioc->ir_firmware)
2302		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2303		    &ioc->manu_pg10);
2304	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2305	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2306	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2307	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2308	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2309	_base_display_ioc_capabilities(ioc);
2310
2311	/*
2312	 * Enable task_set_full handling in iounit_pg1 when the
2313	 * facts capabilities indicate that its supported.
2314	 */
2315	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2316	if ((ioc->facts.IOCCapabilities &
2317	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2318		iounit_pg1_flags &=
2319		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2320	else
2321		iounit_pg1_flags |=
2322		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2323	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2324	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2325
2326}
2327
2328/**
2329 * _base_release_memory_pools - release memory
2330 * @ioc: per adapter object
2331 *
2332 * Free memory allocated from _base_allocate_memory_pools.
2333 *
2334 * Return nothing.
2335 */
2336static void
2337_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2338{
2339	int i;
2340
2341	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2342	    __func__));
2343
2344	if (ioc->request) {
2345		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2346		    ioc->request,  ioc->request_dma);
2347		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
2348		    ": free\n", ioc->name, ioc->request));
2349		ioc->request = NULL;
2350	}
2351
2352	if (ioc->sense) {
2353		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2354		if (ioc->sense_dma_pool)
2355			pci_pool_destroy(ioc->sense_dma_pool);
2356		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
2357		    ": free\n", ioc->name, ioc->sense));
2358		ioc->sense = NULL;
2359	}
2360
2361	if (ioc->reply) {
2362		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2363		if (ioc->reply_dma_pool)
2364			pci_pool_destroy(ioc->reply_dma_pool);
2365		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
2366		     ": free\n", ioc->name, ioc->reply));
2367		ioc->reply = NULL;
2368	}
2369
2370	if (ioc->reply_free) {
2371		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2372		    ioc->reply_free_dma);
2373		if (ioc->reply_free_dma_pool)
2374			pci_pool_destroy(ioc->reply_free_dma_pool);
2375		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
2376		    "(0x%p): free\n", ioc->name, ioc->reply_free));
2377		ioc->reply_free = NULL;
2378	}
2379
2380	if (ioc->reply_post_free) {
2381		pci_pool_free(ioc->reply_post_free_dma_pool,
2382		    ioc->reply_post_free, ioc->reply_post_free_dma);
2383		if (ioc->reply_post_free_dma_pool)
2384			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2385		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2386		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2387		    ioc->reply_post_free));
2388		ioc->reply_post_free = NULL;
2389	}
2390
2391	if (ioc->config_page) {
2392		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2393		    "config_page(0x%p): free\n", ioc->name,
2394		    ioc->config_page));
2395		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2396		    ioc->config_page, ioc->config_page_dma);
2397	}
2398
2399	if (ioc->scsi_lookup) {
2400		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2401		ioc->scsi_lookup = NULL;
2402	}
2403	kfree(ioc->hpr_lookup);
2404	kfree(ioc->internal_lookup);
2405	if (ioc->chain_lookup) {
2406		for (i = 0; i < ioc->chain_depth; i++) {
2407			if (ioc->chain_lookup[i].chain_buffer)
2408				pci_pool_free(ioc->chain_dma_pool,
2409				    ioc->chain_lookup[i].chain_buffer,
2410				    ioc->chain_lookup[i].chain_buffer_dma);
2411		}
2412		if (ioc->chain_dma_pool)
2413			pci_pool_destroy(ioc->chain_dma_pool);
2414		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2415		ioc->chain_lookup = NULL;
2416	}
2417}
2418
2419
2420/**
2421 * _base_allocate_memory_pools - allocate start of day memory pools
2422 * @ioc: per adapter object
2423 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2424 *
2425 * Returns 0 success, anything else error
2426 */
2427static int
2428_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
2429{
2430	struct mpt2sas_facts *facts;
2431	u16 max_sge_elements;
2432	u16 chains_needed_per_io;
2433	u32 sz, total_sz, reply_post_free_sz;
2434	u32 retry_sz;
2435	u16 max_request_credit;
2436	int i;
2437
2438	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2439	    __func__));
2440
2441	retry_sz = 0;
2442	facts = &ioc->facts;
2443
2444	/* command line tunables  for max sgl entries */
2445	if (max_sgl_entries != -1) {
2446		ioc->shost->sg_tablesize = (max_sgl_entries <
2447		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
2448		    MPT2SAS_SG_DEPTH;
2449	} else {
2450		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
2451	}
2452
2453	/* command line tunables  for max controller queue depth */
2454	if (max_queue_depth != -1 && max_queue_depth != 0) {
2455		max_request_credit = min_t(u16, max_queue_depth +
2456			ioc->hi_priority_depth + ioc->internal_depth,
2457			facts->RequestCredit);
2458		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2459			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2460	} else
2461		max_request_credit = min_t(u16, facts->RequestCredit,
2462		    MAX_HBA_QUEUE_DEPTH);
2463
2464	ioc->hba_queue_depth = max_request_credit;
2465	ioc->hi_priority_depth = facts->HighPriorityCredit;
2466	ioc->internal_depth = ioc->hi_priority_depth + 5;
2467
2468	/* request frame size */
2469	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2470
2471	/* reply frame size */
2472	ioc->reply_sz = facts->ReplyFrameSize * 4;
2473
2474 retry_allocation:
2475	total_sz = 0;
2476	/* calculate number of sg elements left over in the 1st frame */
2477	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2478	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
2479	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
2480
2481	/* now do the same for a chain buffer */
2482	max_sge_elements = ioc->request_sz - ioc->sge_size;
2483	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
2484
2485	ioc->chain_offset_value_for_main_message =
2486	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
2487	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
2488
2489	/*
2490	 *  MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2491	 */
2492	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2493	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2494	    + 1;
2495	if (chains_needed_per_io > facts->MaxChainDepth) {
2496		chains_needed_per_io = facts->MaxChainDepth;
2497		ioc->shost->sg_tablesize = min_t(u16,
2498		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2499		* chains_needed_per_io), ioc->shost->sg_tablesize);
2500	}
2501	ioc->chains_needed_per_io = chains_needed_per_io;
2502
2503	/* reply free queue sizing - taking into account for 64 FW events */
2504	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2505
2506	/* align the reply post queue on the next 16 count boundary */
2507	if (!ioc->reply_free_queue_depth % 16)
2508		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2509	else
2510		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2511				32 - (ioc->reply_free_queue_depth % 16);
2512	if (ioc->reply_post_queue_depth >
2513	    facts->MaxReplyDescriptorPostQueueDepth) {
2514		ioc->reply_post_queue_depth = min_t(u16,
2515		    (facts->MaxReplyDescriptorPostQueueDepth -
2516		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2517		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2518		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2519		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2520	}
2521
2522
2523	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2524	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2525	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2526	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2527	    ioc->chains_needed_per_io));
2528
2529	ioc->scsiio_depth = ioc->hba_queue_depth -
2530	    ioc->hi_priority_depth - ioc->internal_depth;
2531
2532	/* set the scsi host can_queue depth
2533	 * with some internal commands that could be outstanding
2534	 */
2535	ioc->shost->can_queue = ioc->scsiio_depth;
2536	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2537	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2538
2539	/* contiguous pool for request and chains, 16 byte align, one extra "
2540	 * "frame for smid=0
2541	 */
2542	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2543	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2544
2545	/* hi-priority queue */
2546	sz += (ioc->hi_priority_depth * ioc->request_sz);
2547
2548	/* internal queue */
2549	sz += (ioc->internal_depth * ioc->request_sz);
2550
2551	ioc->request_dma_sz = sz;
2552	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2553	if (!ioc->request) {
2554		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2555		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2556		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2557		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2558		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2559			goto out;
2560		retry_sz += 64;
2561		ioc->hba_queue_depth = max_request_credit - retry_sz;
2562		goto retry_allocation;
2563	}
2564
2565	if (retry_sz)
2566		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2567		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2568		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2569		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2570
2571
2572	/* hi-priority queue */
2573	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2574	    ioc->request_sz);
2575	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2576	    ioc->request_sz);
2577
2578	/* internal queue */
2579	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2580	    ioc->request_sz);
2581	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2582	    ioc->request_sz);
2583
2584
2585	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2586	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2587	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2588	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2589	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2590	    ioc->name, (unsigned long long) ioc->request_dma));
2591	total_sz += sz;
2592
2593	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2594	ioc->scsi_lookup_pages = get_order(sz);
2595	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2596	    GFP_KERNEL, ioc->scsi_lookup_pages);
2597	if (!ioc->scsi_lookup) {
2598		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
2599		    "sz(%d)\n", ioc->name, (int)sz);
2600		goto out;
2601	}
2602
2603	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2604	    "depth(%d)\n", ioc->name, ioc->request,
2605	    ioc->scsiio_depth));
2606
2607	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2608	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2609	ioc->chain_pages = get_order(sz);
2610
2611	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2612	    GFP_KERNEL, ioc->chain_pages);
2613	if (!ioc->chain_lookup) {
2614		printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
2615		    "sz(%d)\n", ioc->name, (int)sz);
2616		goto out;
2617	}
2618	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2619	    ioc->request_sz, 16, 0);
2620	if (!ioc->chain_dma_pool) {
2621		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2622		    "failed\n", ioc->name);
2623		goto out;
2624	}
2625	for (i = 0; i < ioc->chain_depth; i++) {
2626		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2627		    ioc->chain_dma_pool , GFP_KERNEL,
2628		    &ioc->chain_lookup[i].chain_buffer_dma);
2629		if (!ioc->chain_lookup[i].chain_buffer) {
2630			ioc->chain_depth = i;
2631			goto chain_done;
2632		}
2633		total_sz += ioc->request_sz;
2634	}
2635chain_done:
2636	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2637	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2638	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2639	    ioc->request_sz))/1024));
2640
2641	/* initialize hi-priority queue smid's */
2642	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2643	    sizeof(struct request_tracker), GFP_KERNEL);
2644	if (!ioc->hpr_lookup) {
2645		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2646		    ioc->name);
2647		goto out;
2648	}
2649	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2650	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2651	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2652	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2653
2654	/* initialize internal queue smid's */
2655	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2656	    sizeof(struct request_tracker), GFP_KERNEL);
2657	if (!ioc->internal_lookup) {
2658		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2659		    ioc->name);
2660		goto out;
2661	}
2662	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2663	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2664	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2665	     ioc->internal_depth, ioc->internal_smid));
2666
2667	/* sense buffers, 4 byte align */
2668	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2669	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2670	    0);
2671	if (!ioc->sense_dma_pool) {
2672		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2673		    ioc->name);
2674		goto out;
2675	}
2676	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2677	    &ioc->sense_dma);
2678	if (!ioc->sense) {
2679		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2680		    ioc->name);
2681		goto out;
2682	}
2683	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2684	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2685	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2686	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2687	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2688	    ioc->name, (unsigned long long)ioc->sense_dma));
2689	total_sz += sz;
2690
2691	/* reply pool, 4 byte align */
2692	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2693	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2694	    0);
2695	if (!ioc->reply_dma_pool) {
2696		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2697		    ioc->name);
2698		goto out;
2699	}
2700	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2701	    &ioc->reply_dma);
2702	if (!ioc->reply) {
2703		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2704		    ioc->name);
2705		goto out;
2706	}
2707	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2708	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2709	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2710	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2711	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2712	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2713	    ioc->name, (unsigned long long)ioc->reply_dma));
2714	total_sz += sz;
2715
2716	/* reply free queue, 16 byte align */
2717	sz = ioc->reply_free_queue_depth * 4;
2718	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2719	    ioc->pdev, sz, 16, 0);
2720	if (!ioc->reply_free_dma_pool) {
2721		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2722		    "failed\n", ioc->name);
2723		goto out;
2724	}
2725	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2726	    &ioc->reply_free_dma);
2727	if (!ioc->reply_free) {
2728		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2729		    "failed\n", ioc->name);
2730		goto out;
2731	}
2732	memset(ioc->reply_free, 0, sz);
2733	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2734	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2735	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2736	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2737	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2738	total_sz += sz;
2739
2740	/* reply post queue, 16 byte align */
2741	reply_post_free_sz = ioc->reply_post_queue_depth *
2742	    sizeof(Mpi2DefaultReplyDescriptor_t);
2743	if (_base_is_controller_msix_enabled(ioc))
2744		sz = reply_post_free_sz * ioc->reply_queue_count;
2745	else
2746		sz = reply_post_free_sz;
2747	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2748	    ioc->pdev, sz, 16, 0);
2749	if (!ioc->reply_post_free_dma_pool) {
2750		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2751		    "failed\n", ioc->name);
2752		goto out;
2753	}
2754	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2755	    GFP_KERNEL, &ioc->reply_post_free_dma);
2756	if (!ioc->reply_post_free) {
2757		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2758		    "failed\n", ioc->name);
2759		goto out;
2760	}
2761	memset(ioc->reply_post_free, 0, sz);
2762	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2763	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2764	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2765	    sz/1024));
2766	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2767	    "(0x%llx)\n", ioc->name, (unsigned long long)
2768	    ioc->reply_post_free_dma));
2769	total_sz += sz;
2770
2771	ioc->config_page_sz = 512;
2772	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2773	    ioc->config_page_sz, &ioc->config_page_dma);
2774	if (!ioc->config_page) {
2775		printk(MPT2SAS_ERR_FMT "config page: pci_pool_alloc "
2776		    "failed\n", ioc->name);
2777		goto out;
2778	}
2779	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2780	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2781	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2782	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2783	total_sz += ioc->config_page_sz;
2784
2785	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2786	    ioc->name, total_sz/1024);
2787	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2788	    "Max Controller Queue Depth(%d)\n",
2789	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2790	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2791	    ioc->name, ioc->shost->sg_tablesize);
2792	return 0;
2793
2794 out:
2795	return -ENOMEM;
2796}
2797
2798
2799/**
2800 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2801 * @ioc: Pointer to MPT_ADAPTER structure
2802 * @cooked: Request raw or cooked IOC state
2803 *
2804 * Returns all IOC Doorbell register bits if cooked==0, else just the
2805 * Doorbell bits in MPI_IOC_STATE_MASK.
2806 */
2807u32
2808mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2809{
2810	u32 s, sc;
2811
2812	s = readl(&ioc->chip->Doorbell);
2813	sc = s & MPI2_IOC_STATE_MASK;
2814	return cooked ? sc : s;
2815}
2816
2817/**
2818 * _base_wait_on_iocstate - waiting on a particular ioc state
2819 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2820 * @timeout: timeout in second
2821 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2822 *
2823 * Returns 0 for success, non-zero for failure.
2824 */
2825static int
2826_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2827    int sleep_flag)
2828{
2829	u32 count, cntdn;
2830	u32 current_state;
2831
2832	count = 0;
2833	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2834	do {
2835		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2836		if (current_state == ioc_state)
2837			return 0;
2838		if (count && current_state == MPI2_IOC_STATE_FAULT)
2839			break;
2840		if (sleep_flag == CAN_SLEEP)
2841			msleep(1);
2842		else
2843			udelay(500);
2844		count++;
2845	} while (--cntdn);
2846
2847	return current_state;
2848}
2849
2850/**
2851 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
2852 * a write to the doorbell)
2853 * @ioc: per adapter object
2854 * @timeout: timeout in second
2855 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2856 *
2857 * Returns 0 for success, non-zero for failure.
2858 *
2859 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2860 */
2861static int
2862_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2863    int sleep_flag)
2864{
2865	u32 cntdn, count;
2866	u32 int_status;
2867
2868	count = 0;
2869	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2870	do {
2871		int_status = readl(&ioc->chip->HostInterruptStatus);
2872		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2873			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2874			    "successful count(%d), timeout(%d)\n", ioc->name,
2875			    __func__, count, timeout));
2876			return 0;
2877		}
2878		if (sleep_flag == CAN_SLEEP)
2879			msleep(1);
2880		else
2881			udelay(500);
2882		count++;
2883	} while (--cntdn);
2884
2885	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2886	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2887	return -EFAULT;
2888}
2889
2890/**
2891 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2892 * @ioc: per adapter object
2893 * @timeout: timeout in second
2894 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2895 *
2896 * Returns 0 for success, non-zero for failure.
2897 *
2898 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2899 * doorbell.
2900 */
2901static int
2902_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2903    int sleep_flag)
2904{
2905	u32 cntdn, count;
2906	u32 int_status;
2907	u32 doorbell;
2908
2909	count = 0;
2910	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2911	do {
2912		int_status = readl(&ioc->chip->HostInterruptStatus);
2913		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2914			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2915			    "successful count(%d), timeout(%d)\n", ioc->name,
2916			    __func__, count, timeout));
2917			return 0;
2918		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2919			doorbell = readl(&ioc->chip->Doorbell);
2920			if ((doorbell & MPI2_IOC_STATE_MASK) ==
2921			    MPI2_IOC_STATE_FAULT) {
2922				mpt2sas_base_fault_info(ioc , doorbell);
2923				return -EFAULT;
2924			}
2925		} else if (int_status == 0xFFFFFFFF)
2926			goto out;
2927
2928		if (sleep_flag == CAN_SLEEP)
2929			msleep(1);
2930		else
2931			udelay(500);
2932		count++;
2933	} while (--cntdn);
2934
2935 out:
2936	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2937	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2938	return -EFAULT;
2939}
2940
2941/**
2942 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2943 * @ioc: per adapter object
2944 * @timeout: timeout in second
2945 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2946 *
2947 * Returns 0 for success, non-zero for failure.
2948 *
2949 */
2950static int
2951_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2952    int sleep_flag)
2953{
2954	u32 cntdn, count;
2955	u32 doorbell_reg;
2956
2957	count = 0;
2958	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2959	do {
2960		doorbell_reg = readl(&ioc->chip->Doorbell);
2961		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2962			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2963			    "successful count(%d), timeout(%d)\n", ioc->name,
2964			    __func__, count, timeout));
2965			return 0;
2966		}
2967		if (sleep_flag == CAN_SLEEP)
2968			msleep(1);
2969		else
2970			udelay(500);
2971		count++;
2972	} while (--cntdn);
2973
2974	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2975	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2976	return -EFAULT;
2977}
2978
2979/**
2980 * _base_send_ioc_reset - send doorbell reset
2981 * @ioc: per adapter object
2982 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
2983 * @timeout: timeout in second
2984 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2985 *
2986 * Returns 0 for success, non-zero for failure.
2987 */
2988static int
2989_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2990    int sleep_flag)
2991{
2992	u32 ioc_state;
2993	int r = 0;
2994
2995	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2996		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2997		    ioc->name, __func__);
2998		return -EFAULT;
2999	}
3000
3001	if (!(ioc->facts.IOCCapabilities &
3002	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3003		return -EFAULT;
3004
3005	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
3006
3007	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3008	    &ioc->chip->Doorbell);
3009	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3010		r = -EFAULT;
3011		goto out;
3012	}
3013	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3014	    timeout, sleep_flag);
3015	if (ioc_state) {
3016		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3017		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3018		r = -EFAULT;
3019		goto out;
3020	}
3021 out:
3022	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
3023	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3024	return r;
3025}
3026
3027/**
3028 * _base_handshake_req_reply_wait - send request thru doorbell interface
3029 * @ioc: per adapter object
3030 * @request_bytes: request length
3031 * @request: pointer having request payload
3032 * @reply_bytes: reply length
3033 * @reply: pointer to reply payload
3034 * @timeout: timeout in second
3035 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3036 *
3037 * Returns 0 for success, non-zero for failure.
3038 */
3039static int
3040_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
3041    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3042{
3043	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3044	int i;
3045	u8 failed;
3046	u16 dummy;
3047	__le32 *mfp;
3048
3049	/* make sure doorbell is not in use */
3050	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3051		printk(MPT2SAS_ERR_FMT "doorbell is in use "
3052		    " (line=%d)\n", ioc->name, __LINE__);
3053		return -EFAULT;
3054	}
3055
3056	/* clear pending doorbell interrupts from previous state changes */
3057	if (readl(&ioc->chip->HostInterruptStatus) &
3058	    MPI2_HIS_IOC2SYS_DB_STATUS)
3059		writel(0, &ioc->chip->HostInterruptStatus);
3060
3061	/* send message to ioc */
3062	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3063	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3064	    &ioc->chip->Doorbell);
3065
3066	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3067		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3068		   "int failed (line=%d)\n", ioc->name, __LINE__);
3069		return -EFAULT;
3070	}
3071	writel(0, &ioc->chip->HostInterruptStatus);
3072
3073	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3074		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3075		    "ack failed (line=%d)\n", ioc->name, __LINE__);
3076		return -EFAULT;
3077	}
3078
3079	/* send message 32-bits at a time */
3080	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3081		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3082		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3083			failed = 1;
3084	}
3085
3086	if (failed) {
3087		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3088		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
3089		return -EFAULT;
3090	}
3091
3092	/* now wait for the reply */
3093	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3094		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3095		   "int failed (line=%d)\n", ioc->name, __LINE__);
3096		return -EFAULT;
3097	}
3098
3099	/* read the first two 16-bits, it gives the total length of the reply */
3100	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3101	    & MPI2_DOORBELL_DATA_MASK);
3102	writel(0, &ioc->chip->HostInterruptStatus);
3103	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3104		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3105		   "int failed (line=%d)\n", ioc->name, __LINE__);
3106		return -EFAULT;
3107	}
3108	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3109	    & MPI2_DOORBELL_DATA_MASK);
3110	writel(0, &ioc->chip->HostInterruptStatus);
3111
3112	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3113		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3114			printk(MPT2SAS_ERR_FMT "doorbell "
3115			    "handshake int failed (line=%d)\n", ioc->name,
3116			    __LINE__);
3117			return -EFAULT;
3118		}
3119		if (i >=  reply_bytes/2) /* overflow case */
3120			dummy = readl(&ioc->chip->Doorbell);
3121		else
3122			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3123			    & MPI2_DOORBELL_DATA_MASK);
3124		writel(0, &ioc->chip->HostInterruptStatus);
3125	}
3126
3127	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3128	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3129		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
3130		    " (line=%d)\n", ioc->name, __LINE__));
3131	}
3132	writel(0, &ioc->chip->HostInterruptStatus);
3133
3134	if (ioc->logging_level & MPT_DEBUG_INIT) {
3135		mfp = (__le32 *)reply;
3136		printk(KERN_INFO "\toffset:data\n");
3137		for (i = 0; i < reply_bytes/4; i++)
3138			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3139			    le32_to_cpu(mfp[i]));
3140	}
3141	return 0;
3142}
3143
3144/**
3145 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
3146 * @ioc: per adapter object
3147 * @mpi_reply: the reply payload from FW
3148 * @mpi_request: the request payload sent to FW
3149 *
3150 * The SAS IO Unit Control Request message allows the host to perform low-level
3151 * operations, such as resets on the PHYs of the IO Unit, also allows the host
3152 * to obtain the IOC assigned device handles for a device if it has other
3153 * identifying information about the device, in addition allows the host to
3154 * remove IOC resources associated with the device.
3155 *
3156 * Returns 0 for success, non-zero for failure.
3157 */
3158int
3159mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
3160    Mpi2SasIoUnitControlReply_t *mpi_reply,
3161    Mpi2SasIoUnitControlRequest_t *mpi_request)
3162{
3163	u16 smid;
3164	u32 ioc_state;
3165	unsigned long timeleft;
3166	u8 issue_reset;
3167	int rc;
3168	void *request;
3169	u16 wait_state_count;
3170
3171	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3172	    __func__));
3173
3174	mutex_lock(&ioc->base_cmds.mutex);
3175
3176	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3177		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3178		    ioc->name, __func__);
3179		rc = -EAGAIN;
3180		goto out;
3181	}
3182
3183	wait_state_count = 0;
3184	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3185	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3186		if (wait_state_count++ == 10) {
3187			printk(MPT2SAS_ERR_FMT
3188			    "%s: failed due to ioc not operational\n",
3189			    ioc->name, __func__);
3190			rc = -EFAULT;
3191			goto out;
3192		}
3193		ssleep(1);
3194		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3195		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3196		    "operational state(count=%d)\n", ioc->name,
3197		    __func__, wait_state_count);
3198	}
3199
3200	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3201	if (!smid) {
3202		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3203		    ioc->name, __func__);
3204		rc = -EAGAIN;
3205		goto out;
3206	}
3207
3208	rc = 0;
3209	ioc->base_cmds.status = MPT2_CMD_PENDING;
3210	request = mpt2sas_base_get_msg_frame(ioc, smid);
3211	ioc->base_cmds.smid = smid;
3212	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3213	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3214	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3215		ioc->ioc_link_reset_in_progress = 1;
3216	init_completion(&ioc->base_cmds.done);
3217	mpt2sas_base_put_smid_default(ioc, smid);
3218	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3219	    msecs_to_jiffies(10000));
3220	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3221	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3222	    ioc->ioc_link_reset_in_progress)
3223		ioc->ioc_link_reset_in_progress = 0;
3224	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3225		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3226		    ioc->name, __func__);
3227		_debug_dump_mf(mpi_request,
3228		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3229		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3230			issue_reset = 1;
3231		goto issue_host_reset;
3232	}
3233	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3234		memcpy(mpi_reply, ioc->base_cmds.reply,
3235		    sizeof(Mpi2SasIoUnitControlReply_t));
3236	else
3237		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3238	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3239	goto out;
3240
3241 issue_host_reset:
3242	if (issue_reset)
3243		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3244		    FORCE_BIG_HAMMER);
3245	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3246	rc = -EFAULT;
3247 out:
3248	mutex_unlock(&ioc->base_cmds.mutex);
3249	return rc;
3250}
3251
3252
3253/**
3254 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
3255 * @ioc: per adapter object
3256 * @mpi_reply: the reply payload from FW
3257 * @mpi_request: the request payload sent to FW
3258 *
3259 * The SCSI Enclosure Processor request message causes the IOC to
3260 * communicate with SES devices to control LED status signals.
3261 *
3262 * Returns 0 for success, non-zero for failure.
3263 */
3264int
3265mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
3266    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3267{
3268	u16 smid;
3269	u32 ioc_state;
3270	unsigned long timeleft;
3271	u8 issue_reset;
3272	int rc;
3273	void *request;
3274	u16 wait_state_count;
3275
3276	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3277	    __func__));
3278
3279	mutex_lock(&ioc->base_cmds.mutex);
3280
3281	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3282		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3283		    ioc->name, __func__);
3284		rc = -EAGAIN;
3285		goto out;
3286	}
3287
3288	wait_state_count = 0;
3289	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3290	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3291		if (wait_state_count++ == 10) {
3292			printk(MPT2SAS_ERR_FMT
3293			    "%s: failed due to ioc not operational\n",
3294			    ioc->name, __func__);
3295			rc = -EFAULT;
3296			goto out;
3297		}
3298		ssleep(1);
3299		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3300		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3301		    "operational state(count=%d)\n", ioc->name,
3302		    __func__, wait_state_count);
3303	}
3304
3305	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3306	if (!smid) {
3307		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3308		    ioc->name, __func__);
3309		rc = -EAGAIN;
3310		goto out;
3311	}
3312
3313	rc = 0;
3314	ioc->base_cmds.status = MPT2_CMD_PENDING;
3315	request = mpt2sas_base_get_msg_frame(ioc, smid);
3316	ioc->base_cmds.smid = smid;
3317	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
3318	init_completion(&ioc->base_cmds.done);
3319	mpt2sas_base_put_smid_default(ioc, smid);
3320	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3321	    msecs_to_jiffies(10000));
3322	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3323		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3324		    ioc->name, __func__);
3325		_debug_dump_mf(mpi_request,
3326		    sizeof(Mpi2SepRequest_t)/4);
3327		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3328			issue_reset = 1;
3329		goto issue_host_reset;
3330	}
3331	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3332		memcpy(mpi_reply, ioc->base_cmds.reply,
3333		    sizeof(Mpi2SepReply_t));
3334	else
3335		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3336	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3337	goto out;
3338
3339 issue_host_reset:
3340	if (issue_reset)
3341		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3342		    FORCE_BIG_HAMMER);
3343	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3344	rc = -EFAULT;
3345 out:
3346	mutex_unlock(&ioc->base_cmds.mutex);
3347	return rc;
3348}
3349
3350/**
3351 * _base_get_port_facts - obtain port facts reply and save in ioc
3352 * @ioc: per adapter object
3353 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3354 *
3355 * Returns 0 for success, non-zero for failure.
3356 */
3357static int
3358_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3359{
3360	Mpi2PortFactsRequest_t mpi_request;
3361	Mpi2PortFactsReply_t mpi_reply;
3362	struct mpt2sas_port_facts *pfacts;
3363	int mpi_reply_sz, mpi_request_sz, r;
3364
3365	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3366	    __func__));
3367
3368	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3369	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3370	memset(&mpi_request, 0, mpi_request_sz);
3371	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3372	mpi_request.PortNumber = port;
3373	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3374	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3375
3376	if (r != 0) {
3377		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3378		    ioc->name, __func__, r);
3379		return r;
3380	}
3381
3382	pfacts = &ioc->pfacts[port];
3383	memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
3384	pfacts->PortNumber = mpi_reply.PortNumber;
3385	pfacts->VP_ID = mpi_reply.VP_ID;
3386	pfacts->VF_ID = mpi_reply.VF_ID;
3387	pfacts->MaxPostedCmdBuffers =
3388	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3389
3390	return 0;
3391}
3392
3393/**
3394 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3395 * @ioc: per adapter object
3396 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3397 *
3398 * Returns 0 for success, non-zero for failure.
3399 */
3400static int
3401_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3402{
3403	Mpi2IOCFactsRequest_t mpi_request;
3404	Mpi2IOCFactsReply_t mpi_reply;
3405	struct mpt2sas_facts *facts;
3406	int mpi_reply_sz, mpi_request_sz, r;
3407
3408	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3409	    __func__));
3410
3411	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3412	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3413	memset(&mpi_request, 0, mpi_request_sz);
3414	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3415	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3416	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3417
3418	if (r != 0) {
3419		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3420		    ioc->name, __func__, r);
3421		return r;
3422	}
3423
3424	facts = &ioc->facts;
3425	memset(facts, 0, sizeof(struct mpt2sas_facts));
3426	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3427	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3428	facts->VP_ID = mpi_reply.VP_ID;
3429	facts->VF_ID = mpi_reply.VF_ID;
3430	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3431	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3432	facts->WhoInit = mpi_reply.WhoInit;
3433	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3434	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3435	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3436	facts->MaxReplyDescriptorPostQueueDepth =
3437	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3438	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3439	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3440	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3441		ioc->ir_firmware = 1;
3442	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3443	facts->IOCRequestFrameSize =
3444	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3445	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3446	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3447	ioc->shost->max_id = -1;
3448	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3449	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3450	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3451	facts->HighPriorityCredit =
3452	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3453	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3454	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3455
3456	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
3457	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
3458	    facts->MaxChainDepth));
3459	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
3460	    "reply frame size(%d)\n", ioc->name,
3461	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3462	return 0;
3463}
3464
3465/**
3466 * _base_send_ioc_init - send ioc_init to firmware
3467 * @ioc: per adapter object
3468 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3469 *
3470 * Returns 0 for success, non-zero for failure.
3471 */
3472static int
3473_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3474{
3475	Mpi2IOCInitRequest_t mpi_request;
3476	Mpi2IOCInitReply_t mpi_reply;
3477	int r;
3478	struct timeval current_time;
3479	u16 ioc_status;
3480
3481	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3482	    __func__));
3483
3484	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3485	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3486	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3487	mpi_request.VF_ID = 0; /* TODO */
3488	mpi_request.VP_ID = 0;
3489	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3490	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3491
3492	if (_base_is_controller_msix_enabled(ioc))
3493		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3494	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3495	mpi_request.ReplyDescriptorPostQueueDepth =
3496	    cpu_to_le16(ioc->reply_post_queue_depth);
3497	mpi_request.ReplyFreeQueueDepth =
3498	    cpu_to_le16(ioc->reply_free_queue_depth);
3499
3500	mpi_request.SenseBufferAddressHigh =
3501	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3502	mpi_request.SystemReplyAddressHigh =
3503	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3504	mpi_request.SystemRequestFrameBaseAddress =
3505	    cpu_to_le64((u64)ioc->request_dma);
3506	mpi_request.ReplyFreeQueueAddress =
3507	    cpu_to_le64((u64)ioc->reply_free_dma);
3508	mpi_request.ReplyDescriptorPostQueueAddress =
3509	    cpu_to_le64((u64)ioc->reply_post_free_dma);
3510
3511
3512	/* This time stamp specifies number of milliseconds
3513	 * since epoch ~ midnight January 1, 1970.
3514	 */
3515	do_gettimeofday(&current_time);
3516	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3517	    (current_time.tv_usec / 1000));
3518
3519	if (ioc->logging_level & MPT_DEBUG_INIT) {
3520		__le32 *mfp;
3521		int i;
3522
3523		mfp = (__le32 *)&mpi_request;
3524		printk(KERN_INFO "\toffset:data\n");
3525		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3526			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3527			    le32_to_cpu(mfp[i]));
3528	}
3529
3530	r = _base_handshake_req_reply_wait(ioc,
3531	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3532	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3533	    sleep_flag);
3534
3535	if (r != 0) {
3536		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3537		    ioc->name, __func__, r);
3538		return r;
3539	}
3540
3541	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3542	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3543	    mpi_reply.IOCLogInfo) {
3544		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3545		r = -EIO;
3546	}
3547
3548	return 0;
3549}
3550
3551/**
3552 * mpt2sas_port_enable_done - command completion routine for port enable
3553 * @ioc: per adapter object
3554 * @smid: system request message index
3555 * @msix_index: MSIX table index supplied by the OS
3556 * @reply: reply message frame(lower 32bit addr)
3557 *
3558 * Return 1 meaning mf should be freed from _base_interrupt
3559 *        0 means the mf is freed from this function.
3560 */
3561u8
3562mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3563	u32 reply)
3564{
3565	MPI2DefaultReply_t *mpi_reply;
3566	u16 ioc_status;
3567
3568	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3569	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
3570		return 1;
3571
3572	if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
3573		return 1;
3574
3575	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
3576	if (mpi_reply) {
3577		ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
3578		memcpy(ioc->port_enable_cmds.reply, mpi_reply,
3579		    mpi_reply->MsgLength*4);
3580	}
3581	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
3582
3583	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3584
3585	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3586		ioc->port_enable_failed = 1;
3587
3588	if (ioc->is_driver_loading) {
3589		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3590			mpt2sas_port_enable_complete(ioc);
3591			return 1;
3592		} else {
3593			ioc->start_scan_failed = ioc_status;
3594			ioc->start_scan = 0;
3595			return 1;
3596		}
3597	}
3598	complete(&ioc->port_enable_cmds.done);
3599	return 1;
3600}
3601
3602
3603/**
3604 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3605 * @ioc: per adapter object
3606 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3607 *
3608 * Returns 0 for success, non-zero for failure.
3609 */
3610static int
3611_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3612{
3613	Mpi2PortEnableRequest_t *mpi_request;
3614	Mpi2PortEnableReply_t *mpi_reply;
3615	unsigned long timeleft;
3616	int r = 0;
3617	u16 smid;
3618	u16 ioc_status;
3619
3620	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3621
3622	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3623		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3624		    ioc->name, __func__);
3625		return -EAGAIN;
3626	}
3627
3628	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3629	if (!smid) {
3630		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3631		    ioc->name, __func__);
3632		return -EAGAIN;
3633	}
3634
3635	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3636	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3637	ioc->port_enable_cmds.smid = smid;
3638	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3639	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3640
3641	init_completion(&ioc->port_enable_cmds.done);
3642	mpt2sas_base_put_smid_default(ioc, smid);
3643	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3644	    300*HZ);
3645	if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
3646		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3647		    ioc->name, __func__);
3648		_debug_dump_mf(mpi_request,
3649		    sizeof(Mpi2PortEnableRequest_t)/4);
3650		if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
3651			r = -EFAULT;
3652		else
3653			r = -ETIME;
3654		goto out;
3655	}
3656	mpi_reply = ioc->port_enable_cmds.reply;
3657
3658	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3659	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3660		printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
3661		    ioc->name, __func__, ioc_status);
3662		r = -EFAULT;
3663		goto out;
3664	}
3665 out:
3666	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
3667	printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3668	    "SUCCESS" : "FAILED"));
3669	return r;
3670}
3671
3672/**
3673 * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
3674 * @ioc: per adapter object
3675 *
3676 * Returns 0 for success, non-zero for failure.
3677 */
3678int
3679mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
3680{
3681	Mpi2PortEnableRequest_t *mpi_request;
3682	u16 smid;
3683
3684	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3685
3686	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3687		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3688		    ioc->name, __func__);
3689		return -EAGAIN;
3690	}
3691
3692	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3693	if (!smid) {
3694		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3695		    ioc->name, __func__);
3696		return -EAGAIN;
3697	}
3698
3699	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3700	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3701	ioc->port_enable_cmds.smid = smid;
3702	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3703	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3704
3705	mpt2sas_base_put_smid_default(ioc, smid);
3706	return 0;
3707}
3708
3709/**
3710 * _base_determine_wait_on_discovery - desposition
3711 * @ioc: per adapter object
3712 *
3713 * Decide whether to wait on discovery to complete. Used to either
3714 * locate boot device, or report volumes ahead of physical devices.
3715 *
3716 * Returns 1 for wait, 0 for don't wait
3717 */
3718static int
3719_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
3720{
3721	/* We wait for discovery to complete if IR firmware is loaded.
3722	 * The sas topology events arrive before PD events, so we need time to
3723	 * turn on the bit in ioc->pd_handles to indicate PD
3724	 * Also, it maybe required to report Volumes ahead of physical
3725	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3726	 */
3727	if (ioc->ir_firmware)
3728		return 1;
3729
3730	/* if no Bios, then we don't need to wait */
3731	if (!ioc->bios_pg3.BiosVersion)
3732		return 0;
3733
3734	/* Bios is present, then we drop down here.
3735	 *
3736	 * If there any entries in the Bios Page 2, then we wait
3737	 * for discovery to complete.
3738	 */
3739
3740	/* Current Boot Device */
3741	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3742	    MPI2_BIOSPAGE2_FORM_MASK) ==
3743	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3744	/* Request Boot Device */
3745	   (ioc->bios_pg2.ReqBootDeviceForm &
3746	    MPI2_BIOSPAGE2_FORM_MASK) ==
3747	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3748	/* Alternate Request Boot Device */
3749	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3750	    MPI2_BIOSPAGE2_FORM_MASK) ==
3751	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3752		return 0;
3753
3754	return 1;
3755}
3756
3757
3758/**
3759 * _base_unmask_events - turn on notification for this event
3760 * @ioc: per adapter object
3761 * @event: firmware event
3762 *
3763 * The mask is stored in ioc->event_masks.
3764 */
3765static void
3766_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3767{
3768	u32 desired_event;
3769
3770	if (event >= 128)
3771		return;
3772
3773	desired_event = (1 << (event % 32));
3774
3775	if (event < 32)
3776		ioc->event_masks[0] &= ~desired_event;
3777	else if (event < 64)
3778		ioc->event_masks[1] &= ~desired_event;
3779	else if (event < 96)
3780		ioc->event_masks[2] &= ~desired_event;
3781	else if (event < 128)
3782		ioc->event_masks[3] &= ~desired_event;
3783}
3784
3785/**
3786 * _base_event_notification - send event notification
3787 * @ioc: per adapter object
3788 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3789 *
3790 * Returns 0 for success, non-zero for failure.
3791 */
3792static int
3793_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3794{
3795	Mpi2EventNotificationRequest_t *mpi_request;
3796	unsigned long timeleft;
3797	u16 smid;
3798	int r = 0;
3799	int i;
3800
3801	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3802	    __func__));
3803
3804	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3805		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3806		    ioc->name, __func__);
3807		return -EAGAIN;
3808	}
3809
3810	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3811	if (!smid) {
3812		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3813		    ioc->name, __func__);
3814		return -EAGAIN;
3815	}
3816	ioc->base_cmds.status = MPT2_CMD_PENDING;
3817	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3818	ioc->base_cmds.smid = smid;
3819	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3820	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3821	mpi_request->VF_ID = 0; /* TODO */
3822	mpi_request->VP_ID = 0;
3823	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3824		mpi_request->EventMasks[i] =
3825		    cpu_to_le32(ioc->event_masks[i]);
3826	init_completion(&ioc->base_cmds.done);
3827	mpt2sas_base_put_smid_default(ioc, smid);
3828	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3829	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3830		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3831		    ioc->name, __func__);
3832		_debug_dump_mf(mpi_request,
3833		    sizeof(Mpi2EventNotificationRequest_t)/4);
3834		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3835			r = -EFAULT;
3836		else
3837			r = -ETIME;
3838	} else
3839		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3840		    ioc->name, __func__));
3841	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3842	return r;
3843}
3844
3845/**
3846 * mpt2sas_base_validate_event_type - validating event types
3847 * @ioc: per adapter object
3848 * @event: firmware event
3849 *
3850 * This will turn on firmware event notification when application
3851 * ask for that event. We don't mask events that are already enabled.
3852 */
3853void
3854mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
3855{
3856	int i, j;
3857	u32 event_mask, desired_event;
3858	u8 send_update_to_fw;
3859
3860	for (i = 0, send_update_to_fw = 0; i <
3861	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3862		event_mask = ~event_type[i];
3863		desired_event = 1;
3864		for (j = 0; j < 32; j++) {
3865			if (!(event_mask & desired_event) &&
3866			    (ioc->event_masks[i] & desired_event)) {
3867				ioc->event_masks[i] &= ~desired_event;
3868				send_update_to_fw = 1;
3869			}
3870			desired_event = (desired_event << 1);
3871		}
3872	}
3873
3874	if (!send_update_to_fw)
3875		return;
3876
3877	mutex_lock(&ioc->base_cmds.mutex);
3878	_base_event_notification(ioc, CAN_SLEEP);
3879	mutex_unlock(&ioc->base_cmds.mutex);
3880}
3881
3882/**
3883 * _base_diag_reset - the "big hammer" start of day reset
3884 * @ioc: per adapter object
3885 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3886 *
3887 * Returns 0 for success, non-zero for failure.
3888 */
3889static int
3890_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3891{
3892	u32 host_diagnostic;
3893	u32 ioc_state;
3894	u32 count;
3895	u32 hcb_size;
3896
3897	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3898	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3899	    ioc->name));
3900
3901	count = 0;
3902	do {
3903		/* Write magic sequence to WriteSequence register
3904		 * Loop until in diagnostic mode
3905		 */
3906		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
3907		    "sequence\n", ioc->name));
3908		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3909		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
3910		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
3911		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
3912		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
3913		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
3914		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
3915
3916		/* wait 100 msec */
3917		if (sleep_flag == CAN_SLEEP)
3918			msleep(100);
3919		else
3920			mdelay(100);
3921
3922		if (count++ > 20)
3923			goto out;
3924
3925		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3926		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
3927		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
3928		    ioc->name, count, host_diagnostic));
3929
3930	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
3931
3932	hcb_size = readl(&ioc->chip->HCBSize);
3933
3934	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
3935	    ioc->name));
3936	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3937	     &ioc->chip->HostDiagnostic);
3938
3939	/* don't access any registers for 50 milliseconds */
3940	msleep(50);
3941
3942	/* 300 second max wait */
3943	for (count = 0; count < 3000000 ; count++) {
3944
3945		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3946
3947		if (host_diagnostic == 0xFFFFFFFF)
3948			goto out;
3949		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
3950			break;
3951
3952		/* wait 100 msec */
3953		if (sleep_flag == CAN_SLEEP)
3954			msleep(1);
3955		else
3956			mdelay(1);
3957	}
3958
3959	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3960
3961		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
3962		    "assuming the HCB Address points to good F/W\n",
3963		    ioc->name));
3964		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3965		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3966		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3967
3968		drsprintk(ioc, printk(MPT2SAS_INFO_FMT
3969		    "re-enable the HCDW\n", ioc->name));
3970		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3971		    &ioc->chip->HCBSize);
3972	}
3973
3974	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
3975	    ioc->name));
3976	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3977	    &ioc->chip->HostDiagnostic);
3978
3979	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
3980	    "diagnostic register\n", ioc->name));
3981	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3982
3983	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
3984	    "READY state\n", ioc->name));
3985	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3986	    sleep_flag);
3987	if (ioc_state) {
3988		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3989		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3990		goto out;
3991	}
3992
3993	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
3994	return 0;
3995
3996 out:
3997	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
3998	return -EFAULT;
3999}
4000
4001/**
4002 * _base_make_ioc_ready - put controller in READY state
4003 * @ioc: per adapter object
4004 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4005 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4006 *
4007 * Returns 0 for success, non-zero for failure.
4008 */
4009static int
4010_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4011    enum reset_type type)
4012{
4013	u32 ioc_state;
4014	int rc;
4015
4016	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4017	    __func__));
4018
4019	if (ioc->pci_error_recovery)
4020		return 0;
4021
4022	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4023	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
4024	    ioc->name, __func__, ioc_state));
4025
4026	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4027		return 0;
4028
4029	if (ioc_state & MPI2_DOORBELL_USED) {
4030		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
4031		    "active!\n", ioc->name));
4032		goto issue_diag_reset;
4033	}
4034
4035	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4036		mpt2sas_base_fault_info(ioc, ioc_state &
4037		    MPI2_DOORBELL_DATA_MASK);
4038		goto issue_diag_reset;
4039	}
4040
4041	if (type == FORCE_BIG_HAMMER)
4042		goto issue_diag_reset;
4043
4044	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4045		if (!(_base_send_ioc_reset(ioc,
4046		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4047			ioc->ioc_reset_count++;
4048			return 0;
4049	}
4050
4051 issue_diag_reset:
4052	rc = _base_diag_reset(ioc, CAN_SLEEP);
4053	ioc->ioc_reset_count++;
4054	return rc;
4055}
4056
4057/**
4058 * _base_make_ioc_operational - put controller in OPERATIONAL state
4059 * @ioc: per adapter object
4060 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4061 *
4062 * Returns 0 for success, non-zero for failure.
4063 */
4064static int
4065_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4066{
4067	int r, i;
4068	unsigned long	flags;
4069	u32 reply_address;
4070	u16 smid;
4071	struct _tr_list *delayed_tr, *delayed_tr_next;
4072	u8 hide_flag;
4073	struct adapter_reply_queue *reply_q;
4074	long reply_post_free;
4075	u32 reply_post_free_sz;
4076
4077	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4078	    __func__));
4079
4080	/* clean the delayed target reset list */
4081	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4082	    &ioc->delayed_tr_list, list) {
4083		list_del(&delayed_tr->list);
4084		kfree(delayed_tr);
4085	}
4086
4087	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4088	    &ioc->delayed_tr_volume_list, list) {
4089		list_del(&delayed_tr->list);
4090		kfree(delayed_tr);
4091	}
4092
4093	/* initialize the scsi lookup free list */
4094	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4095	INIT_LIST_HEAD(&ioc->free_list);
4096	smid = 1;
4097	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4098		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4099		ioc->scsi_lookup[i].cb_idx = 0xFF;
4100		ioc->scsi_lookup[i].smid = smid;
4101		ioc->scsi_lookup[i].scmd = NULL;
4102		ioc->scsi_lookup[i].direct_io = 0;
4103		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4104		    &ioc->free_list);
4105	}
4106
4107	/* hi-priority queue */
4108	INIT_LIST_HEAD(&ioc->hpr_free_list);
4109	smid = ioc->hi_priority_smid;
4110	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4111		ioc->hpr_lookup[i].cb_idx = 0xFF;
4112		ioc->hpr_lookup[i].smid = smid;
4113		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4114		    &ioc->hpr_free_list);
4115	}
4116
4117	/* internal queue */
4118	INIT_LIST_HEAD(&ioc->internal_free_list);
4119	smid = ioc->internal_smid;
4120	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4121		ioc->internal_lookup[i].cb_idx = 0xFF;
4122		ioc->internal_lookup[i].smid = smid;
4123		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4124		    &ioc->internal_free_list);
4125	}
4126
4127	/* chain pool */
4128	INIT_LIST_HEAD(&ioc->free_chain_list);
4129	for (i = 0; i < ioc->chain_depth; i++)
4130		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4131		    &ioc->free_chain_list);
4132
4133	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4134
4135	/* initialize Reply Free Queue */
4136	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4137	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4138	    ioc->reply_sz)
4139		ioc->reply_free[i] = cpu_to_le32(reply_address);
4140
4141	/* initialize reply queues */
4142	if (ioc->is_driver_loading)
4143		_base_assign_reply_queues(ioc);
4144
4145	/* initialize Reply Post Free Queue */
4146	reply_post_free = (long)ioc->reply_post_free;
4147	reply_post_free_sz = ioc->reply_post_queue_depth *
4148	    sizeof(Mpi2DefaultReplyDescriptor_t);
4149	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4150		reply_q->reply_post_host_index = 0;
4151		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4152		    reply_post_free;
4153		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4154			reply_q->reply_post_free[i].Words =
4155							cpu_to_le64(ULLONG_MAX);
4156		if (!_base_is_controller_msix_enabled(ioc))
4157			goto skip_init_reply_post_free_queue;
4158		reply_post_free += reply_post_free_sz;
4159	}
4160 skip_init_reply_post_free_queue:
4161
4162	r = _base_send_ioc_init(ioc, sleep_flag);
4163	if (r)
4164		return r;
4165
4166	/* initialize reply free host index */
4167	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4168	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4169
4170	/* initialize reply post host index */
4171	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4172		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4173		    &ioc->chip->ReplyPostHostIndex);
4174		if (!_base_is_controller_msix_enabled(ioc))
4175			goto skip_init_reply_post_host_index;
4176	}
4177
4178 skip_init_reply_post_host_index:
4179
4180	_base_unmask_interrupts(ioc);
4181
4182	r = _base_event_notification(ioc, sleep_flag);
4183	if (r)
4184		return r;
4185
4186	if (sleep_flag == CAN_SLEEP)
4187		_base_static_config_pages(ioc);
4188
4189
4190	if (ioc->is_driver_loading) {
4191		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4192		    == 0x80) {
4193			hide_flag = (u8) (
4194			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
4195			    MFG_PAGE10_HIDE_SSDS_MASK);
4196			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4197				ioc->mfg_pg10_hide_flag = hide_flag;
4198		}
4199		ioc->wait_for_discovery_to_complete =
4200		    _base_determine_wait_on_discovery(ioc);
4201		return r; /* scan_start and scan_finished support */
4202	}
4203	r = _base_send_port_enable(ioc, sleep_flag);
4204	if (r)
4205		return r;
4206
4207	return r;
4208}
4209
4210/**
4211 * mpt2sas_base_free_resources - free resources controller resources (io/irq/memap)
4212 * @ioc: per adapter object
4213 *
4214 * Return nothing.
4215 */
4216void
4217mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4218{
4219	struct pci_dev *pdev = ioc->pdev;
4220
4221	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4222	    __func__));
4223
4224	_base_mask_interrupts(ioc);
4225	ioc->shost_recovery = 1;
4226	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4227	ioc->shost_recovery = 0;
4228	_base_free_irq(ioc);
4229	_base_disable_msix(ioc);
4230	if (ioc->chip_phys)
4231		iounmap(ioc->chip);
4232	ioc->chip_phys = 0;
4233	pci_release_selected_regions(ioc->pdev, ioc->bars);
4234	pci_disable_pcie_error_reporting(pdev);
4235	pci_disable_device(pdev);
4236	return;
4237}
4238
4239/**
4240 * mpt2sas_base_attach - attach controller instance
4241 * @ioc: per adapter object
4242 *
4243 * Returns 0 for success, non-zero for failure.
4244 */
4245int
4246mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4247{
4248	int r, i;
4249	int cpu_id, last_cpu_id = 0;
4250
4251	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4252	    __func__));
4253
4254	/* setup cpu_msix_table */
4255	ioc->cpu_count = num_online_cpus();
4256	for_each_online_cpu(cpu_id)
4257		last_cpu_id = cpu_id;
4258	ioc->cpu_msix_table_sz = last_cpu_id + 1;
4259	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4260	ioc->reply_queue_count = 1;
4261	if (!ioc->cpu_msix_table) {
4262		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
4263		    "cpu_msix_table failed!!!\n", ioc->name));
4264		r = -ENOMEM;
4265		goto out_free_resources;
4266	}
4267
4268	if (ioc->is_warpdrive) {
4269		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
4270		    sizeof(resource_size_t *), GFP_KERNEL);
4271		if (!ioc->reply_post_host_index) {
4272			dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
4273				"for cpu_msix_table failed!!!\n", ioc->name));
4274			r = -ENOMEM;
4275			goto out_free_resources;
4276		}
4277	}
4278
4279	r = mpt2sas_base_map_resources(ioc);
4280	if (r)
4281		goto out_free_resources;
4282
4283	if (ioc->is_warpdrive) {
4284		ioc->reply_post_host_index[0] =
4285		    (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
4286
4287		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
4288			ioc->reply_post_host_index[i] = (resource_size_t *)
4289			((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
4290			* 4)));
4291	}
4292
4293	pci_set_drvdata(ioc->pdev, ioc->shost);
4294	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4295	if (r)
4296		goto out_free_resources;
4297
4298	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4299	if (r)
4300		goto out_free_resources;
4301
4302	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4303	    sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
4304	if (!ioc->pfacts) {
4305		r = -ENOMEM;
4306		goto out_free_resources;
4307	}
4308
4309	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4310		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4311		if (r)
4312			goto out_free_resources;
4313	}
4314
4315	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4316	if (r)
4317		goto out_free_resources;
4318
4319	init_waitqueue_head(&ioc->reset_wq);
4320	/* allocate memory pd handle bitmask list */
4321	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4322	if (ioc->facts.MaxDevHandle % 8)
4323		ioc->pd_handles_sz++;
4324	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4325	    GFP_KERNEL);
4326	if (!ioc->pd_handles) {
4327		r = -ENOMEM;
4328		goto out_free_resources;
4329	}
4330	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4331	    GFP_KERNEL);
4332	if (!ioc->blocking_handles) {
4333		r = -ENOMEM;
4334		goto out_free_resources;
4335	}
4336	ioc->fwfault_debug = mpt2sas_fwfault_debug;
4337
4338	/* base internal command bits */
4339	mutex_init(&ioc->base_cmds.mutex);
4340	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4341	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
4342
4343	/* port_enable command bits */
4344	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4345	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
4346
4347	/* transport internal command bits */
4348	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4349	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
4350	mutex_init(&ioc->transport_cmds.mutex);
4351
4352	/* scsih internal command bits */
4353	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4354	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4355	mutex_init(&ioc->scsih_cmds.mutex);
4356
4357	/* task management internal command bits */
4358	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4359	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4360	mutex_init(&ioc->tm_cmds.mutex);
4361
4362	/* config page internal command bits */
4363	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4364	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
4365	mutex_init(&ioc->config_cmds.mutex);
4366
4367	/* ctl module internal command bits */
4368	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4369	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4370	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
4371	mutex_init(&ioc->ctl_cmds.mutex);
4372
4373	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4374	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4375	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4376	    !ioc->ctl_cmds.sense) {
4377		r = -ENOMEM;
4378		goto out_free_resources;
4379	}
4380
4381	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4382	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4383	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
4384		r = -ENOMEM;
4385		goto out_free_resources;
4386	}
4387
4388	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4389		ioc->event_masks[i] = -1;
4390
4391	/* here we enable the events we care about */
4392	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4393	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4394	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4395	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4396	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4397	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4398	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4399	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4400	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4401	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4402	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4403	if (r)
4404		goto out_free_resources;
4405
4406	ioc->non_operational_loop = 0;
4407
4408	return 0;
4409
4410 out_free_resources:
4411
4412	ioc->remove_host = 1;
4413	mpt2sas_base_free_resources(ioc);
4414	_base_release_memory_pools(ioc);
4415	pci_set_drvdata(ioc->pdev, NULL);
4416	kfree(ioc->cpu_msix_table);
4417	if (ioc->is_warpdrive)
4418		kfree(ioc->reply_post_host_index);
4419	kfree(ioc->pd_handles);
4420	kfree(ioc->blocking_handles);
4421	kfree(ioc->tm_cmds.reply);
4422	kfree(ioc->transport_cmds.reply);
4423	kfree(ioc->scsih_cmds.reply);
4424	kfree(ioc->config_cmds.reply);
4425	kfree(ioc->base_cmds.reply);
4426	kfree(ioc->port_enable_cmds.reply);
4427	kfree(ioc->ctl_cmds.reply);
4428	kfree(ioc->ctl_cmds.sense);
4429	kfree(ioc->pfacts);
4430	ioc->ctl_cmds.reply = NULL;
4431	ioc->base_cmds.reply = NULL;
4432	ioc->tm_cmds.reply = NULL;
4433	ioc->scsih_cmds.reply = NULL;
4434	ioc->transport_cmds.reply = NULL;
4435	ioc->config_cmds.reply = NULL;
4436	ioc->pfacts = NULL;
4437	return r;
4438}
4439
4440
4441/**
4442 * mpt2sas_base_detach - remove controller instance
4443 * @ioc: per adapter object
4444 *
4445 * Return nothing.
4446 */
4447void
4448mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4449{
4450
4451	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4452	    __func__));
4453
4454	mpt2sas_base_stop_watchdog(ioc);
4455	mpt2sas_base_free_resources(ioc);
4456	_base_release_memory_pools(ioc);
4457	pci_set_drvdata(ioc->pdev, NULL);
4458	kfree(ioc->cpu_msix_table);
4459	if (ioc->is_warpdrive)
4460		kfree(ioc->reply_post_host_index);
4461	kfree(ioc->pd_handles);
4462	kfree(ioc->blocking_handles);
4463	kfree(ioc->pfacts);
4464	kfree(ioc->ctl_cmds.reply);
4465	kfree(ioc->ctl_cmds.sense);
4466	kfree(ioc->base_cmds.reply);
4467	kfree(ioc->port_enable_cmds.reply);
4468	kfree(ioc->tm_cmds.reply);
4469	kfree(ioc->transport_cmds.reply);
4470	kfree(ioc->scsih_cmds.reply);
4471	kfree(ioc->config_cmds.reply);
4472}
4473
4474/**
4475 * _base_reset_handler - reset callback handler (for base)
4476 * @ioc: per adapter object
4477 * @reset_phase: phase
4478 *
4479 * The handler for doing any required cleanup or initialization.
4480 *
4481 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
4482 * MPT2_IOC_DONE_RESET
4483 *
4484 * Return nothing.
4485 */
4486static void
4487_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4488{
4489	mpt2sas_scsih_reset_handler(ioc, reset_phase);
4490	mpt2sas_ctl_reset_handler(ioc, reset_phase);
4491	switch (reset_phase) {
4492	case MPT2_IOC_PRE_RESET:
4493		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4494		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
4495		break;
4496	case MPT2_IOC_AFTER_RESET:
4497		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4498		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
4499		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
4500			ioc->transport_cmds.status |= MPT2_CMD_RESET;
4501			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4502			complete(&ioc->transport_cmds.done);
4503		}
4504		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
4505			ioc->base_cmds.status |= MPT2_CMD_RESET;
4506			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
4507			complete(&ioc->base_cmds.done);
4508		}
4509		if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
4510			ioc->port_enable_failed = 1;
4511			ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
4512			mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4513			if (ioc->is_driver_loading) {
4514				ioc->start_scan_failed =
4515				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4516				ioc->start_scan = 0;
4517				ioc->port_enable_cmds.status =
4518						MPT2_CMD_NOT_USED;
4519			} else
4520				complete(&ioc->port_enable_cmds.done);
4521
4522		}
4523		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
4524			ioc->config_cmds.status |= MPT2_CMD_RESET;
4525			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
4526			ioc->config_cmds.smid = USHRT_MAX;
4527			complete(&ioc->config_cmds.done);
4528		}
4529		break;
4530	case MPT2_IOC_DONE_RESET:
4531		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4532		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
4533		break;
4534	}
4535}
4536
4537/**
4538 * _wait_for_commands_to_complete - reset controller
4539 * @ioc: Pointer to MPT_ADAPTER structure
4540 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4541 *
4542 * This function waiting(3s) for all pending commands to complete
4543 * prior to putting controller in reset.
4544 */
4545static void
4546_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4547{
4548	u32 ioc_state;
4549	unsigned long flags;
4550	u16 i;
4551
4552	ioc->pending_io_count = 0;
4553	if (sleep_flag != CAN_SLEEP)
4554		return;
4555
4556	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4557	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4558		return;
4559
4560	/* pending command count */
4561	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4562	for (i = 0; i < ioc->scsiio_depth; i++)
4563		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4564			ioc->pending_io_count++;
4565	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4566
4567	if (!ioc->pending_io_count)
4568		return;
4569
4570	/* wait for pending commands to complete */
4571	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4572}
4573
4574/**
4575 * mpt2sas_base_hard_reset_handler - reset controller
4576 * @ioc: Pointer to MPT_ADAPTER structure
4577 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4578 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4579 *
4580 * Returns 0 for success, non-zero for failure.
4581 */
4582int
4583mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4584    enum reset_type type)
4585{
4586	int r;
4587	unsigned long flags;
4588
4589	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4590	    __func__));
4591
4592	if (ioc->pci_error_recovery) {
4593		printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
4594		    ioc->name, __func__);
4595		r = 0;
4596		goto out_unlocked;
4597	}
4598
4599	if (mpt2sas_fwfault_debug)
4600		mpt2sas_halt_firmware(ioc);
4601
4602	/* TODO - What we really should be doing is pulling
4603	 * out all the code associated with NO_SLEEP; its never used.
4604	 * That is legacy code from mpt fusion driver, ported over.
4605	 * I will leave this BUG_ON here for now till its been resolved.
4606	 */
4607	BUG_ON(sleep_flag == NO_SLEEP);
4608
4609	/* wait for an active reset in progress to complete */
4610	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4611		do {
4612			ssleep(1);
4613		} while (ioc->shost_recovery == 1);
4614		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4615		    __func__));
4616		return ioc->ioc_reset_in_progress_status;
4617	}
4618
4619	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4620	ioc->shost_recovery = 1;
4621	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4622
4623	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
4624	_wait_for_commands_to_complete(ioc, sleep_flag);
4625	_base_mask_interrupts(ioc);
4626	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4627	if (r)
4628		goto out;
4629	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4630
4631	/* If this hard reset is called while port enable is active, then
4632	 * there is no reason to call make_ioc_operational
4633	 */
4634	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4635		ioc->remove_host = 1;
4636		r = -EFAULT;
4637		goto out;
4638	}
4639	r = _base_make_ioc_operational(ioc, sleep_flag);
4640	if (!r)
4641		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4642 out:
4643	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
4644	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4645
4646	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4647	ioc->ioc_reset_in_progress_status = r;
4648	ioc->shost_recovery = 0;
4649	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4650	mutex_unlock(&ioc->reset_in_progress_mutex);
4651
4652 out_unlocked:
4653	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4654	    __func__));
4655	return r;
4656}
4657