mpt2sas_base.c revision 148124d9310f3870fb016bd2637057841d5b7705
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2012  LSI Corporation
7 *  (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42 * USA.
43 */
44
45#include <linux/kernel.h>
46#include <linux/module.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/types.h>
51#include <linux/pci.h>
52#include <linux/kdev_t.h>
53#include <linux/blkdev.h>
54#include <linux/delay.h>
55#include <linux/interrupt.h>
56#include <linux/dma-mapping.h>
57#include <linux/sort.h>
58#include <linux/io.h>
59#include <linux/time.h>
60#include <linux/kthread.h>
61#include <linux/aer.h>
62
63#include "mpt2sas_base.h"
64
65static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
66
67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
68
69#define MAX_HBA_QUEUE_DEPTH	30000
70#define MAX_CHAIN_DEPTH		100000
71static int max_queue_depth = -1;
72module_param(max_queue_depth, int, 0);
73MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
74
75static int max_sgl_entries = -1;
76module_param(max_sgl_entries, int, 0);
77MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
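/* Both parameters can be set at load time, e.g.
 * "modprobe mpt2sas max_queue_depth=128 max_sgl_entries=32"; the default of
 * -1 leaves the driver's built-in limits in place.
 */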
78
79static int msix_disable = -1;
80module_param(msix_disable, int, 0);
81MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
82
83static int mpt2sas_fwfault_debug;
84MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
85	"and halt firmware - (default=0)");
86
87static int disable_discovery = -1;
88module_param(disable_discovery, int, 0);
89MODULE_PARM_DESC(disable_discovery, " disable discovery ");
90
91/**
92 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
93 *
94 */
95static int
96_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
97{
98	int ret = param_set_int(val, kp);
99	struct MPT2SAS_ADAPTER *ioc;
100
101	if (ret)
102		return ret;
103
104	printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
105	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
106		ioc->fwfault_debug = mpt2sas_fwfault_debug;
107	return 0;
108}
109
110module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
111    param_get_int, &mpt2sas_fwfault_debug, 0644);
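/* mpt2sas_fwfault_debug can also be changed at runtime, e.g. by writing to
 * /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug (permissions 0644
 * above); _scsih_set_fwfault_debug then propagates the value to every ioc.
 */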
112
113/**
114 *  mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
115 * @arg: input argument, used to derive ioc
116 *
117 * Return 0 if the controller is removed from the pci subsystem.
118 * Return -1 otherwise.
119 */
120static int mpt2sas_remove_dead_ioc_func(void *arg)
121{
122	struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
123	struct pci_dev *pdev;
124
125	if (ioc == NULL)
126		return -1;
127
128	pdev = ioc->pdev;
129	if (pdev == NULL)
130		return -1;
131	pci_stop_and_remove_bus_device(pdev);
132	return 0;
133}
134
135
136/**
137 * _base_fault_reset_work - workq handling ioc fault conditions
138 * @work: input argument, used to derive ioc
139 * Context: sleep.
140 *
141 * Return nothing.
142 */
143static void
144_base_fault_reset_work(struct work_struct *work)
145{
146	struct MPT2SAS_ADAPTER *ioc =
147	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
148	unsigned long	 flags;
149	u32 doorbell;
150	int rc;
151	struct task_struct *p;
152
153	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
154	if (ioc->shost_recovery || ioc->pci_error_recovery)
155		goto rearm_timer;
156	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
157
158	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
159	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
160		printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
161			ioc->name, __func__);
162
163		/* It may be possible that EEH recovery can resolve some pci
164		 * bus failure issues rather than removing the dead ioc
165		 * function outright on the assumption that the controller is
166		 * in a non-operational state.  So priority is given to EEH
167		 * recovery here.  If that does not resolve the issue, the
168		 * mpt2sas driver will consider the controller to be
169		 * non-operational and remove the dead ioc function.
170		 */
171		if (ioc->non_operational_loop++ < 5) {
172			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
173							 flags);
174			goto rearm_timer;
175		}
176
177		/*
178		 * Call _scsih_flush_pending_cmds callback so that we flush all
179		 * pending commands back to the OS. This call is required to
180		 * avoid a deadlock at the block layer. A dead IOC will fail to
181		 * do a diag reset, and this call is safe since a dead ioc will
182		 * never return any command back from the HW.
183		 */
184		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
185		/*
186		 * Set remove_host flag early since kernel thread will
187		 * take some time to execute.
188		 */
189		ioc->remove_host = 1;
190		/* Remove the dead host */
191		p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
192		    "mpt2sas_dead_ioc_%d", ioc->id);
193		if (IS_ERR(p)) {
194			printk(MPT2SAS_ERR_FMT
195			    "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
196			    ioc->name, __func__);
197		} else {
198			printk(MPT2SAS_ERR_FMT
199			    "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
200			    ioc->name, __func__);
201		}
202
203		return; /* don't rearm timer */
204	}
205
206	ioc->non_operational_loop = 0;
207
208	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
209		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
210		    FORCE_BIG_HAMMER);
211		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
212		    __func__, (rc == 0) ? "success" : "failed");
213		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
214		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
215			mpt2sas_base_fault_info(ioc, doorbell &
216			    MPI2_DOORBELL_DATA_MASK);
217	}
218
219	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
220 rearm_timer:
221	if (ioc->fault_reset_work_q)
222		queue_delayed_work(ioc->fault_reset_work_q,
223		    &ioc->fault_reset_work,
224		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
225	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
226}
227
228/**
229 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
230 * @ioc: per adapter object
231 * Context: sleep.
232 *
233 * Return nothing.
234 */
235void
236mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
237{
238	unsigned long	 flags;
239
240	if (ioc->fault_reset_work_q)
241		return;
242
243	/* initialize fault polling */
244	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
245	snprintf(ioc->fault_reset_work_q_name,
246	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
247	ioc->fault_reset_work_q =
248		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
249	if (!ioc->fault_reset_work_q) {
250		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
251		    ioc->name, __func__, __LINE__);
252		return;
253	}
254	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
255	if (ioc->fault_reset_work_q)
256		queue_delayed_work(ioc->fault_reset_work_q,
257		    &ioc->fault_reset_work,
258		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
259	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
260}
261
262/**
263 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
264 * @ioc: per adapter object
265 * Context: sleep.
266 *
267 * Return nothing.
268 */
269void
270mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
271{
272	unsigned long	 flags;
273	struct workqueue_struct *wq;
274
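	/* clear fault_reset_work_q under the lock so that a concurrently
	 * running _base_fault_reset_work cannot rearm itself while the
	 * workqueue is being torn down.
	 */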
275	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
276	wq = ioc->fault_reset_work_q;
277	ioc->fault_reset_work_q = NULL;
278	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
279	if (wq) {
280		if (!cancel_delayed_work(&ioc->fault_reset_work))
281			flush_workqueue(wq);
282		destroy_workqueue(wq);
283	}
284}
285
286/**
287 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
288 * @ioc: per adapter object
289 * @fault_code: fault code
290 *
291 * Return nothing.
292 */
293void
294mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc, u16 fault_code)
295{
296	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
297	    ioc->name, fault_code);
298}
299
300/**
301 * mpt2sas_halt_firmware - halt the mpt controller firmware
302 * @ioc: per adapter object
303 *
304 * For debugging timeout related issues.  Writing 0xC0FFEE00
305 * to the doorbell register will halt the controller firmware. The
306 * intent is to stop both the driver and the firmware so the end user
307 * can obtain a ring buffer from the controller UART.
308 */
309void
310mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
311{
312	u32 doorbell;
313
314	if (!ioc->fwfault_debug)
315		return;
316
317	dump_stack();
318
319	doorbell = readl(&ioc->chip->Doorbell);
320	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
321		mpt2sas_base_fault_info(ioc, doorbell);
322	else {
323		writel(0xC0FFEE00, &ioc->chip->Doorbell);
324		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
325		    "timeout\n", ioc->name);
326	}
327
328	panic("panic in %s\n", __func__);
329}
330
331#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
332/**
333 * _base_sas_ioc_info - verbose translation of the ioc status
334 * @ioc: per adapter object
335 * @mpi_reply: reply mf payload returned from firmware
336 * @request_hdr: request mf
337 *
338 * Return nothing.
339 */
340static void
341_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
342     MPI2RequestHeader_t *request_hdr)
343{
344	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
345	    MPI2_IOCSTATUS_MASK;
346	char *desc = NULL;
347	u16 frame_sz;
348	char *func_str = NULL;
349
350	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
351	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
352	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
353	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
354		return;
355
356	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
357		return;
358
359	switch (ioc_status) {
360
361/****************************************************************************
362*  Common IOCStatus values for all replies
363****************************************************************************/
364
365	case MPI2_IOCSTATUS_INVALID_FUNCTION:
366		desc = "invalid function";
367		break;
368	case MPI2_IOCSTATUS_BUSY:
369		desc = "busy";
370		break;
371	case MPI2_IOCSTATUS_INVALID_SGL:
372		desc = "invalid sgl";
373		break;
374	case MPI2_IOCSTATUS_INTERNAL_ERROR:
375		desc = "internal error";
376		break;
377	case MPI2_IOCSTATUS_INVALID_VPID:
378		desc = "invalid vpid";
379		break;
380	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
381		desc = "insufficient resources";
382		break;
383	case MPI2_IOCSTATUS_INVALID_FIELD:
384		desc = "invalid field";
385		break;
386	case MPI2_IOCSTATUS_INVALID_STATE:
387		desc = "invalid state";
388		break;
389	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
390		desc = "op state not supported";
391		break;
392
393/****************************************************************************
394*  Config IOCStatus values
395****************************************************************************/
396
397	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
398		desc = "config invalid action";
399		break;
400	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
401		desc = "config invalid type";
402		break;
403	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
404		desc = "config invalid page";
405		break;
406	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
407		desc = "config invalid data";
408		break;
409	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
410		desc = "config no defaults";
411		break;
412	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
413		desc = "config cant commit";
414		break;
415
416/****************************************************************************
417*  SCSI IO Reply
418****************************************************************************/
419
420	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
421	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
422	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
423	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
424	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
425	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
426	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
427	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
428	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
429	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
430	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
431	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
432		break;
433
434/****************************************************************************
435*  For use by SCSI Initiator and SCSI Target end-to-end data protection
436****************************************************************************/
437
438	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
439		desc = "eedp guard error";
440		break;
441	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
442		desc = "eedp ref tag error";
443		break;
444	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
445		desc = "eedp app tag error";
446		break;
447
448/****************************************************************************
449*  SCSI Target values
450****************************************************************************/
451
452	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
453		desc = "target invalid io index";
454		break;
455	case MPI2_IOCSTATUS_TARGET_ABORTED:
456		desc = "target aborted";
457		break;
458	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
459		desc = "target no conn retryable";
460		break;
461	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
462		desc = "target no connection";
463		break;
464	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
465		desc = "target xfer count mismatch";
466		break;
467	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
468		desc = "target data offset error";
469		break;
470	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
471		desc = "target too much write data";
472		break;
473	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
474		desc = "target iu too short";
475		break;
476	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
477		desc = "target ack nak timeout";
478		break;
479	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
480		desc = "target nak received";
481		break;
482
483/****************************************************************************
484*  Serial Attached SCSI values
485****************************************************************************/
486
487	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
488		desc = "smp request failed";
489		break;
490	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
491		desc = "smp data overrun";
492		break;
493
494/****************************************************************************
495*  Diagnostic Buffer Post / Diagnostic Release values
496****************************************************************************/
497
498	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
499		desc = "diagnostic released";
500		break;
501	default:
502		break;
503	}
504
505	if (!desc)
506		return;
507
508	switch (request_hdr->Function) {
509	case MPI2_FUNCTION_CONFIG:
510		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
511		func_str = "config_page";
512		break;
513	case MPI2_FUNCTION_SCSI_TASK_MGMT:
514		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
515		func_str = "task_mgmt";
516		break;
517	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
518		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
519		func_str = "sas_iounit_ctl";
520		break;
521	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
522		frame_sz = sizeof(Mpi2SepRequest_t);
523		func_str = "enclosure";
524		break;
525	case MPI2_FUNCTION_IOC_INIT:
526		frame_sz = sizeof(Mpi2IOCInitRequest_t);
527		func_str = "ioc_init";
528		break;
529	case MPI2_FUNCTION_PORT_ENABLE:
530		frame_sz = sizeof(Mpi2PortEnableRequest_t);
531		func_str = "port_enable";
532		break;
533	case MPI2_FUNCTION_SMP_PASSTHROUGH:
534		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
535		func_str = "smp_passthru";
536		break;
537	default:
538		frame_sz = 32;
539		func_str = "unknown";
540		break;
541	}
542
543	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
544	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
545
546	_debug_dump_mf(request_hdr, frame_sz/4);
547}
548
549/**
550 * _base_display_event_data - verbose translation of firmware async events
551 * @ioc: per adapter object
552 * @mpi_reply: reply mf payload returned from firmware
553 *
554 * Return nothing.
555 */
556static void
557_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
558    Mpi2EventNotificationReply_t *mpi_reply)
559{
560	char *desc = NULL;
561	u16 event;
562
563	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
564		return;
565
566	event = le16_to_cpu(mpi_reply->Event);
567
568	switch (event) {
569	case MPI2_EVENT_LOG_DATA:
570		desc = "Log Data";
571		break;
572	case MPI2_EVENT_STATE_CHANGE:
573		desc = "Status Change";
574		break;
575	case MPI2_EVENT_HARD_RESET_RECEIVED:
576		desc = "Hard Reset Received";
577		break;
578	case MPI2_EVENT_EVENT_CHANGE:
579		desc = "Event Change";
580		break;
581	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
582		desc = "Device Status Change";
583		break;
584	case MPI2_EVENT_IR_OPERATION_STATUS:
585		if (!ioc->hide_ir_msg)
586			desc = "IR Operation Status";
587		break;
588	case MPI2_EVENT_SAS_DISCOVERY:
589	{
590		Mpi2EventDataSasDiscovery_t *event_data =
591		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
592		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
593		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
594		    "start" : "stop");
595		if (event_data->DiscoveryStatus)
596			printk("discovery_status(0x%08x)",
597			    le32_to_cpu(event_data->DiscoveryStatus));
598		printk("\n");
599		return;
600	}
601	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
602		desc = "SAS Broadcast Primitive";
603		break;
604	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
605		desc = "SAS Init Device Status Change";
606		break;
607	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
608		desc = "SAS Init Table Overflow";
609		break;
610	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
611		desc = "SAS Topology Change List";
612		break;
613	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
614		desc = "SAS Enclosure Device Status Change";
615		break;
616	case MPI2_EVENT_IR_VOLUME:
617		if (!ioc->hide_ir_msg)
618			desc = "IR Volume";
619		break;
620	case MPI2_EVENT_IR_PHYSICAL_DISK:
621		if (!ioc->hide_ir_msg)
622			desc = "IR Physical Disk";
623		break;
624	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
625		if (!ioc->hide_ir_msg)
626			desc = "IR Configuration Change List";
627		break;
628	case MPI2_EVENT_LOG_ENTRY_ADDED:
629		if (!ioc->hide_ir_msg)
630			desc = "Log Entry Added";
631		break;
632	}
633
634	if (!desc)
635		return;
636
637	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
638}
639#endif
640
641/**
642 * _base_sas_log_info - verbose translation of firmware log info
643 * @ioc: per adapter object
644 * @log_info: log info
645 *
646 * Return nothing.
647 */
648static void
649_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc, u32 log_info)
650{
651	union loginfo_type {
652		u32	loginfo;
653		struct {
654			u32	subcode:16;
655			u32	code:8;
656			u32	originator:4;
657			u32	bus_type:4;
658		} dw;
659	};
660	union loginfo_type sas_loginfo;
661	char *originator_str = NULL;
662
663	sas_loginfo.loginfo = log_info;
664	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
665		return;
666
667	/* each nexus loss loginfo */
668	if (log_info == 0x31170000)
669		return;
670
671	/* eat the loginfos associated with task aborts */
672	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
673	    0x31140000 || log_info == 0x31130000))
674		return;
675
676	switch (sas_loginfo.dw.originator) {
677	case 0:
678		originator_str = "IOP";
679		break;
680	case 1:
681		originator_str = "PL";
682		break;
683	case 2:
684		if (!ioc->hide_ir_msg)
685			originator_str = "IR";
686		else
687			originator_str = "WarpDrive";
688		break;
689	}
690
691	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
692	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
693	     originator_str, sas_loginfo.dw.code,
694	     sas_loginfo.dw.subcode);
695}
696
697/**
698 * _base_display_reply_info - display reply frame IOCStatus and log info
699 * @ioc: per adapter object
700 * @smid: system request message index
701 * @msix_index: MSIX table index supplied by the OS
702 * @reply: reply message frame(lower 32bit addr)
703 *
704 * Return nothing.
705 */
706static void
707_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
708    u32 reply)
709{
710	MPI2DefaultReply_t *mpi_reply;
711	u16 ioc_status;
712
713	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
714	if (unlikely(!mpi_reply)) {
715		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
716			ioc->name, __FILE__, __LINE__, __func__);
717		return;
718	}
719	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
720#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
721	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
722	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
723		_base_sas_ioc_info(ioc, mpi_reply,
724		   mpt2sas_base_get_msg_frame(ioc, smid));
725	}
726#endif
727	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
728		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
729}
730
731/**
732 * mpt2sas_base_done - base internal command completion routine
733 * @ioc: per adapter object
734 * @smid: system request message index
735 * @msix_index: MSIX table index supplied by the OS
736 * @reply: reply message frame(lower 32bit addr)
737 *
738 * Return 1 meaning mf should be freed from _base_interrupt
739 *        0 means the mf is freed from this function.
740 */
741u8
742mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
743    u32 reply)
744{
745	MPI2DefaultReply_t *mpi_reply;
746
747	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
748	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
749		return 1;
750
751	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
752		return 1;
753
754	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
755	if (mpi_reply) {
756		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
757		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
758	}
759	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
760
761	complete(&ioc->base_cmds.done);
762	return 1;
763}
764
765/**
766 * _base_async_event - main callback handler for firmware asyn events
767 * @ioc: per adapter object
768 * @msix_index: MSIX table index supplied by the OS
769 * @reply: reply message frame(lower 32bit addr)
770 *
771 * Return 1 meaning mf should be freed from _base_interrupt
772 *        0 means the mf is freed from this function.
773 */
774static u8
775_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
776{
777	Mpi2EventNotificationReply_t *mpi_reply;
778	Mpi2EventAckRequest_t *ack_request;
779	u16 smid;
780
781	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
782	if (!mpi_reply)
783		return 1;
784	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
785		return 1;
786#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
787	_base_display_event_data(ioc, mpi_reply);
788#endif
789	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
790		goto out;
791	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
792	if (!smid) {
793		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
794		    ioc->name, __func__);
795		goto out;
796	}
797
798	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
799	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
800	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
801	ack_request->Event = mpi_reply->Event;
802	ack_request->EventContext = mpi_reply->EventContext;
803	ack_request->VF_ID = 0;  /* TODO */
804	ack_request->VP_ID = 0;
805	mpt2sas_base_put_smid_default(ioc, smid);
806
807 out:
808
809	/* scsih callback handler */
810	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
811
812	/* ctl callback handler */
813	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
814
815	return 1;
816}
817
818/**
819 * _base_get_cb_idx - obtain the callback index
820 * @ioc: per adapter object
821 * @smid: system request message index
822 *
823 * Return callback index.
824 */
825static u8
826_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
827{
828	int i;
829	u8 cb_idx;
830
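	/* the smid space is partitioned: [1, hi_priority_smid) are scsiio
	 * trackers, [hi_priority_smid, internal_smid) are hi-priority
	 * trackers, and [internal_smid, hba_queue_depth] are internal
	 * trackers
	 */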
831	if (smid < ioc->hi_priority_smid) {
832		i = smid - 1;
833		cb_idx = ioc->scsi_lookup[i].cb_idx;
834	} else if (smid < ioc->internal_smid) {
835		i = smid - ioc->hi_priority_smid;
836		cb_idx = ioc->hpr_lookup[i].cb_idx;
837	} else if (smid <= ioc->hba_queue_depth) {
838		i = smid - ioc->internal_smid;
839		cb_idx = ioc->internal_lookup[i].cb_idx;
840	} else
841		cb_idx = 0xFF;
842	return cb_idx;
843}
844
845/**
846 * _base_mask_interrupts - disable interrupts
847 * @ioc: per adapter object
848 *
849 * Disabling ResetIRQ, Reply and Doorbell Interrupts
850 *
851 * Return nothing.
852 */
853static void
854_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
855{
856	u32 him_register;
857
858	ioc->mask_interrupts = 1;
859	him_register = readl(&ioc->chip->HostInterruptMask);
860	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
861	writel(him_register, &ioc->chip->HostInterruptMask);
862	readl(&ioc->chip->HostInterruptMask);
863}
864
865/**
866 * _base_unmask_interrupts - enable interrupts
867 * @ioc: per adapter object
868 *
869 * Enabling only Reply Interrupts
870 *
871 * Return nothing.
872 */
873static void
874_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
875{
876	u32 him_register;
877
878	him_register = readl(&ioc->chip->HostInterruptMask);
879	him_register &= ~MPI2_HIM_RIM;
880	writel(him_register, &ioc->chip->HostInterruptMask);
881	ioc->mask_interrupts = 0;
882}
883
884union reply_descriptor {
885	u64 word;
886	struct {
887		u32 low;
888		u32 high;
889	} u;
890};
891
892/**
893 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
894 * @irq: irq number (not used)
895 * @bus_id: bus identifier cookie == pointer to adapter_reply_queue structure
896 * Context: interrupt.
897 *
898 * Return IRQ_HANDLED if processed, else IRQ_NONE.
899 */
900static irqreturn_t
901_base_interrupt(int irq, void *bus_id)
902{
903	struct adapter_reply_queue *reply_q = bus_id;
904	union reply_descriptor rd;
905	u32 completed_cmds;
906	u8 request_desript_type;
907	u16 smid;
908	u8 cb_idx;
909	u32 reply;
910	u8 msix_index = reply_q->msix_index;
911	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
912	Mpi2ReplyDescriptorsUnion_t *rpf;
913	u8 rc;
914
915	if (ioc->mask_interrupts)
916		return IRQ_NONE;
917
918	if (!atomic_add_unless(&reply_q->busy, 1, 1))
919		return IRQ_NONE;
920
921	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
922	request_desript_type = rpf->Default.ReplyFlags
923	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
924	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
925		atomic_dec(&reply_q->busy);
926		return IRQ_NONE;
927	}
928
929	completed_cmds = 0;
930	cb_idx = 0xFF;
931	do {
932		rd.word = le64_to_cpu(rpf->Words);
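		/* a descriptor whose words are still all 0xFF has not yet
		 * been written by the firmware; treat it as end of queue
		 */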
933		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
934			goto out;
935		reply = 0;
936		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
937		if (request_desript_type ==
938		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
939			reply = le32_to_cpu
940				(rpf->AddressReply.ReplyFrameAddress);
941			if (reply > ioc->reply_dma_max_address ||
942			    reply < ioc->reply_dma_min_address)
943				reply = 0;
944		} else if (request_desript_type ==
945		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
946			goto next;
947		else if (request_desript_type ==
948		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
949			goto next;
950		if (smid) {
951			cb_idx = _base_get_cb_idx(ioc, smid);
952			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
953			    (likely(mpt_callbacks[cb_idx] != NULL))) {
954				rc = mpt_callbacks[cb_idx](ioc, smid,
955				    msix_index, reply);
956				if (reply)
957					_base_display_reply_info(ioc, smid,
958					    msix_index, reply);
959				if (rc)
960					mpt2sas_base_free_smid(ioc, smid);
961			}
962		}
963		if (!smid)
964			_base_async_event(ioc, msix_index, reply);
965
966		/* reply free queue handling */
967		if (reply) {
968			ioc->reply_free_host_index =
969			    (ioc->reply_free_host_index ==
970			    (ioc->reply_free_queue_depth - 1)) ?
971			    0 : ioc->reply_free_host_index + 1;
972			ioc->reply_free[ioc->reply_free_host_index] =
973			    cpu_to_le32(reply);
974			wmb();
975			writel(ioc->reply_free_host_index,
976			    &ioc->chip->ReplyFreeHostIndex);
977		}
978
979 next:
980
981		rpf->Words = cpu_to_le64(ULLONG_MAX);
982		reply_q->reply_post_host_index =
983		    (reply_q->reply_post_host_index ==
984		    (ioc->reply_post_queue_depth - 1)) ? 0 :
985		    reply_q->reply_post_host_index + 1;
986		request_desript_type =
987		    reply_q->reply_post_free[reply_q->reply_post_host_index].
988		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
989		completed_cmds++;
990		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
991			goto out;
992		if (!reply_q->reply_post_host_index)
993			rpf = reply_q->reply_post_free;
994		else
995			rpf++;
996	} while (1);
997
998 out:
999
1000	if (!completed_cmds) {
1001		atomic_dec(&reply_q->busy);
1002		return IRQ_NONE;
1003	}
1004	wmb();
1005	if (ioc->is_warpdrive) {
1006		writel(reply_q->reply_post_host_index,
1007		ioc->reply_post_host_index[msix_index]);
1008		atomic_dec(&reply_q->busy);
1009		return IRQ_HANDLED;
1010	}
1011	writel(reply_q->reply_post_host_index | (msix_index <<
1012	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
1013	atomic_dec(&reply_q->busy);
1014	return IRQ_HANDLED;
1015}
1016
1017/**
1018 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1019 * @ioc: per adapter object
1020 *
1021 */
1022static inline int
1023_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
1024{
1025	return (ioc->facts.IOCCapabilities &
1026	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1027}
1028
1029/**
1030 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
1031 * @ioc: per adapter object
1032 * Context: ISR context
1033 *
1034 * Called when a Task Management request has completed. We want
1035 * to flush the other reply queues so that all outstanding IO has been
1036 * completed back to the OS before we process the TM completion.
1037 *
1038 * Return nothing.
1039 */
1040void
1041mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1042{
1043	struct adapter_reply_queue *reply_q;
1044
1045	/* If MSIX capability is turned off
1046	 * then multi-queues are not enabled
1047	 */
1048	if (!_base_is_controller_msix_enabled(ioc))
1049		return;
1050
1051	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1052		if (ioc->shost_recovery)
1053			return;
1054		/* TMs are on msix_index == 0 */
1055		if (reply_q->msix_index == 0)
1056			continue;
1057		_base_interrupt(reply_q->vector, (void *)reply_q);
1058	}
1059}
1060
1061/**
1062 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
1063 * @cb_idx: callback index
1064 *
1065 * Return nothing.
1066 */
1067void
1068mpt2sas_base_release_callback_handler(u8 cb_idx)
1069{
1070	mpt_callbacks[cb_idx] = NULL;
1071}
1072
1073/**
1074 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
1075 * @cb_func: callback function
1076 *
1077 * Returns cb_idx.
1078 */
1079u8
1080mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1081{
1082	u8 cb_idx;
1083
1084	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1085		if (mpt_callbacks[cb_idx] == NULL)
1086			break;
1087
1088	mpt_callbacks[cb_idx] = cb_func;
1089	return cb_idx;
1090}
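/* Callers (e.g. the scsih and ctl modules) typically register their
 * completion routines once at load time and then pass the returned index
 * as cb_idx when obtaining smids for their requests.
 */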
1091
1092/**
1093 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
1094 *
1095 * Return nothing.
1096 */
1097void
1098mpt2sas_base_initialize_callback_handler(void)
1099{
1100	u8 cb_idx;
1101
1102	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1103		mpt2sas_base_release_callback_handler(cb_idx);
1104}
1105
1106/**
1107 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
1108 * @ioc: per adapter object
1109 * @paddr: virtual address for SGE
1110 *
1111 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1112 * something to use if the target device goes brain dead and tries
1113 * to send data even when none is asked for.
1114 *
1115 * Return nothing.
1116 */
1117void
1118mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
1119{
1120	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1121	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1122	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1123	    MPI2_SGE_FLAGS_SHIFT);
1124	ioc->base_add_sg_single(paddr, flags_length, -1);
1125}
1126
1127/**
1128 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
1129 * @paddr: virtual address for SGE
1130 * @flags_length: SGE flags and data transfer length
1131 * @dma_addr: Physical address
1132 *
1133 * Return nothing.
1134 */
1135static void
1136_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1137{
1138	Mpi2SGESimple32_t *sgel = paddr;
1139
1140	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1141	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1142	sgel->FlagsLength = cpu_to_le32(flags_length);
1143	sgel->Address = cpu_to_le32(dma_addr);
1144}
1145
1146
1147/**
1148 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
1149 * @paddr: virtual address for SGE
1150 * @flags_length: SGE flags and data transfer length
1151 * @dma_addr: Physical address
1152 *
1153 * Return nothing.
1154 */
1155static void
1156_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1157{
1158	Mpi2SGESimple64_t *sgel = paddr;
1159
1160	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1161	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1162	sgel->FlagsLength = cpu_to_le32(flags_length);
1163	sgel->Address = cpu_to_le64(dma_addr);
1164}
1165
1166#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1167
1168/**
1169 * _base_config_dma_addressing - set dma addressing
1170 * @ioc: per adapter object
1171 * @pdev: PCI device struct
1172 *
1173 * Returns 0 for success, non-zero for failure.
1174 */
1175static int
1176_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1177{
1178	struct sysinfo s;
1179	char *desc = NULL;
1180
1181	if (sizeof(dma_addr_t) > 4) {
1182		const uint64_t required_mask =
1183		    dma_get_required_mask(&pdev->dev);
1184		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1185		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1186		    DMA_BIT_MASK(64))) {
1187			ioc->base_add_sg_single = &_base_add_sg_single_64;
1188			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1189			desc = "64";
1190			goto out;
1191		}
1192	}
1193
1194	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1195	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1196		ioc->base_add_sg_single = &_base_add_sg_single_32;
1197		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1198		desc = "32";
1199	} else
1200		return -ENODEV;
1201
1202 out:
1203	si_meminfo(&s);
1204	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1205	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1206
1207	return 0;
1208}
1209
1210/**
1211 * _base_check_enable_msix - check MSIX capability
1212 * @ioc: per adapter object
1213 *
1214 * Check to see if the card is capable of MSIX, and set the number
1215 * of available msix vectors.
1216 */
1217static int
1218_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1219{
1220	int base;
1221	u16 message_control;
1222
1223
1224	/* Check whether this is a SAS2008 B0 controller;
1225	 * if so, use IO-APIC instead of MSIX */
1226	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
1227	    ioc->pdev->revision == 0x01) {
1228		return -EINVAL;
1229	}
1230
1231	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1232	if (!base) {
1233		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1234		    "supported\n", ioc->name));
1235		return -EINVAL;
1236	}
1237
1238	/* get msix vector count */
1239	/* NUMA_IO not supported for older controllers */
1240	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1241	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1242	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1243	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1244	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1245	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1246	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1247		ioc->msix_vector_count = 1;
1248	else {
1249		pci_read_config_word(ioc->pdev, base + 2, &message_control);
1250		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1251	}
1252	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1253	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1254
1255	return 0;
1256}
1257
1258/**
1259 * _base_free_irq - free irq
1260 * @ioc: per adapter object
1261 *
1262 * Free each irq and remove its reply_queue from the list.
1263 */
1264static void
1265_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1266{
1267	struct adapter_reply_queue *reply_q, *next;
1268
1269	if (list_empty(&ioc->reply_queue_list))
1270		return;
1271
1272	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1273		list_del(&reply_q->list);
1274		synchronize_irq(reply_q->vector);
1275		free_irq(reply_q->vector, reply_q);
1276		kfree(reply_q);
1277	}
1278}
1279
1280/**
1281 * _base_request_irq - request irq
1282 * @ioc: per adapter object
1283 * @index: msix index into vector table
1284 * @vector: irq vector
1285 *
1286 * Inserting respective reply_queue into the list.
1287 */
1288static int
1289_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1290{
1291	struct adapter_reply_queue *reply_q;
1292	int r;
1293
1294	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1295	if (!reply_q) {
1296		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1297		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1298		return -ENOMEM;
1299	}
1300	reply_q->ioc = ioc;
1301	reply_q->msix_index = index;
1302	reply_q->vector = vector;
1303	atomic_set(&reply_q->busy, 0);
1304	if (ioc->msix_enable)
1305		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1306		    MPT2SAS_DRIVER_NAME, ioc->id, index);
1307	else
1308		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1309		    MPT2SAS_DRIVER_NAME, ioc->id);
1310	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1311	    reply_q);
1312	if (r) {
1313		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1314		    reply_q->name, vector);
1315		kfree(reply_q);
1316		return -EBUSY;
1317	}
1318
1319	INIT_LIST_HEAD(&reply_q->list);
1320	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1321	return 0;
1322}
1323
1324/**
1325 * _base_assign_reply_queues - assigning msix index for each cpu
1326 * @ioc: per adapter object
1327 *
1328 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
1329 *
1330 * It would be nice if we could call irq_set_affinity, however it is not
1331 * an exported symbol.
1332 */
1333static void
1334_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1335{
1336	struct adapter_reply_queue *reply_q;
1337	int cpu_id;
1338	int cpu_grouping, loop, grouping, grouping_mod;
1339
1340	if (!_base_is_controller_msix_enabled(ioc))
1341		return;
1342
1343	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1344	/* when there are more cpus than available msix vectors,
1345	 * then group cpus together on the same irq
1346	 */
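	/* e.g. with 16 online cpus and 4 msix vectors, grouping is 4 with no
	 * remainder, so each group of 4 consecutive cpus shares one reply
	 * queue
	 */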
1347	if (ioc->cpu_count > ioc->msix_vector_count) {
1348		grouping = ioc->cpu_count / ioc->msix_vector_count;
1349		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1350		if (grouping < 2 || (grouping == 2 && !grouping_mod))
1351			cpu_grouping = 2;
1352		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1353			cpu_grouping = 4;
1354		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1355			cpu_grouping = 8;
1356		else
1357			cpu_grouping = 16;
1358	} else
1359		cpu_grouping = 0;
1360
1361	loop = 0;
1362	reply_q = list_entry(ioc->reply_queue_list.next,
1363	     struct adapter_reply_queue, list);
1364	for_each_online_cpu(cpu_id) {
1365		if (!cpu_grouping) {
1366			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1367			reply_q = list_entry(reply_q->list.next,
1368			    struct adapter_reply_queue, list);
1369		} else {
1370			if (loop < cpu_grouping) {
1371				ioc->cpu_msix_table[cpu_id] =
1372					reply_q->msix_index;
1373				loop++;
1374			} else {
1375				reply_q = list_entry(reply_q->list.next,
1376				    struct adapter_reply_queue, list);
1377				ioc->cpu_msix_table[cpu_id] =
1378					reply_q->msix_index;
1379				loop = 1;
1380			}
1381		}
1382	}
1383}
1384
1385/**
1386 * _base_disable_msix - disables msix
1387 * @ioc: per adapter object
1388 *
1389 */
1390static void
1391_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1392{
1393	if (ioc->msix_enable) {
1394		pci_disable_msix(ioc->pdev);
1395		ioc->msix_enable = 0;
1396	}
1397}
1398
1399/**
1400 * _base_enable_msix - enable msix, fall back to io_apic
1401 * @ioc: per adapter object
1402 *
1403 */
1404static int
1405_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1406{
1407	struct msix_entry *entries, *a;
1408	int r;
1409	int i;
1410	u8 try_msix = 0;
1411
1412	INIT_LIST_HEAD(&ioc->reply_queue_list);
1413
1414	if (msix_disable == -1 || msix_disable == 0)
1415		try_msix = 1;
1416
1417	if (!try_msix)
1418		goto try_ioapic;
1419
1420	if (_base_check_enable_msix(ioc) != 0)
1421		goto try_ioapic;
1422
1423	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1424	    ioc->msix_vector_count);
1425
1426	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1427	    GFP_KERNEL);
1428	if (!entries) {
1429		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
1430		    "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
1431		    __LINE__, __func__));
1432		goto try_ioapic;
1433	}
1434
1435	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1436		a->entry = i;
1437
1438	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1439	if (r) {
1440		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1441		    "failed (r=%d) !!!\n", ioc->name, r));
1442		kfree(entries);
1443		goto try_ioapic;
1444	}
1445
1446	ioc->msix_enable = 1;
1447	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1448		r = _base_request_irq(ioc, i, a->vector);
1449		if (r) {
1450			_base_free_irq(ioc);
1451			_base_disable_msix(ioc);
1452			kfree(entries);
1453			goto try_ioapic;
1454		}
1455	}
1456
1457	kfree(entries);
1458	return 0;
1459
1460/* fall back to io_apic interrupt routing */
1461 try_ioapic:
1462
1463	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1464
1465	return r;
1466}
1467
1468/**
1469 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1470 * @ioc: per adapter object
1471 *
1472 * Returns 0 for success, non-zero for failure.
1473 */
1474int
1475mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1476{
1477	struct pci_dev *pdev = ioc->pdev;
1478	u32 memap_sz;
1479	u32 pio_sz;
1480	int i, r = 0;
1481	u64 pio_chip = 0;
1482	u64 chip_phys = 0;
1483	struct adapter_reply_queue *reply_q;
1484
1485	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1486	    ioc->name, __func__));
1487
1488	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1489	if (pci_enable_device_mem(pdev)) {
1490		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1491		    "failed\n", ioc->name);
1492		return -ENODEV;
1493	}
1494
1495
1496	if (pci_request_selected_regions(pdev, ioc->bars,
1497	    MPT2SAS_DRIVER_NAME)) {
1498		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1499		    "failed\n", ioc->name);
1500		r = -ENODEV;
1501		goto out_fail;
1502	}
1503
1504	/* AER (Advanced Error Reporting) hooks */
1505	pci_enable_pcie_error_reporting(pdev);
1506
1507	pci_set_master(pdev);
1508
1509	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1510		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1511		    ioc->name, pci_name(pdev));
1512		r = -ENODEV;
1513		goto out_fail;
1514	}
1515
1516	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1517		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1518			if (pio_sz)
1519				continue;
1520			pio_chip = (u64)pci_resource_start(pdev, i);
1521			pio_sz = pci_resource_len(pdev, i);
1522		} else {
1523			if (memap_sz)
1524				continue;
1525			/* verify memory resource is valid before using */
1526			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1527				ioc->chip_phys = pci_resource_start(pdev, i);
1528				chip_phys = (u64)ioc->chip_phys;
1529				memap_sz = pci_resource_len(pdev, i);
1530				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1531				if (ioc->chip == NULL) {
1532					printk(MPT2SAS_ERR_FMT "unable to map "
1533					    "adapter memory!\n", ioc->name);
1534					r = -EINVAL;
1535					goto out_fail;
1536				}
1537			}
1538		}
1539	}
1540
1541	_base_mask_interrupts(ioc);
1542	r = _base_enable_msix(ioc);
1543	if (r)
1544		goto out_fail;
1545
1546	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1547		printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1548		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1549		    "IO-APIC enabled"), reply_q->vector);
1550
1551	printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1552	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1553	printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
1554	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1555
1556	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1557	pci_save_state(pdev);
1558
1559	return 0;
1560
1561 out_fail:
1562	if (ioc->chip_phys)
1563		iounmap(ioc->chip);
1564	ioc->chip_phys = 0;
1565	pci_release_selected_regions(ioc->pdev, ioc->bars);
1566	pci_disable_pcie_error_reporting(pdev);
1567	pci_disable_device(pdev);
1568	return r;
1569}
1570
1571/**
1572 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1573 * @ioc: per adapter object
1574 * @smid: system request message index(smid zero is invalid)
1575 *
1576 * Returns virt pointer to message frame.
1577 */
1578void *
1579mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1580{
1581	return (void *)(ioc->request + (smid * ioc->request_sz));
1582}
1583
1584/**
1585 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1586 * @ioc: per adapter object
1587 * @smid: system request message index
1588 *
1589 * Returns virt pointer to sense buffer.
1590 */
1591void *
1592mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1593{
1594	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1595}
1596
1597/**
1598 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1599 * @ioc: per adapter object
1600 * @smid: system request message index
1601 *
1602 * Returns phys pointer to the low 32bit address of the sense buffer.
1603 */
1604__le32
1605mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1606{
1607	return cpu_to_le32(ioc->sense_dma +
1608			((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1609}
1610
1611/**
1612 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1613 * @ioc: per adapter object
1614 * @phys_addr: lower 32 physical addr of the reply
1615 *
1616 * Converts 32bit lower physical addr into a virt address.
1617 */
1618void *
1619mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1620{
1621	if (!phys_addr)
1622		return NULL;
1623	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1624}
1625
1626/**
1627 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1628 * @ioc: per adapter object
1629 * @cb_idx: callback index
1630 *
1631 * Returns smid (zero is invalid)
1632 */
1633u16
1634mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1635{
1636	unsigned long flags;
1637	struct request_tracker *request;
1638	u16 smid;
1639
1640	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1641	if (list_empty(&ioc->internal_free_list)) {
1642		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1643		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1644		    ioc->name, __func__);
1645		return 0;
1646	}
1647
1648	request = list_entry(ioc->internal_free_list.next,
1649	    struct request_tracker, tracker_list);
1650	request->cb_idx = cb_idx;
1651	smid = request->smid;
1652	list_del(&request->tracker_list);
1653	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1654	return smid;
1655}
1656
1657/**
1658 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1659 * @ioc: per adapter object
1660 * @cb_idx: callback index
1661 * @scmd: pointer to scsi command object
1662 *
1663 * Returns smid (zero is invalid)
1664 */
1665u16
1666mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1667    struct scsi_cmnd *scmd)
1668{
1669	unsigned long flags;
1670	struct scsiio_tracker *request;
1671	u16 smid;
1672
1673	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1674	if (list_empty(&ioc->free_list)) {
1675		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1676		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1677		    ioc->name, __func__);
1678		return 0;
1679	}
1680
1681	request = list_entry(ioc->free_list.next,
1682	    struct scsiio_tracker, tracker_list);
1683	request->scmd = scmd;
1684	request->cb_idx = cb_idx;
1685	smid = request->smid;
1686	list_del(&request->tracker_list);
1687	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1688	return smid;
1689}
1690
1691/**
1692 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1693 * @ioc: per adapter object
1694 * @cb_idx: callback index
1695 *
1696 * Returns smid (zero is invalid)
1697 */
1698u16
1699mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1700{
1701	unsigned long flags;
1702	struct request_tracker *request;
1703	u16 smid;
1704
1705	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1706	if (list_empty(&ioc->hpr_free_list)) {
1707		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1708		return 0;
1709	}
1710
1711	request = list_entry(ioc->hpr_free_list.next,
1712	    struct request_tracker, tracker_list);
1713	request->cb_idx = cb_idx;
1714	smid = request->smid;
1715	list_del(&request->tracker_list);
1716	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1717	return smid;
1718}
1719
1720
1721/**
1722 * mpt2sas_base_free_smid - put smid back on free_list
1723 * @ioc: per adapter object
1724 * @smid: system request message index
1725 *
1726 * Return nothing.
1727 */
1728void
1729mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1730{
1731	unsigned long flags;
1732	int i;
1733	struct chain_tracker *chain_req, *next;
1734
1735	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1736	if (smid < ioc->hi_priority_smid) {
1737		/* scsiio queue */
1738		i = smid - 1;
1739		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
1740			list_for_each_entry_safe(chain_req, next,
1741			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
1742				list_del_init(&chain_req->tracker_list);
1743				list_add_tail(&chain_req->tracker_list,
1744				    &ioc->free_chain_list);
1745			}
1746		}
1747		ioc->scsi_lookup[i].cb_idx = 0xFF;
1748		ioc->scsi_lookup[i].scmd = NULL;
1749		ioc->scsi_lookup[i].direct_io = 0;
1750		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1751		    &ioc->free_list);
1752		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1753
1754		/*
1755		 * See the _wait_for_commands_to_complete() call with regard
1756		 * to this code.
1757		 */
1758		if (ioc->shost_recovery && ioc->pending_io_count) {
1759			if (ioc->pending_io_count == 1)
1760				wake_up(&ioc->reset_wq);
1761			ioc->pending_io_count--;
1762		}
1763		return;
1764	} else if (smid < ioc->internal_smid) {
1765		/* hi-priority */
1766		i = smid - ioc->hi_priority_smid;
1767		ioc->hpr_lookup[i].cb_idx = 0xFF;
1768		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1769		    &ioc->hpr_free_list);
1770	} else if (smid <= ioc->hba_queue_depth) {
1771		/* internal queue */
1772		i = smid - ioc->internal_smid;
1773		ioc->internal_lookup[i].cb_idx = 0xFF;
1774		list_add_tail(&ioc->internal_lookup[i].tracker_list,
1775		    &ioc->internal_free_list);
1776	}
1777	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1778}
1779
1780/**
1781 * _base_writeq - 64 bit write to MMIO
1782 * @ioc: per adapter object
1783 * @b: data payload
1784 * @addr: address in MMIO space
1785 * @writeq_lock: spin lock
1786 *
1787 * Glue for handling an atomic 64 bit word write to MMIO. This special
1788 * handling takes care of 32 bit environments where it is not guaranteed that
1789 * the entire word is sent in one transfer.
1790 */
1791#ifndef writeq
1792static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1793    spinlock_t *writeq_lock)
1794{
1795	unsigned long flags;
1796	__u64 data_out = cpu_to_le64(b);
1797
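	/* the two 32-bit MMIO writes are performed under writeq_lock so that
	 * concurrent descriptor posts cannot interleave their halves
	 */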
1798	spin_lock_irqsave(writeq_lock, flags);
1799	writel((u32)(data_out), addr);
1800	writel((u32)(data_out >> 32), (addr + 4));
1801	spin_unlock_irqrestore(writeq_lock, flags);
1802}
1803#else
1804static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1805    spinlock_t *writeq_lock)
1806{
1807	writeq(cpu_to_le64(b), addr);
1808}
1809#endif
1810
1811static inline u8
1812_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1813{
1814	return ioc->cpu_msix_table[raw_smp_processor_id()];
1815}
1816
1817/**
1818 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1819 * @ioc: per adapter object
1820 * @smid: system request message index
1821 * @handle: device handle
1822 *
1823 * Return nothing.
1824 */
1825void
1826mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1827{
1828	Mpi2RequestDescriptorUnion_t descriptor;
1829	u64 *request = (u64 *)&descriptor;
1830
1831
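	/* the 8 byte request descriptor is posted to the firmware via a
	 * single 64-bit write to the RequestDescriptorPostLow/High register
	 * pair (see _base_writeq)
	 */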
1832	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1833	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
1834	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1835	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1836	descriptor.SCSIIO.LMID = 0;
1837	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1838	    &ioc->scsi_lookup_lock);
1839}
1840
1841
1842/**
1843 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1844 * @ioc: per adapter object
1845 * @smid: system request message index
1846 *
1847 * Return nothing.
1848 */
1849void
1850mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1851{
1852	Mpi2RequestDescriptorUnion_t descriptor;
1853	u64 *request = (u64 *)&descriptor;
1854
1855	descriptor.HighPriority.RequestFlags =
1856	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1857	descriptor.HighPriority.MSIxIndex =  0;
1858	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1859	descriptor.HighPriority.LMID = 0;
1860	descriptor.HighPriority.Reserved1 = 0;
1861	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1862	    &ioc->scsi_lookup_lock);
1863}
1864
1865/**
1866 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1867 * @ioc: per adapter object
1868 * @smid: system request message index
1869 *
1870 * Return nothing.
1871 */
1872void
1873mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1874{
1875	Mpi2RequestDescriptorUnion_t descriptor;
1876	u64 *request = (u64 *)&descriptor;
1877
1878	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1879	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
1880	descriptor.Default.SMID = cpu_to_le16(smid);
1881	descriptor.Default.LMID = 0;
1882	descriptor.Default.DescriptorTypeDependent = 0;
1883	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1884	    &ioc->scsi_lookup_lock);
1885}
1886
1887/**
1888 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1889 * @ioc: per adapter object
1890 * @smid: system request message index
1891 * @io_index: value used to track the IO
1892 *
1893 * Return nothing.
1894 */
1895void
1896mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1897    u16 io_index)
1898{
1899	Mpi2RequestDescriptorUnion_t descriptor;
1900	u64 *request = (u64 *)&descriptor;
1901
1902	descriptor.SCSITarget.RequestFlags =
1903	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1904	descriptor.SCSITarget.MSIxIndex =  _base_get_msix_index(ioc);
1905	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1906	descriptor.SCSITarget.LMID = 0;
1907	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1908	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1909	    &ioc->scsi_lookup_lock);
1910}
1911
1912/**
1913 * _base_display_dell_branding - Disply branding string
1914 * @ioc: per adapter object
1915 *
1916 * Return nothing.
1917 */
1918static void
1919_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1920{
1921	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1922
1923	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1924		return;
1925
1926	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1927	switch (ioc->pdev->subsystem_device) {
1928	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1929		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1930		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1931		break;
1932	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1933		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1934		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1935		break;
1936	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1937		strncpy(dell_branding,
1938		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1939		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1940		break;
1941	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1942		strncpy(dell_branding,
1943		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1944		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1945		break;
1946	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
1947		strncpy(dell_branding,
1948		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
1949		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1950		break;
1951	case MPT2SAS_DELL_PERC_H200_SSDID:
1952		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
1953		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1954		break;
1955	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
1956		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
1957		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1958		break;
1959	default:
1960		sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device);
1961		break;
1962	}
1963
1964	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
1965	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
1966	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
1967	    ioc->pdev->subsystem_device);
1968}
1969
1970/**
1971 * _base_display_intel_branding - Display branding string
1972 * @ioc: per adapter object
1973 *
1974 * Return nothing.
1975 */
1976static void
1977_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1978{
1979	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1980		return;
1981
1982	switch (ioc->pdev->device) {
1983	case MPI2_MFGPAGE_DEVID_SAS2008:
1984		switch (ioc->pdev->subsystem_device) {
1985		case MPT2SAS_INTEL_RMS2LL080_SSDID:
1986			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1987			    MPT2SAS_INTEL_RMS2LL080_BRANDING);
1988			break;
1989		case MPT2SAS_INTEL_RMS2LL040_SSDID:
1990			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1991			    MPT2SAS_INTEL_RMS2LL040_BRANDING);
1992			break;
1993		case MPT2SAS_INTEL_SSD910_SSDID:
1994			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1995			    MPT2SAS_INTEL_SSD910_BRANDING);
1996			break;
1997		default:
1998			break;
1999		}
		break;
2000	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2001		switch (ioc->pdev->subsystem_device) {
2002		case MPT2SAS_INTEL_RS25GB008_SSDID:
2003			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2004			    MPT2SAS_INTEL_RS25GB008_BRANDING);
2005			break;
2006		case MPT2SAS_INTEL_RMS25JB080_SSDID:
2007			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2008			    MPT2SAS_INTEL_RMS25JB080_BRANDING);
2009			break;
2010		case MPT2SAS_INTEL_RMS25JB040_SSDID:
2011			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2012			    MPT2SAS_INTEL_RMS25JB040_BRANDING);
2013			break;
2014		case MPT2SAS_INTEL_RMS25KB080_SSDID:
2015			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2016			    MPT2SAS_INTEL_RMS25KB080_BRANDING);
2017			break;
2018		case MPT2SAS_INTEL_RMS25KB040_SSDID:
2019			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2020			    MPT2SAS_INTEL_RMS25KB040_BRANDING);
2021			break;
2022		case MPT2SAS_INTEL_RMS25LB040_SSDID:
2023			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2024			    MPT2SAS_INTEL_RMS25LB040_BRANDING);
2025			break;
2026		case MPT2SAS_INTEL_RMS25LB080_SSDID:
2027			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2028			    MPT2SAS_INTEL_RMS25LB080_BRANDING);
2029			break;
2030		default:
2031			break;
2032		}
2033	default:
2034		break;
2035	}
2036}
2037
2038/**
2039 * _base_display_hp_branding - Display branding string
2040 * @ioc: per adapter object
2041 *
2042 * Return nothing.
2043 */
2044static void
2045_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc)
2046{
2047	if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID)
2048		return;
2049
2050	switch (ioc->pdev->device) {
2051	case MPI2_MFGPAGE_DEVID_SAS2004:
2052		switch (ioc->pdev->subsystem_device) {
2053		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2054			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2055			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2056			break;
2057		default:
2058			break;
2059		}
		break;
2060	case MPI2_MFGPAGE_DEVID_SAS2308_2:
2061		switch (ioc->pdev->subsystem_device) {
2062		case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2063			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2064			    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2065			break;
2066		case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2067			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2068			    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2069			break;
2070		case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2071			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2072			    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2073			break;
2074		case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2075			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2076			    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2077			break;
2078		default:
2079			break;
2080		}
2081	default:
2082		break;
2083	}
2084}
2085
2086/**
2087 * _base_display_ioc_capabilities - Display IOC's capabilities.
2088 * @ioc: per adapter object
2089 *
2090 * Return nothing.
2091 */
2092static void
2093_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2094{
2095	int i = 0;
2096	char desc[17] = {0};
2097	u32 iounit_pg1_flags;
2098	u32 bios_version;
2099
2100	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2101	strncpy(desc, ioc->manu_pg0.ChipName, 16);
2102	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
2103	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2104	    ioc->name, desc,
2105	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2106	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2107	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2108	   ioc->facts.FWVersion.Word & 0x000000FF,
2109	   ioc->pdev->revision,
2110	   (bios_version & 0xFF000000) >> 24,
2111	   (bios_version & 0x00FF0000) >> 16,
2112	   (bios_version & 0x0000FF00) >> 8,
2113	    bios_version & 0x000000FF);
2114
2115	_base_display_dell_branding(ioc);
2116	_base_display_intel_branding(ioc);
2117	_base_display_hp_branding(ioc);
2118
2119	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
2120
2121	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2122		printk("Initiator");
2123		i++;
2124	}
2125
2126	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2127		printk("%sTarget", i ? "," : "");
2128		i++;
2129	}
2130
2131	i = 0;
2132	printk("), ");
2133	printk("Capabilities=(");
2134
2135	if (!ioc->hide_ir_msg) {
2136		if (ioc->facts.IOCCapabilities &
2137		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2138			printk("Raid");
2139			i++;
2140		}
2141	}
2142
2143	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2144		printk("%sTLR", i ? "," : "");
2145		i++;
2146	}
2147
2148	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2149		printk("%sMulticast", i ? "," : "");
2150		i++;
2151	}
2152
2153	if (ioc->facts.IOCCapabilities &
2154	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2155		printk("%sBIDI Target", i ? "," : "");
2156		i++;
2157	}
2158
2159	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2160		printk("%sEEDP", i ? "," : "");
2161		i++;
2162	}
2163
2164	if (ioc->facts.IOCCapabilities &
2165	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2166		printk("%sSnapshot Buffer", i ? "," : "");
2167		i++;
2168	}
2169
2170	if (ioc->facts.IOCCapabilities &
2171	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2172		printk("%sDiag Trace Buffer", i ? "," : "");
2173		i++;
2174	}
2175
2176	if (ioc->facts.IOCCapabilities &
2177	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2178		printk("%sDiag Extended Buffer", i ? "," : "");
2179		i++;
2180	}
2181
2182	if (ioc->facts.IOCCapabilities &
2183	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2184		printk("%sTask Set Full", i ? "," : "");
2185		i++;
2186	}
2187
2188	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2189	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2190		printk("%sNCQ", i ? "," : "");
2191		i++;
2192	}
2193
2194	printk(")\n");
2195}
2196
2197/**
2198 * mpt2sas_base_update_missing_delay - change the missing delay timers
2199 * @ioc: per adapter object
2200 * @device_missing_delay: amount of time till device is reported missing
2201 * @io_missing_delay: interval IO is returned when there is a missing device
2202 *
2203 * Return nothing.
2204 *
2205 * Using the values passed on the command line, this function modifies the
2206 * device missing delay as well as the io missing delay. This should be
2207 * called at driver load time.
2208 */
2209void
2210mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2211	u16 device_missing_delay, u8 io_missing_delay)
2212{
2213	u16 dmd, dmd_new, dmd_original;
2214	u8 io_missing_delay_original;
2215	u16 sz;
2216	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2217	Mpi2ConfigReply_t mpi_reply;
2218	u8 num_phys = 0;
2219	u16 ioc_status;
2220
2221	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
2222	if (!num_phys)
2223		return;
2224
2225	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2226	    sizeof(Mpi2SasIOUnit1PhyData_t));
2227	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2228	if (!sas_iounit_pg1) {
2229		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2230		    ioc->name, __FILE__, __LINE__, __func__);
2231		goto out;
2232	}
2233	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2234	    sas_iounit_pg1, sz))) {
2235		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2236		    ioc->name, __FILE__, __LINE__, __func__);
2237		goto out;
2238	}
2239	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2240	    MPI2_IOCSTATUS_MASK;
2241	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2242		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2243		    ioc->name, __FILE__, __LINE__, __func__);
2244		goto out;
2245	}
2246
2247	/* device missing delay */
2248	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2249	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2250		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2251	else
2252		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2253	dmd_original = dmd;
2254	if (device_missing_delay > 0x7F) {
2255		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2256		    device_missing_delay;
2257		dmd = dmd / 16;
2258		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2259	} else
2260		dmd = device_missing_delay;
2261	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2262
2263	/* io missing delay */
2264	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2265	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2266
2267	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2268	    sz)) {
2269		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2270			dmd_new = (dmd &
2271			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2272		else
2273			dmd_new =
2274		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2275		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
2276		    "new(%d)\n", ioc->name, dmd_original, dmd_new);
2277		printk(MPT2SAS_INFO_FMT "io_missing_delay: old(%d), "
2278		    "new(%d)\n", ioc->name, io_missing_delay_original,
2279		    io_missing_delay);
2280		ioc->device_missing_delay = dmd_new;
2281		ioc->io_missing_delay = io_missing_delay;
2282	}
2283
2284out:
2285	kfree(sas_iounit_pg1);
2286}
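
/*
 * Worked example of the ReportDeviceMissingDelay encoding above (the delay
 * value is hypothetical): delays of 0x7F seconds or less are programmed
 * directly in 1 second units; larger values are capped at 0x7F0, divided by
 * 16 and stored with MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, so the
 * effective delay is rounded down to a multiple of 16 seconds.  With
 * device_missing_delay = 300: dmd = 300 / 16 = 18 is written, and the value
 * reported back (dmd_new) is 18 * 16 = 288 seconds.
 */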
2287
2288/**
2289 * _base_static_config_pages - static start of day config pages
2290 * @ioc: per adapter object
2291 *
2292 * Return nothing.
2293 */
2294static void
2295_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
2296{
2297	Mpi2ConfigReply_t mpi_reply;
2298	u32 iounit_pg1_flags;
2299
2300	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2301	if (ioc->ir_firmware)
2302		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2303		    &ioc->manu_pg10);
2304	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2305	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2306	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2307	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2308	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2309	_base_display_ioc_capabilities(ioc);
2310
2311	/*
2312	 * Enable task_set_full handling in iounit_pg1 when the
2313	 * facts capabilities indicate that it is supported.
2314	 */
2315	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2316	if ((ioc->facts.IOCCapabilities &
2317	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2318		iounit_pg1_flags &=
2319		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2320	else
2321		iounit_pg1_flags |=
2322		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2323	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2324	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2325
2326}
2327
2328/**
2329 * _base_release_memory_pools - release memory
2330 * @ioc: per adapter object
2331 *
2332 * Free memory allocated from _base_allocate_memory_pools.
2333 *
2334 * Return nothing.
2335 */
2336static void
2337_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2338{
2339	int i;
2340
2341	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2342	    __func__));
2343
2344	if (ioc->request) {
2345		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2346		    ioc->request,  ioc->request_dma);
2347		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
2348		    ": free\n", ioc->name, ioc->request));
2349		ioc->request = NULL;
2350	}
2351
2352	if (ioc->sense) {
2353		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2354		if (ioc->sense_dma_pool)
2355			pci_pool_destroy(ioc->sense_dma_pool);
2356		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
2357		    ": free\n", ioc->name, ioc->sense));
2358		ioc->sense = NULL;
2359	}
2360
2361	if (ioc->reply) {
2362		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2363		if (ioc->reply_dma_pool)
2364			pci_pool_destroy(ioc->reply_dma_pool);
2365		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
2366		     ": free\n", ioc->name, ioc->reply));
2367		ioc->reply = NULL;
2368	}
2369
2370	if (ioc->reply_free) {
2371		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2372		    ioc->reply_free_dma);
2373		if (ioc->reply_free_dma_pool)
2374			pci_pool_destroy(ioc->reply_free_dma_pool);
2375		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
2376		    "(0x%p): free\n", ioc->name, ioc->reply_free));
2377		ioc->reply_free = NULL;
2378	}
2379
2380	if (ioc->reply_post_free) {
2381		pci_pool_free(ioc->reply_post_free_dma_pool,
2382		    ioc->reply_post_free, ioc->reply_post_free_dma);
2383		if (ioc->reply_post_free_dma_pool)
2384			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2385		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2386		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2387		    ioc->reply_post_free));
2388		ioc->reply_post_free = NULL;
2389	}
2390
2391	if (ioc->config_page) {
2392		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2393		    "config_page(0x%p): free\n", ioc->name,
2394		    ioc->config_page));
2395		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2396		    ioc->config_page, ioc->config_page_dma);
2397	}
2398
2399	if (ioc->scsi_lookup) {
2400		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2401		ioc->scsi_lookup = NULL;
2402	}
2403	kfree(ioc->hpr_lookup);
2404	kfree(ioc->internal_lookup);
2405	if (ioc->chain_lookup) {
2406		for (i = 0; i < ioc->chain_depth; i++) {
2407			if (ioc->chain_lookup[i].chain_buffer)
2408				pci_pool_free(ioc->chain_dma_pool,
2409				    ioc->chain_lookup[i].chain_buffer,
2410				    ioc->chain_lookup[i].chain_buffer_dma);
2411		}
2412		if (ioc->chain_dma_pool)
2413			pci_pool_destroy(ioc->chain_dma_pool);
2414		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2415		ioc->chain_lookup = NULL;
2416	}
2417}
2418
2419
2420/**
2421 * _base_allocate_memory_pools - allocate start of day memory pools
2422 * @ioc: per adapter object
2423 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2424 *
2425 * Returns 0 success, anything else error
2426 */
2427static int
2428_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
2429{
2430	struct mpt2sas_facts *facts;
2431	u16 max_sge_elements;
2432	u16 chains_needed_per_io;
2433	u32 sz, total_sz, reply_post_free_sz;
2434	u32 retry_sz;
2435	u16 max_request_credit;
2436	int i;
2437
2438	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2439	    __func__));
2440
2441	retry_sz = 0;
2442	facts = &ioc->facts;
2443
2444	/* command line tunables  for max sgl entries */
2445	if (max_sgl_entries != -1) {
2446		ioc->shost->sg_tablesize = (max_sgl_entries <
2447		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
2448		    MPT2SAS_SG_DEPTH;
2449	} else {
2450		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
2451	}
2452
2453	/* command line tunables  for max controller queue depth */
2454	if (max_queue_depth != -1 && max_queue_depth != 0) {
2455		max_request_credit = min_t(u16, max_queue_depth +
2456			ioc->hi_priority_depth + ioc->internal_depth,
2457			facts->RequestCredit);
2458		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2459			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2460	} else
2461		max_request_credit = min_t(u16, facts->RequestCredit,
2462		    MAX_HBA_QUEUE_DEPTH);
2463
2464	ioc->hba_queue_depth = max_request_credit;
2465	ioc->hi_priority_depth = facts->HighPriorityCredit;
2466	ioc->internal_depth = ioc->hi_priority_depth + 5;
2467
2468	/* request frame size */
2469	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2470
2471	/* reply frame size */
2472	ioc->reply_sz = facts->ReplyFrameSize * 4;
2473
2474 retry_allocation:
2475	total_sz = 0;
2476	/* calculate number of sg elements left over in the 1st frame */
2477	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2478	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
2479	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
2480
2481	/* now do the same for a chain buffer */
2482	max_sge_elements = ioc->request_sz - ioc->sge_size;
2483	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
2484
2485	ioc->chain_offset_value_for_main_message =
2486	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
2487	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
2488
2489	/*
2490	 *  MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2491	 */
2492	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2493	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2494	    + 1;
2495	if (chains_needed_per_io > facts->MaxChainDepth) {
2496		chains_needed_per_io = facts->MaxChainDepth;
2497		ioc->shost->sg_tablesize = min_t(u16,
2498		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2499		* chains_needed_per_io), ioc->shost->sg_tablesize);
2500	}
2501	ioc->chains_needed_per_io = chains_needed_per_io;
2502
2503	/* reply free queue sizing - taking into account for 64 FW events */
2504	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2505
2506	/* calculate reply descriptor post queue depth */
2507	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
2508					ioc->reply_free_queue_depth +  1;
2509	/* align the reply post queue on the next 16 count boundary */
2510	if (ioc->reply_post_queue_depth % 16)
2511		ioc->reply_post_queue_depth += 16 -
2512			(ioc->reply_post_queue_depth % 16);
2513
2514
2515	if (ioc->reply_post_queue_depth >
2516	    facts->MaxReplyDescriptorPostQueueDepth) {
2517		ioc->reply_post_queue_depth =
2518			facts->MaxReplyDescriptorPostQueueDepth -
2519		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
2520		ioc->hba_queue_depth =
2521			((ioc->reply_post_queue_depth - 64) / 2) - 1;
2522		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2523	}
2524
2525	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2526	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2527	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2528	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2529	    ioc->chains_needed_per_io));
2530
2531	ioc->scsiio_depth = ioc->hba_queue_depth -
2532	    ioc->hi_priority_depth - ioc->internal_depth;
2533
2534	/* set the scsi host can_queue depth,
2535	 * leaving room for internal commands that could be outstanding
2536	 */
2537	ioc->shost->can_queue = ioc->scsiio_depth;
2538	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2539	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2540
2541	/* contiguous pool for request and chains, 16 byte align, one extra
2542	 * frame for smid=0
2543	 */
2544	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2545	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2546
2547	/* hi-priority queue */
2548	sz += (ioc->hi_priority_depth * ioc->request_sz);
2549
2550	/* internal queue */
2551	sz += (ioc->internal_depth * ioc->request_sz);
2552
2553	ioc->request_dma_sz = sz;
2554	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2555	if (!ioc->request) {
2556		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2557		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2558		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2559		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2560		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2561			goto out;
2562		retry_sz += 64;
2563		ioc->hba_queue_depth = max_request_credit - retry_sz;
2564		goto retry_allocation;
2565	}
2566
2567	if (retry_sz)
2568		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2569		    "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2570		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2571		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2572
2573
2574	/* hi-priority queue */
2575	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2576	    ioc->request_sz);
2577	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2578	    ioc->request_sz);
2579
2580	/* internal queue */
2581	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2582	    ioc->request_sz);
2583	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2584	    ioc->request_sz);
2585
2586
2587	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2588	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2589	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2590	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2591	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2592	    ioc->name, (unsigned long long) ioc->request_dma));
2593	total_sz += sz;
2594
2595	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2596	ioc->scsi_lookup_pages = get_order(sz);
2597	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2598	    GFP_KERNEL, ioc->scsi_lookup_pages);
2599	if (!ioc->scsi_lookup) {
2600		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
2601		    "sz(%d)\n", ioc->name, (int)sz);
2602		goto out;
2603	}
2604
2605	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2606	    "depth(%d)\n", ioc->name, ioc->request,
2607	    ioc->scsiio_depth));
2608
2609	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2610	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2611	ioc->chain_pages = get_order(sz);
2612
2613	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2614	    GFP_KERNEL, ioc->chain_pages);
2615	if (!ioc->chain_lookup) {
2616		printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
2617		    "sz(%d)\n", ioc->name, (int)sz);
2618		goto out;
2619	}
2620	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2621	    ioc->request_sz, 16, 0);
2622	if (!ioc->chain_dma_pool) {
2623		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
2624		    "failed\n", ioc->name);
2625		goto out;
2626	}
2627	for (i = 0; i < ioc->chain_depth; i++) {
2628		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2629		    ioc->chain_dma_pool , GFP_KERNEL,
2630		    &ioc->chain_lookup[i].chain_buffer_dma);
2631		if (!ioc->chain_lookup[i].chain_buffer) {
2632			ioc->chain_depth = i;
2633			goto chain_done;
2634		}
2635		total_sz += ioc->request_sz;
2636	}
2637chain_done:
2638	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
2639	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2640	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2641	    ioc->request_sz))/1024));
2642
2643	/* initialize hi-priority queue smid's */
2644	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2645	    sizeof(struct request_tracker), GFP_KERNEL);
2646	if (!ioc->hpr_lookup) {
2647		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2648		    ioc->name);
2649		goto out;
2650	}
2651	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2652	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2653	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2654	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2655
2656	/* initialize internal queue smid's */
2657	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2658	    sizeof(struct request_tracker), GFP_KERNEL);
2659	if (!ioc->internal_lookup) {
2660		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2661		    ioc->name);
2662		goto out;
2663	}
2664	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2665	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2666	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2667	     ioc->internal_depth, ioc->internal_smid));
2668
2669	/* sense buffers, 4 byte align */
2670	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2671	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2672	    0);
2673	if (!ioc->sense_dma_pool) {
2674		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2675		    ioc->name);
2676		goto out;
2677	}
2678	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2679	    &ioc->sense_dma);
2680	if (!ioc->sense) {
2681		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2682		    ioc->name);
2683		goto out;
2684	}
2685	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2686	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2687	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2688	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2689	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2690	    ioc->name, (unsigned long long)ioc->sense_dma));
2691	total_sz += sz;
2692
2693	/* reply pool, 4 byte align */
2694	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2695	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2696	    0);
2697	if (!ioc->reply_dma_pool) {
2698		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2699		    ioc->name);
2700		goto out;
2701	}
2702	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2703	    &ioc->reply_dma);
2704	if (!ioc->reply) {
2705		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2706		    ioc->name);
2707		goto out;
2708	}
2709	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2710	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2711	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2712	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2713	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2714	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2715	    ioc->name, (unsigned long long)ioc->reply_dma));
2716	total_sz += sz;
2717
2718	/* reply free queue, 16 byte align */
2719	sz = ioc->reply_free_queue_depth * 4;
2720	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2721	    ioc->pdev, sz, 16, 0);
2722	if (!ioc->reply_free_dma_pool) {
2723		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2724		    "failed\n", ioc->name);
2725		goto out;
2726	}
2727	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2728	    &ioc->reply_free_dma);
2729	if (!ioc->reply_free) {
2730		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2731		    "failed\n", ioc->name);
2732		goto out;
2733	}
2734	memset(ioc->reply_free, 0, sz);
2735	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2736	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2737	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2738	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2739	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2740	total_sz += sz;
2741
2742	/* reply post queue, 16 byte align */
2743	reply_post_free_sz = ioc->reply_post_queue_depth *
2744	    sizeof(Mpi2DefaultReplyDescriptor_t);
2745	if (_base_is_controller_msix_enabled(ioc))
2746		sz = reply_post_free_sz * ioc->reply_queue_count;
2747	else
2748		sz = reply_post_free_sz;
2749	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2750	    ioc->pdev, sz, 16, 0);
2751	if (!ioc->reply_post_free_dma_pool) {
2752		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2753		    "failed\n", ioc->name);
2754		goto out;
2755	}
2756	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2757	    GFP_KERNEL, &ioc->reply_post_free_dma);
2758	if (!ioc->reply_post_free) {
2759		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2760		    "failed\n", ioc->name);
2761		goto out;
2762	}
2763	memset(ioc->reply_post_free, 0, sz);
2764	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2765	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2766	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2767	    sz/1024));
2768	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2769	    "(0x%llx)\n", ioc->name, (unsigned long long)
2770	    ioc->reply_post_free_dma));
2771	total_sz += sz;
2772
2773	ioc->config_page_sz = 512;
2774	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2775	    ioc->config_page_sz, &ioc->config_page_dma);
2776	if (!ioc->config_page) {
2777		printk(MPT2SAS_ERR_FMT "config page: pci_alloc_consistent "
2778		    "failed\n", ioc->name);
2779		goto out;
2780	}
2781	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2782	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2783	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2784	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2785	total_sz += ioc->config_page_sz;
2786
2787	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2788	    ioc->name, total_sz/1024);
2789	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2790	    "Max Controller Queue Depth(%d)\n",
2791	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2792	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2793	    ioc->name, ioc->shost->sg_tablesize);
2794	return 0;
2795
2796 out:
2797	return -ENOMEM;
2798}
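
/*
 * Sizing sketch for the contiguous request pool allocated above, using
 * hypothetical depths (scsiio_depth = 979, hi_priority_depth = 8,
 * internal_depth = 13) and a hypothetical request_sz of 128 bytes:
 *
 *	sz = (scsiio_depth + 1) * request_sz	->  980 * 128
 *	   + hi_priority_depth * request_sz	->    8 * 128
 *	   + internal_depth * request_sz	->   13 * 128
 *	   = 1001 * 128 = 128128 bytes (~125 kB)
 *
 * The hi-priority region starts at offset (scsiio_depth + 1) * request_sz
 * and the internal region follows it, which is how ioc->hi_priority and
 * ioc->internal are derived from ioc->request above.
 */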
2799
2800
2801/**
2802 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2803 * @ioc: Pointer to MPT2SAS_ADAPTER structure
2804 * @cooked: Request raw or cooked IOC state
2805 *
2806 * Returns all IOC Doorbell register bits if cooked==0, else just the
2807 * Doorbell bits in MPI2_IOC_STATE_MASK.
2808 */
2809u32
2810mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2811{
2812	u32 s, sc;
2813
2814	s = readl(&ioc->chip->Doorbell);
2815	sc = s & MPI2_IOC_STATE_MASK;
2816	return cooked ? sc : s;
2817}
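
/*
 * Example of the cooked vs. raw distinction (illustrative only): callers
 * that only care about the state machine pass cooked = 1 and compare the
 * result against MPI2_IOC_STATE_* values, e.g.
 *
 *	if (mpt2sas_base_get_iocstate(ioc, 1) == MPI2_IOC_STATE_OPERATIONAL)
 *		...
 *
 * whereas cooked = 0 returns the whole Doorbell register, which also carries
 * the fault code when the IOC is in the FAULT state.
 */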
2818
2819/**
2820 * _base_wait_on_iocstate - waiting on a particular ioc state
2821 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2822 * @timeout: timeout in seconds
2823 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2824 *
2825 * Returns 0 for success, non-zero for failure.
2826 */
2827static int
2828_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2829    int sleep_flag)
2830{
2831	u32 count, cntdn;
2832	u32 current_state;
2833
2834	count = 0;
2835	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2836	do {
2837		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2838		if (current_state == ioc_state)
2839			return 0;
2840		if (count && current_state == MPI2_IOC_STATE_FAULT)
2841			break;
2842		if (sleep_flag == CAN_SLEEP)
2843			msleep(1);
2844		else
2845			udelay(500);
2846		count++;
2847	} while (--cntdn);
2848
2849	return current_state;
2850}
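
/*
 * Timing note for the polling loops in this file: with CAN_SLEEP the loop
 * runs up to 1000 * timeout iterations of msleep(1), and with NO_SLEEP up to
 * 2000 * timeout iterations of udelay(500), so either way @timeout is
 * roughly the number of seconds waited before giving up.
 */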
2851
2852/**
2853 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
2854 * a write to the doorbell)
2855 * @ioc: per adapter object
2856 * @timeout: timeout in seconds
2857 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2858 *
2859 * Returns 0 for success, non-zero for failure.
2860 *
2861 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2862 */
2863static int
2864_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2865    int sleep_flag)
2866{
2867	u32 cntdn, count;
2868	u32 int_status;
2869
2870	count = 0;
2871	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2872	do {
2873		int_status = readl(&ioc->chip->HostInterruptStatus);
2874		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2875			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2876			    "successful count(%d), timeout(%d)\n", ioc->name,
2877			    __func__, count, timeout));
2878			return 0;
2879		}
2880		if (sleep_flag == CAN_SLEEP)
2881			msleep(1);
2882		else
2883			udelay(500);
2884		count++;
2885	} while (--cntdn);
2886
2887	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2888	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2889	return -EFAULT;
2890}
2891
2892/**
2893 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2894 * @ioc: per adapter object
2895 * @timeout: timeout in seconds
2896 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2897 *
2898 * Returns 0 for success, non-zero for failure.
2899 *
2900 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2901 * doorbell.
2902 */
2903static int
2904_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2905    int sleep_flag)
2906{
2907	u32 cntdn, count;
2908	u32 int_status;
2909	u32 doorbell;
2910
2911	count = 0;
2912	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2913	do {
2914		int_status = readl(&ioc->chip->HostInterruptStatus);
2915		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2916			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2917			    "successful count(%d), timeout(%d)\n", ioc->name,
2918			    __func__, count, timeout));
2919			return 0;
2920		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2921			doorbell = readl(&ioc->chip->Doorbell);
2922			if ((doorbell & MPI2_IOC_STATE_MASK) ==
2923			    MPI2_IOC_STATE_FAULT) {
2924				mpt2sas_base_fault_info(ioc , doorbell);
2925				return -EFAULT;
2926			}
2927		} else if (int_status == 0xFFFFFFFF)
2928			goto out;
2929
2930		if (sleep_flag == CAN_SLEEP)
2931			msleep(1);
2932		else
2933			udelay(500);
2934		count++;
2935	} while (--cntdn);
2936
2937 out:
2938	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2939	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2940	return -EFAULT;
2941}
2942
2943/**
2944 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2945 * @ioc: per adapter object
2946 * @timeout: timeout in seconds
2947 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2948 *
2949 * Returns 0 for success, non-zero for failure.
2950 *
2951 */
2952static int
2953_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2954    int sleep_flag)
2955{
2956	u32 cntdn, count;
2957	u32 doorbell_reg;
2958
2959	count = 0;
2960	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2961	do {
2962		doorbell_reg = readl(&ioc->chip->Doorbell);
2963		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2964			dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2965			    "successful count(%d), timeout(%d)\n", ioc->name,
2966			    __func__, count, timeout));
2967			return 0;
2968		}
2969		if (sleep_flag == CAN_SLEEP)
2970			msleep(1);
2971		else
2972			udelay(500);
2973		count++;
2974	} while (--cntdn);
2975
2976	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2977	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2978	return -EFAULT;
2979}
2980
2981/**
2982 * _base_send_ioc_reset - send doorbell reset
2983 * @ioc: per adapter object
2984 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
2985 * @timeout: timeout in seconds
2986 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2987 *
2988 * Returns 0 for success, non-zero for failure.
2989 */
2990static int
2991_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2992    int sleep_flag)
2993{
2994	u32 ioc_state;
2995	int r = 0;
2996
2997	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2998		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2999		    ioc->name, __func__);
3000		return -EFAULT;
3001	}
3002
3003	if (!(ioc->facts.IOCCapabilities &
3004	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3005		return -EFAULT;
3006
3007	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
3008
3009	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3010	    &ioc->chip->Doorbell);
3011	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3012		r = -EFAULT;
3013		goto out;
3014	}
3015	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3016	    timeout, sleep_flag);
3017	if (ioc_state) {
3018		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3019		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3020		r = -EFAULT;
3021		goto out;
3022	}
3023 out:
3024	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
3025	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3026	return r;
3027}
3028
3029/**
3030 * _base_handshake_req_reply_wait - send request thru doorbell interface
3031 * @ioc: per adapter object
3032 * @request_bytes: request length
3033 * @request: pointer having request payload
3034 * @reply_bytes: reply length
3035 * @reply: pointer to reply payload
3036 * @timeout: timeout in seconds
3037 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3038 *
3039 * Returns 0 for success, non-zero for failure.
3040 */
3041static int
3042_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
3043    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3044{
3045	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3046	int i;
3047	u8 failed;
3048	u16 dummy;
3049	__le32 *mfp;
3050
3051	/* make sure doorbell is not in use */
3052	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3053		printk(MPT2SAS_ERR_FMT "doorbell is in use "
3054		    " (line=%d)\n", ioc->name, __LINE__);
3055		return -EFAULT;
3056	}
3057
3058	/* clear pending doorbell interrupts from previous state changes */
3059	if (readl(&ioc->chip->HostInterruptStatus) &
3060	    MPI2_HIS_IOC2SYS_DB_STATUS)
3061		writel(0, &ioc->chip->HostInterruptStatus);
3062
3063	/* send message to ioc */
3064	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3065	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3066	    &ioc->chip->Doorbell);
3067
3068	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3069		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3070		   "int failed (line=%d)\n", ioc->name, __LINE__);
3071		return -EFAULT;
3072	}
3073	writel(0, &ioc->chip->HostInterruptStatus);
3074
3075	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3076		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3077		    "ack failed (line=%d)\n", ioc->name, __LINE__);
3078		return -EFAULT;
3079	}
3080
3081	/* send message 32-bits at a time */
3082	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3083		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3084		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3085			failed = 1;
3086	}
3087
3088	if (failed) {
3089		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3090		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
3091		return -EFAULT;
3092	}
3093
3094	/* now wait for the reply */
3095	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3096		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3097		   "int failed (line=%d)\n", ioc->name, __LINE__);
3098		return -EFAULT;
3099	}
3100
3101	/* read the first two 16-bit words; these include the total reply length */
3102	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3103	    & MPI2_DOORBELL_DATA_MASK);
3104	writel(0, &ioc->chip->HostInterruptStatus);
3105	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3106		printk(MPT2SAS_ERR_FMT "doorbell handshake "
3107		   "int failed (line=%d)\n", ioc->name, __LINE__);
3108		return -EFAULT;
3109	}
3110	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3111	    & MPI2_DOORBELL_DATA_MASK);
3112	writel(0, &ioc->chip->HostInterruptStatus);
3113
3114	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3115		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3116			printk(MPT2SAS_ERR_FMT "doorbell "
3117			    "handshake int failed (line=%d)\n", ioc->name,
3118			    __LINE__);
3119			return -EFAULT;
3120		}
3121		if (i >=  reply_bytes/2) /* overflow case */
3122			dummy = readl(&ioc->chip->Doorbell);
3123		else
3124			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3125			    & MPI2_DOORBELL_DATA_MASK);
3126		writel(0, &ioc->chip->HostInterruptStatus);
3127	}
3128
3129	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3130	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3131		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
3132		    " (line=%d)\n", ioc->name, __LINE__));
3133	}
3134	writel(0, &ioc->chip->HostInterruptStatus);
3135
3136	if (ioc->logging_level & MPT_DEBUG_INIT) {
3137		mfp = (__le32 *)reply;
3138		printk(KERN_INFO "\toffset:data\n");
3139		for (i = 0; i < reply_bytes/4; i++)
3140			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3141			    le32_to_cpu(mfp[i]));
3142	}
3143	return 0;
3144}
3145
3146/**
3147 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
3148 * @ioc: per adapter object
3149 * @mpi_reply: the reply payload from FW
3150 * @mpi_request: the request payload sent to FW
3151 *
3152 * The SAS IO Unit Control Request message allows the host to perform low-level
3153 * operations, such as resets on the PHYs of the IO Unit. It also allows the
3154 * host to obtain the IOC-assigned device handle for a device when it has other
3155 * identifying information about the device, and to remove IOC resources
3156 * associated with the device.
3157 *
3158 * Returns 0 for success, non-zero for failure.
3159 */
3160int
3161mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
3162    Mpi2SasIoUnitControlReply_t *mpi_reply,
3163    Mpi2SasIoUnitControlRequest_t *mpi_request)
3164{
3165	u16 smid;
3166	u32 ioc_state;
3167	unsigned long timeleft;
3168	u8 issue_reset = 0;
3169	int rc;
3170	void *request;
3171	u16 wait_state_count;
3172
3173	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3174	    __func__));
3175
3176	mutex_lock(&ioc->base_cmds.mutex);
3177
3178	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3179		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3180		    ioc->name, __func__);
3181		rc = -EAGAIN;
3182		goto out;
3183	}
3184
3185	wait_state_count = 0;
3186	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3187	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3188		if (wait_state_count++ == 10) {
3189			printk(MPT2SAS_ERR_FMT
3190			    "%s: failed due to ioc not operational\n",
3191			    ioc->name, __func__);
3192			rc = -EFAULT;
3193			goto out;
3194		}
3195		ssleep(1);
3196		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3197		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3198		    "operational state(count=%d)\n", ioc->name,
3199		    __func__, wait_state_count);
3200	}
3201
3202	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3203	if (!smid) {
3204		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3205		    ioc->name, __func__);
3206		rc = -EAGAIN;
3207		goto out;
3208	}
3209
3210	rc = 0;
3211	ioc->base_cmds.status = MPT2_CMD_PENDING;
3212	request = mpt2sas_base_get_msg_frame(ioc, smid);
3213	ioc->base_cmds.smid = smid;
3214	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3215	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3216	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3217		ioc->ioc_link_reset_in_progress = 1;
3218	init_completion(&ioc->base_cmds.done);
3219	mpt2sas_base_put_smid_default(ioc, smid);
3220	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3221	    msecs_to_jiffies(10000));
3222	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3223	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3224	    ioc->ioc_link_reset_in_progress)
3225		ioc->ioc_link_reset_in_progress = 0;
3226	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3227		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3228		    ioc->name, __func__);
3229		_debug_dump_mf(mpi_request,
3230		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3231		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3232			issue_reset = 1;
3233		goto issue_host_reset;
3234	}
3235	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3236		memcpy(mpi_reply, ioc->base_cmds.reply,
3237		    sizeof(Mpi2SasIoUnitControlReply_t));
3238	else
3239		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3240	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3241	goto out;
3242
3243 issue_host_reset:
3244	if (issue_reset)
3245		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3246		    FORCE_BIG_HAMMER);
3247	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3248	rc = -EFAULT;
3249 out:
3250	mutex_unlock(&ioc->base_cmds.mutex);
3251	return rc;
3252}
3253
3254
3255/**
3256 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
3257 * @ioc: per adapter object
3258 * @mpi_reply: the reply payload from FW
3259 * @mpi_request: the request payload sent to FW
3260 *
3261 * The SCSI Enclosure Processor request message causes the IOC to
3262 * communicate with SES devices to control LED status signals.
3263 *
3264 * Returns 0 for success, non-zero for failure.
3265 */
3266int
3267mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
3268    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3269{
3270	u16 smid;
3271	u32 ioc_state;
3272	unsigned long timeleft;
3273	u8 issue_reset = 0;
3274	int rc;
3275	void *request;
3276	u16 wait_state_count;
3277
3278	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3279	    __func__));
3280
3281	mutex_lock(&ioc->base_cmds.mutex);
3282
3283	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
3284		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
3285		    ioc->name, __func__);
3286		rc = -EAGAIN;
3287		goto out;
3288	}
3289
3290	wait_state_count = 0;
3291	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3292	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3293		if (wait_state_count++ == 10) {
3294			printk(MPT2SAS_ERR_FMT
3295			    "%s: failed due to ioc not operational\n",
3296			    ioc->name, __func__);
3297			rc = -EFAULT;
3298			goto out;
3299		}
3300		ssleep(1);
3301		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
3302		printk(MPT2SAS_INFO_FMT "%s: waiting for "
3303		    "operational state(count=%d)\n", ioc->name,
3304		    __func__, wait_state_count);
3305	}
3306
3307	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3308	if (!smid) {
3309		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3310		    ioc->name, __func__);
3311		rc = -EAGAIN;
3312		goto out;
3313	}
3314
3315	rc = 0;
3316	ioc->base_cmds.status = MPT2_CMD_PENDING;
3317	request = mpt2sas_base_get_msg_frame(ioc, smid);
3318	ioc->base_cmds.smid = smid;
3319	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
3320	init_completion(&ioc->base_cmds.done);
3321	mpt2sas_base_put_smid_default(ioc, smid);
3322	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3323	    msecs_to_jiffies(10000));
3324	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3325		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3326		    ioc->name, __func__);
3327		_debug_dump_mf(mpi_request,
3328		    sizeof(Mpi2SepRequest_t)/4);
3329		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
3330			issue_reset = 1;
3331		goto issue_host_reset;
3332	}
3333	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
3334		memcpy(mpi_reply, ioc->base_cmds.reply,
3335		    sizeof(Mpi2SepReply_t));
3336	else
3337		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3338	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3339	goto out;
3340
3341 issue_host_reset:
3342	if (issue_reset)
3343		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3344		    FORCE_BIG_HAMMER);
3345	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3346	rc = -EFAULT;
3347 out:
3348	mutex_unlock(&ioc->base_cmds.mutex);
3349	return rc;
3350}
3351
3352/**
3353 * _base_get_port_facts - obtain port facts reply and save in ioc
3354 * @ioc: per adapter object
3355 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3356 *
3357 * Returns 0 for success, non-zero for failure.
3358 */
3359static int
3360_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3361{
3362	Mpi2PortFactsRequest_t mpi_request;
3363	Mpi2PortFactsReply_t mpi_reply;
3364	struct mpt2sas_port_facts *pfacts;
3365	int mpi_reply_sz, mpi_request_sz, r;
3366
3367	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3368	    __func__));
3369
3370	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3371	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3372	memset(&mpi_request, 0, mpi_request_sz);
3373	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3374	mpi_request.PortNumber = port;
3375	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3376	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3377
3378	if (r != 0) {
3379		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3380		    ioc->name, __func__, r);
3381		return r;
3382	}
3383
3384	pfacts = &ioc->pfacts[port];
3385	memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
3386	pfacts->PortNumber = mpi_reply.PortNumber;
3387	pfacts->VP_ID = mpi_reply.VP_ID;
3388	pfacts->VF_ID = mpi_reply.VF_ID;
3389	pfacts->MaxPostedCmdBuffers =
3390	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3391
3392	return 0;
3393}
3394
3395/**
3396 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3397 * @ioc: per adapter object
3398 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3399 *
3400 * Returns 0 for success, non-zero for failure.
3401 */
3402static int
3403_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3404{
3405	Mpi2IOCFactsRequest_t mpi_request;
3406	Mpi2IOCFactsReply_t mpi_reply;
3407	struct mpt2sas_facts *facts;
3408	int mpi_reply_sz, mpi_request_sz, r;
3409
3410	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3411	    __func__));
3412
3413	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3414	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3415	memset(&mpi_request, 0, mpi_request_sz);
3416	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3417	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3418	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3419
3420	if (r != 0) {
3421		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3422		    ioc->name, __func__, r);
3423		return r;
3424	}
3425
3426	facts = &ioc->facts;
3427	memset(facts, 0, sizeof(struct mpt2sas_facts));
3428	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3429	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3430	facts->VP_ID = mpi_reply.VP_ID;
3431	facts->VF_ID = mpi_reply.VF_ID;
3432	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3433	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3434	facts->WhoInit = mpi_reply.WhoInit;
3435	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3436	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3437	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3438	facts->MaxReplyDescriptorPostQueueDepth =
3439	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3440	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3441	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3442	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3443		ioc->ir_firmware = 1;
3444	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3445	facts->IOCRequestFrameSize =
3446	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3447	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3448	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3449	ioc->shost->max_id = -1;
3450	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3451	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3452	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3453	facts->HighPriorityCredit =
3454	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3455	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3456	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3457
3458	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
3459	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
3460	    facts->MaxChainDepth));
3461	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
3462	    "reply frame size(%d)\n", ioc->name,
3463	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3464	return 0;
3465}
3466
3467/**
3468 * _base_send_ioc_init - send ioc_init to firmware
3469 * @ioc: per adapter object
3470 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3471 *
3472 * Returns 0 for success, non-zero for failure.
3473 */
3474static int
3475_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3476{
3477	Mpi2IOCInitRequest_t mpi_request;
3478	Mpi2IOCInitReply_t mpi_reply;
3479	int r;
3480	struct timeval current_time;
3481	u16 ioc_status;
3482
3483	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3484	    __func__));
3485
3486	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3487	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3488	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3489	mpi_request.VF_ID = 0; /* TODO */
3490	mpi_request.VP_ID = 0;
3491	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3492	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3493
3494	if (_base_is_controller_msix_enabled(ioc))
3495		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3496	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3497	mpi_request.ReplyDescriptorPostQueueDepth =
3498	    cpu_to_le16(ioc->reply_post_queue_depth);
3499	mpi_request.ReplyFreeQueueDepth =
3500	    cpu_to_le16(ioc->reply_free_queue_depth);
3501
3502	mpi_request.SenseBufferAddressHigh =
3503	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3504	mpi_request.SystemReplyAddressHigh =
3505	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3506	mpi_request.SystemRequestFrameBaseAddress =
3507	    cpu_to_le64((u64)ioc->request_dma);
3508	mpi_request.ReplyFreeQueueAddress =
3509	    cpu_to_le64((u64)ioc->reply_free_dma);
3510	mpi_request.ReplyDescriptorPostQueueAddress =
3511	    cpu_to_le64((u64)ioc->reply_post_free_dma);
3512
3513
3514	/* This time stamp specifies number of milliseconds
3515	 * since epoch ~ midnight January 1, 1970.
3516	 */
3517	do_gettimeofday(&current_time);
3518	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3519	    (current_time.tv_usec / 1000));
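	/* Worked example (illustrative): tv_sec = 1262304000, tv_usec = 500000
	 * gives 1262304000 * 1000 + 500 = 1262304000500 ms since the epoch.
	 */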
3520
3521	if (ioc->logging_level & MPT_DEBUG_INIT) {
3522		__le32 *mfp;
3523		int i;
3524
3525		mfp = (__le32 *)&mpi_request;
3526		printk(KERN_INFO "\toffset:data\n");
3527		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3528			printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
3529			    le32_to_cpu(mfp[i]));
3530	}
3531
3532	r = _base_handshake_req_reply_wait(ioc,
3533	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3534	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3535	    sleep_flag);
3536
3537	if (r != 0) {
3538		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3539		    ioc->name, __func__, r);
3540		return r;
3541	}
3542
3543	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3544	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3545	    mpi_reply.IOCLogInfo) {
3546		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3547		r = -EIO;
3548	}
3549
3550	return r;
3551}
3552
3553/**
3554 * mpt2sas_port_enable_done - command completion routine for port enable
3555 * @ioc: per adapter object
3556 * @smid: system request message index
3557 * @msix_index: MSIX table index supplied by the OS
3558 * @reply: reply message frame(lower 32bit addr)
3559 *
3560 * Return 1 meaning mf should be freed from _base_interrupt
3561 *        0 means the mf is freed from this function.
3562 */
3563u8
3564mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3565	u32 reply)
3566{
3567	MPI2DefaultReply_t *mpi_reply;
3568	u16 ioc_status;
3569
3570	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3571	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
3572		return 1;
3573
3574	if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED)
3575		return 1;
3576
3577	ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE;
3578	if (mpi_reply) {
3579		ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID;
3580		memcpy(ioc->port_enable_cmds.reply, mpi_reply,
3581		    mpi_reply->MsgLength*4);
3582	}
3583	ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING;
3584
3585	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3586
3587	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3588		ioc->port_enable_failed = 1;
3589
3590	if (ioc->is_driver_loading) {
3591		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3592			mpt2sas_port_enable_complete(ioc);
3593			return 1;
3594		} else {
3595			ioc->start_scan_failed = ioc_status;
3596			ioc->start_scan = 0;
3597			return 1;
3598		}
3599	}
3600	complete(&ioc->port_enable_cmds.done);
3601	return 1;
3602}
3603
3604
3605/**
3606 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3607 * @ioc: per adapter object
3608 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3609 *
3610 * Returns 0 for success, non-zero for failure.
3611 */
3612static int
3613_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3614{
3615	Mpi2PortEnableRequest_t *mpi_request;
3616	Mpi2PortEnableReply_t *mpi_reply;
3617	unsigned long timeleft;
3618	int r = 0;
3619	u16 smid;
3620	u16 ioc_status;
3621
3622	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3623
3624	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3625		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3626		    ioc->name, __func__);
3627		return -EAGAIN;
3628	}
3629
3630	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3631	if (!smid) {
3632		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3633		    ioc->name, __func__);
3634		return -EAGAIN;
3635	}
3636
3637	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3638	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3639	ioc->port_enable_cmds.smid = smid;
3640	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3641	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3642
3643	init_completion(&ioc->port_enable_cmds.done);
3644	mpt2sas_base_put_smid_default(ioc, smid);
3645	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3646	    300*HZ);
3647	if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) {
3648		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3649		    ioc->name, __func__);
3650		_debug_dump_mf(mpi_request,
3651		    sizeof(Mpi2PortEnableRequest_t)/4);
3652		if (ioc->port_enable_cmds.status & MPT2_CMD_RESET)
3653			r = -EFAULT;
3654		else
3655			r = -ETIME;
3656		goto out;
3657	}
3658	mpi_reply = ioc->port_enable_cmds.reply;
3659
3660	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3661	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3662		printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n",
3663		    ioc->name, __func__, ioc_status);
3664		r = -EFAULT;
3665		goto out;
3666	}
3667 out:
3668	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
3669	printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3670	    "SUCCESS" : "FAILED"));
3671	return r;
3672}
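
/*
 * Note (illustrative): on a timeout the command status distinguishes an
 * ordinary timeout (-ETIME) from the case where a host reset ran while the
 * request was outstanding; _base_reset_handler() marks pending commands
 * with MPT2_CMD_RESET, which is reported here as -EFAULT.
 */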
3673
3674/**
3675 * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply)
3676 * @ioc: per adapter object
3677 *
3678 * Returns 0 for success, non-zero for failure.
3679 */
3680int
3681mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc)
3682{
3683	Mpi2PortEnableRequest_t *mpi_request;
3684	u16 smid;
3685
3686	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3687
3688	if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
3689		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3690		    ioc->name, __func__);
3691		return -EAGAIN;
3692	}
3693
3694	smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3695	if (!smid) {
3696		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3697		    ioc->name, __func__);
3698		return -EAGAIN;
3699	}
3700
3701	ioc->port_enable_cmds.status = MPT2_CMD_PENDING;
3702	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3703	ioc->port_enable_cmds.smid = smid;
3704	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3705	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3706
3707	mpt2sas_base_put_smid_default(ioc, smid);
3708	return 0;
3709}
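
/*
 * Usage note (illustrative): unlike _base_send_port_enable() above, this
 * asynchronous variant only posts the request and does not sleep on
 * port_enable_cmds.done.  Completion is handled later by
 * mpt2sas_port_enable_done(), which, while ioc->is_driver_loading is set,
 * either calls mpt2sas_port_enable_complete() or records the failure in
 * ioc->start_scan_failed.
 */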
3710
3711/**
3712 * _base_determine_wait_on_discovery - disposition
3713 * @ioc: per adapter object
3714 *
3715 * Decide whether to wait on discovery to complete. Used to either
3716 * locate boot device, or report volumes ahead of physical devices.
3717 *
3718 * Returns 1 for wait, 0 for don't wait
3719 */
3720static int
3721_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc)
3722{
3723	/* We wait for discovery to complete if IR firmware is loaded.
3724	 * The sas topology events arrive before PD events, so we need time to
3725	 * turn on the bit in ioc->pd_handles to indicate a PD.
3726	 * Also, it may be required to report Volumes ahead of physical
3727	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3728	 */
3729	if (ioc->ir_firmware)
3730		return 1;
3731
3732	/* if no Bios, then we don't need to wait */
3733	if (!ioc->bios_pg3.BiosVersion)
3734		return 0;
3735
3736	/* The Bios is present, so we fall through to the checks below.
3737	 *
3738	 * If there are any entries in Bios Page 2, then we wait
3739	 * for discovery to complete.
3740	 */
3741
3742	/* Current Boot Device */
3743	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3744	    MPI2_BIOSPAGE2_FORM_MASK) ==
3745	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3746	/* Request Boot Device */
3747	   (ioc->bios_pg2.ReqBootDeviceForm &
3748	    MPI2_BIOSPAGE2_FORM_MASK) ==
3749	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3750	/* Alternate Request Boot Device */
3751	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3752	    MPI2_BIOSPAGE2_FORM_MASK) ==
3753	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3754		return 0;
3755
3756	return 1;
3757}
3758
3759
3760/**
3761 * _base_unmask_events - turn on notification for this event
3762 * @ioc: per adapter object
3763 * @event: firmware event
3764 *
3765 * The mask is stored in ioc->event_masks.
3766 */
3767static void
3768_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3769{
3770	u32 desired_event;
3771
3772	if (event >= 128)
3773		return;
3774
3775	desired_event = (1 << (event % 32));
3776
3777	if (event < 32)
3778		ioc->event_masks[0] &= ~desired_event;
3779	else if (event < 64)
3780		ioc->event_masks[1] &= ~desired_event;
3781	else if (event < 96)
3782		ioc->event_masks[2] &= ~desired_event;
3783	else if (event < 128)
3784		ioc->event_masks[3] &= ~desired_event;
3785}
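
/*
 * Illustration: the 128 possible events are spread across four 32-bit mask
 * words.  Event 33 (0x21), for example, lives in event_masks[1] and is
 * unmasked by clearing bit 1, since 33 / 32 == 1 and 1 << (33 % 32) == 0x2.
 * Event values of 128 and above are ignored.
 */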
3786
3787/**
3788 * _base_event_notification - send event notification
3789 * @ioc: per adapter object
3790 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3791 *
3792 * Returns 0 for success, non-zero for failure.
3793 */
3794static int
3795_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3796{
3797	Mpi2EventNotificationRequest_t *mpi_request;
3798	unsigned long timeleft;
3799	u16 smid;
3800	int r = 0;
3801	int i;
3802
3803	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3804	    __func__));
3805
3806	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3807		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3808		    ioc->name, __func__);
3809		return -EAGAIN;
3810	}
3811
3812	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3813	if (!smid) {
3814		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3815		    ioc->name, __func__);
3816		return -EAGAIN;
3817	}
3818	ioc->base_cmds.status = MPT2_CMD_PENDING;
3819	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3820	ioc->base_cmds.smid = smid;
3821	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3822	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3823	mpi_request->VF_ID = 0; /* TODO */
3824	mpi_request->VP_ID = 0;
3825	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3826		mpi_request->EventMasks[i] =
3827		    cpu_to_le32(ioc->event_masks[i]);
3828	init_completion(&ioc->base_cmds.done);
3829	mpt2sas_base_put_smid_default(ioc, smid);
3830	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3831	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3832		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3833		    ioc->name, __func__);
3834		_debug_dump_mf(mpi_request,
3835		    sizeof(Mpi2EventNotificationRequest_t)/4);
3836		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3837			r = -EFAULT;
3838		else
3839			r = -ETIME;
3840	} else
3841		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n",
3842		    ioc->name, __func__));
3843	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3844	return r;
3845}
3846
3847/**
3848 * mpt2sas_base_validate_event_type - validating event types
3849 * @ioc: per adapter object
3850 * @event_type: requested event type bitmask, one u32 per event mask word
3851 *
3852 * This turns on firmware event notification when an application
3853 * asks for a given event. Events that are already enabled are left unmasked.
3854 */
3855void
3856mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
3857{
3858	int i, j;
3859	u32 event_mask, desired_event;
3860	u8 send_update_to_fw;
3861
3862	for (i = 0, send_update_to_fw = 0; i <
3863	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3864		event_mask = ~event_type[i];
3865		desired_event = 1;
3866		for (j = 0; j < 32; j++) {
3867			if (!(event_mask & desired_event) &&
3868			    (ioc->event_masks[i] & desired_event)) {
3869				ioc->event_masks[i] &= ~desired_event;
3870				send_update_to_fw = 1;
3871			}
3872			desired_event = (desired_event << 1);
3873		}
3874	}
3875
3876	if (!send_update_to_fw)
3877		return;
3878
3879	mutex_lock(&ioc->base_cmds.mutex);
3880	_base_event_notification(ioc, CAN_SLEEP);
3881	mutex_unlock(&ioc->base_cmds.mutex);
3882}
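
/*
 * Example (illustrative): event_type[] from the application has a bit set
 * for every event it wants delivered, while ioc->event_masks[] has a bit
 * set for every event currently masked off.  Requesting word 0, bit 22
 * (event_type[0] = 0x00400000) while that bit is still set in
 * event_masks[0] clears it and triggers a single _base_event_notification()
 * to re-program the firmware; requesting an already-unmasked event sends
 * nothing.
 */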
3883
3884/**
3885 * _base_diag_reset - the "big hammer" start of day reset
3886 * @ioc: per adapter object
3887 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3888 *
3889 * Returns 0 for success, non-zero for failure.
3890 */
3891static int
3892_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3893{
3894	u32 host_diagnostic;
3895	u32 ioc_state;
3896	u32 count;
3897	u32 hcb_size;
3898
3899	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3900	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3901	    ioc->name));
3902
3903	count = 0;
3904	do {
3905		/* Write magic sequence to WriteSequence register
3906		 * Loop until in diagnostic mode
3907		 */
3908		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic "
3909		    "sequence\n", ioc->name));
3910		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3911		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
3912		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
3913		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
3914		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
3915		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
3916		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
3917
3918		/* wait 100 msec */
3919		if (sleep_flag == CAN_SLEEP)
3920			msleep(100);
3921		else
3922			mdelay(100);
3923
3924		if (count++ > 20)
3925			goto out;
3926
3927		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3928		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic "
3929		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
3930		    ioc->name, count, host_diagnostic));
3931
3932	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
3933
3934	hcb_size = readl(&ioc->chip->HCBSize);
3935
3936	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n",
3937	    ioc->name));
3938	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3939	     &ioc->chip->HostDiagnostic);
3940
3941	/* This delay allows the chip PCIe hardware time to finish reset tasks */
3942	if (sleep_flag == CAN_SLEEP)
3943		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
3944	else
3945		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
3946
3947	/* Approximately 300 second max wait */
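	/* (loop bound = 300,000,000 usec divided by the per-read delay; each
	 *  iteration sleeps that same delay, so the total is roughly 300 s)
	 */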
3948	for (count = 0; count < (300000000 /
3949	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
3950
3951		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3952
3953		if (host_diagnostic == 0xFFFFFFFF)
3954			goto out;
3955		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
3956			break;
3957
3958		/* Wait to pass the second read delay window */
3959		if (sleep_flag == CAN_SLEEP)
3960			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
3961			       /1000);
3962		else
3963			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
3964			       /1000);
3965	}
3966
3967	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3968
3969		drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter "
3970		    "assuming the HCB Address points to good F/W\n",
3971		    ioc->name));
3972		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3973		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3974		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3975
3976		drsprintk(ioc, printk(MPT2SAS_INFO_FMT
3977		    "re-enable the HCDW\n", ioc->name));
3978		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3979		    &ioc->chip->HCBSize);
3980	}
3981
3982	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n",
3983	    ioc->name));
3984	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3985	    &ioc->chip->HostDiagnostic);
3986
3987	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable writes to the "
3988	    "diagnostic register\n", ioc->name));
3989	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3990
3991	drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the "
3992	    "READY state\n", ioc->name));
3993	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3994	    sleep_flag);
3995	if (ioc_state) {
3996		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3997		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3998		goto out;
3999	}
4000
4001	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
4002	return 0;
4003
4004 out:
4005	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
4006	return -EFAULT;
4007}
4008
4009/**
4010 * _base_make_ioc_ready - put controller in READY state
4011 * @ioc: per adapter object
4012 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4013 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4014 *
4015 * Returns 0 for success, non-zero for failure.
4016 */
4017static int
4018_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4019    enum reset_type type)
4020{
4021	u32 ioc_state;
4022	int rc;
4023
4024	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4025	    __func__));
4026
4027	if (ioc->pci_error_recovery)
4028		return 0;
4029
4030	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4031	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
4032	    ioc->name, __func__, ioc_state));
4033
4034	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4035		return 0;
4036
4037	if (ioc_state & MPI2_DOORBELL_USED) {
4038		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
4039		    "active!\n", ioc->name));
4040		goto issue_diag_reset;
4041	}
4042
4043	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4044		mpt2sas_base_fault_info(ioc, ioc_state &
4045		    MPI2_DOORBELL_DATA_MASK);
4046		goto issue_diag_reset;
4047	}
4048
4049	if (type == FORCE_BIG_HAMMER)
4050		goto issue_diag_reset;
4051
4052	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4053		if (!(_base_send_ioc_reset(ioc,
4054		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4055			ioc->ioc_reset_count++;
4056			return 0;
4057	}
4058
4059 issue_diag_reset:
4060	rc = _base_diag_reset(ioc, CAN_SLEEP);
4061	ioc->ioc_reset_count++;
4062	return rc;
4063}
4064
4065/**
4066 * _base_make_ioc_operational - put controller in OPERATIONAL state
4067 * @ioc: per adapter object
4068 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4069 *
4070 * Returns 0 for success, non-zero for failure.
4071 */
4072static int
4073_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4074{
4075	int r, i;
4076	unsigned long	flags;
4077	u32 reply_address;
4078	u16 smid;
4079	struct _tr_list *delayed_tr, *delayed_tr_next;
4080	u8 hide_flag;
4081	struct adapter_reply_queue *reply_q;
4082	long reply_post_free;
4083	u32 reply_post_free_sz;
4084
4085	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4086	    __func__));
4087
4088	/* clean the delayed target reset list */
4089	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4090	    &ioc->delayed_tr_list, list) {
4091		list_del(&delayed_tr->list);
4092		kfree(delayed_tr);
4093	}
4094
4095	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4096	    &ioc->delayed_tr_volume_list, list) {
4097		list_del(&delayed_tr->list);
4098		kfree(delayed_tr);
4099	}
4100
4101	/* initialize the scsi lookup free list */
4102	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4103	INIT_LIST_HEAD(&ioc->free_list);
4104	smid = 1;
4105	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4106		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4107		ioc->scsi_lookup[i].cb_idx = 0xFF;
4108		ioc->scsi_lookup[i].smid = smid;
4109		ioc->scsi_lookup[i].scmd = NULL;
4110		ioc->scsi_lookup[i].direct_io = 0;
4111		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4112		    &ioc->free_list);
4113	}
4114
4115	/* hi-priority queue */
4116	INIT_LIST_HEAD(&ioc->hpr_free_list);
4117	smid = ioc->hi_priority_smid;
4118	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4119		ioc->hpr_lookup[i].cb_idx = 0xFF;
4120		ioc->hpr_lookup[i].smid = smid;
4121		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4122		    &ioc->hpr_free_list);
4123	}
4124
4125	/* internal queue */
4126	INIT_LIST_HEAD(&ioc->internal_free_list);
4127	smid = ioc->internal_smid;
4128	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4129		ioc->internal_lookup[i].cb_idx = 0xFF;
4130		ioc->internal_lookup[i].smid = smid;
4131		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4132		    &ioc->internal_free_list);
4133	}
4134
4135	/* chain pool */
4136	INIT_LIST_HEAD(&ioc->free_chain_list);
4137	for (i = 0; i < ioc->chain_depth; i++)
4138		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4139		    &ioc->free_chain_list);
4140
4141	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4142
4143	/* initialize Reply Free Queue */
4144	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4145	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4146	    ioc->reply_sz)
4147		ioc->reply_free[i] = cpu_to_le32(reply_address);
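	/* Illustration: with reply_dma at 0x1000 and a reply_sz of 128 bytes,
	 * the free list is filled with 0x1000, 0x1080, 0x1100, ... so each
	 * entry addresses one reply frame.
	 */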
4148
4149	/* initialize reply queues */
4150	if (ioc->is_driver_loading)
4151		_base_assign_reply_queues(ioc);
4152
4153	/* initialize Reply Post Free Queue */
4154	reply_post_free = (long)ioc->reply_post_free;
4155	reply_post_free_sz = ioc->reply_post_queue_depth *
4156	    sizeof(Mpi2DefaultReplyDescriptor_t);
4157	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4158		reply_q->reply_post_host_index = 0;
4159		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4160		    reply_post_free;
4161		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4162			reply_q->reply_post_free[i].Words =
4163							cpu_to_le64(ULLONG_MAX);
4164		if (!_base_is_controller_msix_enabled(ioc))
4165			goto skip_init_reply_post_free_queue;
4166		reply_post_free += reply_post_free_sz;
4167	}
4168 skip_init_reply_post_free_queue:
4169
4170	r = _base_send_ioc_init(ioc, sleep_flag);
4171	if (r)
4172		return r;
4173
4174	/* initialize reply free host index */
4175	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4176	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4177
4178	/* initialize reply post host index */
4179	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4180		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4181		    &ioc->chip->ReplyPostHostIndex);
4182		if (!_base_is_controller_msix_enabled(ioc))
4183			goto skip_init_reply_post_host_index;
4184	}
4185
4186 skip_init_reply_post_host_index:
4187
4188	_base_unmask_interrupts(ioc);
4189
4190	r = _base_event_notification(ioc, sleep_flag);
4191	if (r)
4192		return r;
4193
4194	if (sleep_flag == CAN_SLEEP)
4195		_base_static_config_pages(ioc);
4196
4197
4198	if (ioc->is_driver_loading) {
4199		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
4200		    == 0x80) {
4201			hide_flag = (u8) (
4202			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
4203			    MFG_PAGE10_HIDE_SSDS_MASK);
4204			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
4205				ioc->mfg_pg10_hide_flag = hide_flag;
4206		}
4207		ioc->wait_for_discovery_to_complete =
4208		    _base_determine_wait_on_discovery(ioc);
4209		return r; /* scan_start and scan_finished support */
4210	}
4211	r = _base_send_port_enable(ioc, sleep_flag);
4212	if (r)
4213		return r;
4214
4215	return r;
4216}
4217
4218/**
4219 * mpt2sas_base_free_resources - free controller resources (io/irq/memmap)
4220 * @ioc: per adapter object
4221 *
4222 * Return nothing.
4223 */
4224void
4225mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4226{
4227	struct pci_dev *pdev = ioc->pdev;
4228
4229	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4230	    __func__));
4231
4232	_base_mask_interrupts(ioc);
4233	ioc->shost_recovery = 1;
4234	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4235	ioc->shost_recovery = 0;
4236	_base_free_irq(ioc);
4237	_base_disable_msix(ioc);
4238	if (ioc->chip_phys)
4239		iounmap(ioc->chip);
4240	ioc->chip_phys = 0;
4241	pci_release_selected_regions(ioc->pdev, ioc->bars);
4242	pci_disable_pcie_error_reporting(pdev);
4243	pci_disable_device(pdev);
4244	return;
4245}
4246
4247/**
4248 * mpt2sas_base_attach - attach controller instance
4249 * @ioc: per adapter object
4250 *
4251 * Returns 0 for success, non-zero for failure.
4252 */
4253int
4254mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4255{
4256	int r, i;
4257	int cpu_id, last_cpu_id = 0;
4258
4259	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4260	    __func__));
4261
4262	/* setup cpu_msix_table */
4263	ioc->cpu_count = num_online_cpus();
4264	for_each_online_cpu(cpu_id)
4265		last_cpu_id = cpu_id;
4266	ioc->cpu_msix_table_sz = last_cpu_id + 1;
4267	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4268	ioc->reply_queue_count = 1;
4269	if (!ioc->cpu_msix_table) {
4270		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
4271		    "cpu_msix_table failed!!!\n", ioc->name));
4272		r = -ENOMEM;
4273		goto out_free_resources;
4274	}
4275
4276	if (ioc->is_warpdrive) {
4277		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
4278		    sizeof(resource_size_t *), GFP_KERNEL);
4279		if (!ioc->reply_post_host_index) {
4280			dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
4281				"for reply_post_host_index failed!!!\n", ioc->name));
4282			r = -ENOMEM;
4283			goto out_free_resources;
4284		}
4285	}
4286
4287	r = mpt2sas_base_map_resources(ioc);
4288	if (r)
4289		goto out_free_resources;
4290
4291	if (ioc->is_warpdrive) {
4292		ioc->reply_post_host_index[0] =
4293		    (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
4294
4295		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
4296			ioc->reply_post_host_index[i] = (resource_size_t *)
4297			((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
4298			* 4)));
4299	}
4300
4301	pci_set_drvdata(ioc->pdev, ioc->shost);
4302	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4303	if (r)
4304		goto out_free_resources;
4305
4306	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4307	if (r)
4308		goto out_free_resources;
4309
4310	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4311	    sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
4312	if (!ioc->pfacts) {
4313		r = -ENOMEM;
4314		goto out_free_resources;
4315	}
4316
4317	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4318		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4319		if (r)
4320			goto out_free_resources;
4321	}
4322
4323	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4324	if (r)
4325		goto out_free_resources;
4326
4327	init_waitqueue_head(&ioc->reset_wq);
4328	/* allocate memory pd handle bitmask list */
4329	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4330	if (ioc->facts.MaxDevHandle % 8)
4331		ioc->pd_handles_sz++;
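	/* Example: MaxDevHandle = 1030 gives 1030 / 8 = 128 plus one byte for
	 * the remainder, i.e. a 129-byte (1032-bit) bitmap.
	 */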
4332	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4333	    GFP_KERNEL);
4334	if (!ioc->pd_handles) {
4335		r = -ENOMEM;
4336		goto out_free_resources;
4337	}
4338	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4339	    GFP_KERNEL);
4340	if (!ioc->blocking_handles) {
4341		r = -ENOMEM;
4342		goto out_free_resources;
4343	}
4344	ioc->fwfault_debug = mpt2sas_fwfault_debug;
4345
4346	/* base internal command bits */
4347	mutex_init(&ioc->base_cmds.mutex);
4348	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4349	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
4350
4351	/* port_enable command bits */
4352	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4353	ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED;
4354
4355	/* transport internal command bits */
4356	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4357	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
4358	mutex_init(&ioc->transport_cmds.mutex);
4359
4360	/* scsih internal command bits */
4361	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4362	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4363	mutex_init(&ioc->scsih_cmds.mutex);
4364
4365	/* task management internal command bits */
4366	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4367	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4368	mutex_init(&ioc->tm_cmds.mutex);
4369
4370	/* config page internal command bits */
4371	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4372	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
4373	mutex_init(&ioc->config_cmds.mutex);
4374
4375	/* ctl module internal command bits */
4376	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4377	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4378	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
4379	mutex_init(&ioc->ctl_cmds.mutex);
4380
4381	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4382	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4383	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4384	    !ioc->ctl_cmds.sense) {
4385		r = -ENOMEM;
4386		goto out_free_resources;
4387	}
4388
4396	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4397		ioc->event_masks[i] = -1;
4398
4399	/* here we enable the events we care about */
4400	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4401	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4402	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4403	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4404	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4405	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4406	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4407	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4408	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4409	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4410	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4411	if (r)
4412		goto out_free_resources;
4413
4414	ioc->non_operational_loop = 0;
4415
4416	return 0;
4417
4418 out_free_resources:
4419
4420	ioc->remove_host = 1;
4421	mpt2sas_base_free_resources(ioc);
4422	_base_release_memory_pools(ioc);
4423	pci_set_drvdata(ioc->pdev, NULL);
4424	kfree(ioc->cpu_msix_table);
4425	if (ioc->is_warpdrive)
4426		kfree(ioc->reply_post_host_index);
4427	kfree(ioc->pd_handles);
4428	kfree(ioc->blocking_handles);
4429	kfree(ioc->tm_cmds.reply);
4430	kfree(ioc->transport_cmds.reply);
4431	kfree(ioc->scsih_cmds.reply);
4432	kfree(ioc->config_cmds.reply);
4433	kfree(ioc->base_cmds.reply);
4434	kfree(ioc->port_enable_cmds.reply);
4435	kfree(ioc->ctl_cmds.reply);
4436	kfree(ioc->ctl_cmds.sense);
4437	kfree(ioc->pfacts);
4438	ioc->ctl_cmds.reply = NULL;
4439	ioc->base_cmds.reply = NULL;
4440	ioc->tm_cmds.reply = NULL;
4441	ioc->scsih_cmds.reply = NULL;
4442	ioc->transport_cmds.reply = NULL;
4443	ioc->config_cmds.reply = NULL;
4444	ioc->pfacts = NULL;
4445	return r;
4446}
4447
4448
4449/**
4450 * mpt2sas_base_detach - remove controller instance
4451 * @ioc: per adapter object
4452 *
4453 * Return nothing.
4454 */
4455void
4456mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4457{
4458
4459	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4460	    __func__));
4461
4462	mpt2sas_base_stop_watchdog(ioc);
4463	mpt2sas_base_free_resources(ioc);
4464	_base_release_memory_pools(ioc);
4465	pci_set_drvdata(ioc->pdev, NULL);
4466	kfree(ioc->cpu_msix_table);
4467	if (ioc->is_warpdrive)
4468		kfree(ioc->reply_post_host_index);
4469	kfree(ioc->pd_handles);
4470	kfree(ioc->blocking_handles);
4471	kfree(ioc->pfacts);
4472	kfree(ioc->ctl_cmds.reply);
4473	kfree(ioc->ctl_cmds.sense);
4474	kfree(ioc->base_cmds.reply);
4475	kfree(ioc->port_enable_cmds.reply);
4476	kfree(ioc->tm_cmds.reply);
4477	kfree(ioc->transport_cmds.reply);
4478	kfree(ioc->scsih_cmds.reply);
4479	kfree(ioc->config_cmds.reply);
4480}
4481
4482/**
4483 * _base_reset_handler - reset callback handler (for base)
4484 * @ioc: per adapter object
4485 * @reset_phase: phase
4486 *
4487 * The handler for doing any required cleanup or initialization.
4488 *
4489 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
4490 * MPT2_IOC_DONE_RESET
4491 *
4492 * Return nothing.
4493 */
4494static void
4495_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4496{
4497	mpt2sas_scsih_reset_handler(ioc, reset_phase);
4498	mpt2sas_ctl_reset_handler(ioc, reset_phase);
4499	switch (reset_phase) {
4500	case MPT2_IOC_PRE_RESET:
4501		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4502		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
4503		break;
4504	case MPT2_IOC_AFTER_RESET:
4505		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4506		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
4507		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
4508			ioc->transport_cmds.status |= MPT2_CMD_RESET;
4509			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4510			complete(&ioc->transport_cmds.done);
4511		}
4512		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
4513			ioc->base_cmds.status |= MPT2_CMD_RESET;
4514			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
4515			complete(&ioc->base_cmds.done);
4516		}
4517		if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) {
4518			ioc->port_enable_failed = 1;
4519			ioc->port_enable_cmds.status |= MPT2_CMD_RESET;
4520			mpt2sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4521			if (ioc->is_driver_loading) {
4522				ioc->start_scan_failed =
4523				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4524				ioc->start_scan = 0;
4525				ioc->port_enable_cmds.status =
4526						MPT2_CMD_NOT_USED;
4527			} else
4528				complete(&ioc->port_enable_cmds.done);
4529
4530		}
4531		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
4532			ioc->config_cmds.status |= MPT2_CMD_RESET;
4533			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
4534			ioc->config_cmds.smid = USHRT_MAX;
4535			complete(&ioc->config_cmds.done);
4536		}
4537		break;
4538	case MPT2_IOC_DONE_RESET:
4539		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4540		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
4541		break;
4542	}
4543}
4544
4545/**
4546 * _wait_for_commands_to_complete - wait for outstanding commands to complete
4547 * @ioc: Pointer to MPT_ADAPTER structure
4548 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4549 *
4550 * This function waits (up to 10 seconds) for all pending commands to
4551 * complete prior to putting the controller into reset.
4552 */
4553static void
4554_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4555{
4556	u32 ioc_state;
4557	unsigned long flags;
4558	u16 i;
4559
4560	ioc->pending_io_count = 0;
4561	if (sleep_flag != CAN_SLEEP)
4562		return;
4563
4564	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
4565	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4566		return;
4567
4568	/* pending command count */
4569	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4570	for (i = 0; i < ioc->scsiio_depth; i++)
4571		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4572			ioc->pending_io_count++;
4573	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4574
4575	if (!ioc->pending_io_count)
4576		return;
4577
4578	/* wait for pending commands to complete */
4579	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4580}
4581
4582/**
4583 * mpt2sas_base_hard_reset_handler - reset controller
4584 * @ioc: Pointer to MPT_ADAPTER structure
4585 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4586 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4587 *
4588 * Returns 0 for success, non-zero for failure.
4589 */
4590int
4591mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4592    enum reset_type type)
4593{
4594	int r;
4595	unsigned long flags;
4596
4597	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4598	    __func__));
4599
4600	if (ioc->pci_error_recovery) {
4601		printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
4602		    ioc->name, __func__);
4603		r = 0;
4604		goto out_unlocked;
4605	}
4606
4607	if (mpt2sas_fwfault_debug)
4608		mpt2sas_halt_firmware(ioc);
4609
4610	/* TODO - What we really should be doing is pulling
4611	 * out all the code associated with NO_SLEEP; it's never used.
4612	 * That is legacy code from the mpt fusion driver, ported over.
4613	 * I will leave this BUG_ON here for now until it's been resolved.
4614	 */
4615	BUG_ON(sleep_flag == NO_SLEEP);
4616
4617	/* wait for an active reset in progress to complete */
4618	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4619		do {
4620			ssleep(1);
4621		} while (ioc->shost_recovery == 1);
4622		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4623		    __func__));
4624		return ioc->ioc_reset_in_progress_status;
4625	}
4626
4627	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4628	ioc->shost_recovery = 1;
4629	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4630
4631	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
4632	_wait_for_commands_to_complete(ioc, sleep_flag);
4633	_base_mask_interrupts(ioc);
4634	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4635	if (r)
4636		goto out;
4637	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4638
4639	/* If this hard reset is called while port enable is active, then
4640	 * there is no reason to call make_ioc_operational
4641	 */
4642	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4643		ioc->remove_host = 1;
4644		r = -EFAULT;
4645		goto out;
4646	}
4647	r = _base_make_ioc_operational(ioc, sleep_flag);
4648	if (!r)
4649		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4650 out:
4651	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n",
4652	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4653
4654	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4655	ioc->ioc_reset_in_progress_status = r;
4656	ioc->shost_recovery = 0;
4657	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4658	mutex_unlock(&ioc->reset_in_progress_mutex);
4659
4660 out_unlocked:
4661	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
4662	    __func__));
4663	return r;
4664}
4665