mpt2sas_base.c revision 35f805b52c94f8e6cb22907ef32517132a15cb96
1/* 2 * This is the Fusion MPT base driver providing common API layer interface 3 * for access to MPT (Message Passing Technology) firmware. 4 * 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 6 * Copyright (C) 2007-2010 LSI Corporation 7 * (mailto:DL-MPTFusionLinux@lsi.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
29 30 * DISCLAIMER OF LIABILITY 31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 38 39 * You should have received a copy of the GNU General Public License 40 * along with this program; if not, write to the Free Software 41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 42 * USA. 43 */ 44 45#include <linux/version.h> 46#include <linux/kernel.h> 47#include <linux/module.h> 48#include <linux/errno.h> 49#include <linux/init.h> 50#include <linux/slab.h> 51#include <linux/types.h> 52#include <linux/pci.h> 53#include <linux/kdev_t.h> 54#include <linux/blkdev.h> 55#include <linux/delay.h> 56#include <linux/interrupt.h> 57#include <linux/dma-mapping.h> 58#include <linux/sort.h> 59#include <linux/io.h> 60#include <linux/time.h> 61#include <linux/aer.h> 62 63#include "mpt2sas_base.h" 64 65static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; 66 67#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ 68 69static int max_queue_depth = -1; 70module_param(max_queue_depth, int, 0); 71MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); 72 73static int max_sgl_entries = -1; 74module_param(max_sgl_entries, int, 0); 75MODULE_PARM_DESC(max_sgl_entries, " max sg entries "); 76 77static int msix_disable = -1; 78module_param(msix_disable, int, 0); 79MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 80 81static int missing_delay[2] = {-1, -1}; 82module_param_array(missing_delay, int, NULL, 0); 83MODULE_PARM_DESC(missing_delay, " device missing delay 
, io missing delay"); 84 85/* diag_buffer_enable is bitwise 86 * bit 0 set = TRACE 87 * bit 1 set = SNAPSHOT 88 * bit 2 set = EXTENDED 89 * 90 * Either bit can be set, or both 91 */ 92static int diag_buffer_enable; 93module_param(diag_buffer_enable, int, 0); 94MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " 95 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); 96 97int mpt2sas_fwfault_debug; 98MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " 99 "and halt firmware - (default=0)"); 100 101static int disable_discovery = -1; 102module_param(disable_discovery, int, 0); 103MODULE_PARM_DESC(disable_discovery, " disable discovery "); 104 105/** 106 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 107 * 108 */ 109static int 110_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) 111{ 112 int ret = param_set_int(val, kp); 113 struct MPT2SAS_ADAPTER *ioc; 114 115 if (ret) 116 return ret; 117 118 printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug); 119 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) 120 ioc->fwfault_debug = mpt2sas_fwfault_debug; 121 return 0; 122} 123module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug, 124 param_get_int, &mpt2sas_fwfault_debug, 0644); 125 126/** 127 * _base_fault_reset_work - workq handling ioc fault conditions 128 * @work: input argument, used to derive ioc 129 * Context: sleep. 130 * 131 * Return nothing. 
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT2SAS_ADAPTER *ioc =
	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;

	/* if host recovery is in progress, skip the poll but keep the
	 * timer armed; note the goto jumps with the lock still held */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/* poll the doorbell; on FAULT state attempt a hard reset, then
	 * re-read the state to report a reset that failed to clear it */
	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt2sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	/* re-arm only while the watchdog queue exists (stop_watchdog
	 * NULLs it under this same lock) */
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
{
	unsigned long flags;

	/* already running */
	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */
	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
		    ioc->name, __func__, __LINE__);
		return;
	}
	/* queue the first poll under the lock so it cannot race a
	 * concurrent stop_watchdog tearing the queue down */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	/* NULL the queue pointer under the lock so the work handler
	 * cannot re-arm itself while we tear the queue down */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		/* if cancel missed (work already running), drain it */
		if (!cancel_delayed_work(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
{
	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
	    ioc->name, fault_code);
}

/**
 * mpt2sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 *
 * Only active when the fwfault_debug module parameter is set.
 */
void
mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt2sas_base_fault_info(ioc , doorbell);
	else {
		/* magic doorbell value that halts controller firmware */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
		    "timeout\n", ioc->name);
	}

	/* deliberately take the host down so the UART log can be captured */
	panic("panic in %s\n", __func__);
}

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
    MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	/* map the IOCStatus code to a human readable string; codes with
	 * no desc (e.g. the SCSI IO replies below) are silently ignored */
	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* pick the request frame size so _debug_dump_mf dumps the
	 * right number of dwords for this function type */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware asyn events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Only emits output when MPT_DEBUG_EVENTS logging is enabled.
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_TASK_SET_FULL:
		desc = "Task Set Full";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		/* discovery carries extra payload, so print inline
		 * and return instead of falling through */
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			printk("discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		printk("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		desc = "Log Entry Added";
		break;
	}

	if (!desc)
		return;

	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
}
#endif

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
585 */ 586static void 587_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) 588{ 589 union loginfo_type { 590 u32 loginfo; 591 struct { 592 u32 subcode:16; 593 u32 code:8; 594 u32 originator:4; 595 u32 bus_type:4; 596 } dw; 597 }; 598 union loginfo_type sas_loginfo; 599 char *originator_str = NULL; 600 601 sas_loginfo.loginfo = log_info; 602 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 603 return; 604 605 /* each nexus loss loginfo */ 606 if (log_info == 0x31170000) 607 return; 608 609 /* eat the loginfos associated with task aborts */ 610 if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == 611 0x31140000 || log_info == 0x31130000)) 612 return; 613 614 switch (sas_loginfo.dw.originator) { 615 case 0: 616 originator_str = "IOP"; 617 break; 618 case 1: 619 originator_str = "PL"; 620 break; 621 case 2: 622 originator_str = "IR"; 623 break; 624 } 625 626 printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), " 627 "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info, 628 originator_str, sas_loginfo.dw.code, 629 sas_loginfo.dw.subcode); 630} 631 632/** 633 * _base_display_reply_info - 634 * @ioc: per adapter object 635 * @smid: system request message index 636 * @msix_index: MSIX table index supplied by the OS 637 * @reply: reply message frame(lower 32bit addr) 638 * 639 * Return nothing. 
 */
static void
_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;

	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	/* decode a non-SUCCESS ioc_status when reply debugging is on */
	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc , mpi_reply,
		    mpt2sas_base_get_msg_frame(ioc, smid));
	}
#endif
	/* loginfo decode is unconditional when firmware flags one */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
}

/**
 * mpt2sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
670 */ 671u8 672mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 673 u32 reply) 674{ 675 MPI2DefaultReply_t *mpi_reply; 676 677 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 678 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) 679 return 1; 680 681 if (ioc->base_cmds.status == MPT2_CMD_NOT_USED) 682 return 1; 683 684 ioc->base_cmds.status |= MPT2_CMD_COMPLETE; 685 if (mpi_reply) { 686 ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID; 687 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 688 } 689 ioc->base_cmds.status &= ~MPT2_CMD_PENDING; 690 complete(&ioc->base_cmds.done); 691 return 1; 692} 693 694/** 695 * _base_async_event - main callback handler for firmware asyn events 696 * @ioc: per adapter object 697 * @msix_index: MSIX table index supplied by the OS 698 * @reply: reply message frame(lower 32bit addr) 699 * 700 * Return 1 meaning mf should be freed from _base_interrupt 701 * 0 means the mf is freed from this function. 
 */
static u8
_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;

	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	_base_display_event_data(ioc, mpi_reply);
#endif
	/* send an EVENT_ACK back only when firmware asked for one */
	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		/* no free message frame; event still gets dispatched below */
		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		goto out;
	}

	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	mpt2sas_base_put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt2sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt2sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return callback index.
 */
static u8
_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u8 cb_idx = 0xFF;

	/* smid space is partitioned into three lookup tables:
	 * [1 .. hi_priority_smid)            -> scsi_lookup
	 * [hi_priority_smid .. internal_smid) -> hpr_lookup
	 * [internal_smid .. hba_queue_depth]  -> internal_lookup
	 * 0xFF means "no callback registered" */
	if (smid >= ioc->hi_priority_smid) {
		if (smid < ioc->internal_smid) {
			i = smid - ioc->hi_priority_smid;
			cb_idx = ioc->hpr_lookup[i].cb_idx;
		} else if (smid <= ioc->hba_queue_depth)  {
			i = smid - ioc->internal_smid;
			cb_idx = ioc->internal_lookup[i].cb_idx;
		}
	} else {
		i = smid - 1;
		cb_idx = ioc->scsi_lookup[i].cb_idx;
	}
	return cb_idx;
}

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 *
 * Return nothing.
 */
static void
_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	/* read back to flush the posted write */
	readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

/* 64-bit reply descriptor with 32-bit halves addressable for the
 * all-ones "unused descriptor" test in the interrupt handler */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index;
	struct MPT2SAS_ADAPTER *ioc = bus_id;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return IRQ_NONE;

	/* walk the reply post ring until an unused descriptor is hit */
	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = rpf->Words;
		/* all-ones in either half marks an uninitialized slot */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		cb_idx = 0xFF;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		msix_index = rpf->Default.MSIxIndex;
		if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu
				(rpf->AddressReply.ReplyFrameAddress);
			/* sanity-range the dma address; 0 = no reply frame */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
			goto next;
		else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
			goto next;
		/* smid != 0: a driver request completed; dispatch to the
		 * registered callback.  smid == 0: async firmware event */
		if (smid)
			cb_idx = _base_get_cb_idx(ioc, smid);
		if (smid && cb_idx != 0xFF) {
			rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
			    reply);
			if (reply)
				_base_display_reply_info(ioc, smid, msix_index,
				    reply);
			if (rc)
				mpt2sas_base_free_smid(ioc, smid);
		}
		if (!smid)
			_base_async_event(ioc, msix_index, reply);

		/* reply free queue handling */
		if (reply) {
			ioc->reply_free_host_index =
			    (ioc->reply_free_host_index ==
			    (ioc->reply_free_queue_depth - 1)) ?
			    0 : ioc->reply_free_host_index + 1;
			ioc->reply_free[ioc->reply_free_host_index] =
			    cpu_to_le32(reply);
			/* order the queue write before telling hardware */
			wmb();
			writel(ioc->reply_free_host_index,
			    &ioc->chip->ReplyFreeHostIndex);
		}

 next:

		/* mark the slot consumed and advance (with wrap) */
		rpf->Words = ULLONG_MAX;
		ioc->reply_post_host_index = (ioc->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    ioc->reply_post_host_index + 1;
		request_desript_type =
		    ioc->reply_post_free[ioc->reply_post_host_index].Default.
		    ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!ioc->reply_post_host_index)
			rpf = ioc->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds)
		return IRQ_NONE;

	/* publish the new post index to hardware after all slot writes */
	wmb();
	writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
	return IRQ_HANDLED;
}

/**
 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Return nothing.
 */
void
mpt2sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Searches downward from the top of the table for a free slot;
 * slot 0 is intentionally never handed out (0xFF/unset sentinel space).
 *
 * Returns cb_func.
 */
u8
mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/**
 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
 *
 * Return nothing.
966 */ 967void 968mpt2sas_base_initialize_callback_handler(void) 969{ 970 u8 cb_idx; 971 972 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) 973 mpt2sas_base_release_callback_handler(cb_idx); 974} 975 976/** 977 * mpt2sas_base_build_zero_len_sge - build zero length sg entry 978 * @ioc: per adapter object 979 * @paddr: virtual address for SGE 980 * 981 * Create a zero length scatter gather entry to insure the IOCs hardware has 982 * something to use if the target device goes brain dead and tries 983 * to send data even when none is asked for. 984 * 985 * Return nothing. 986 */ 987void 988mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr) 989{ 990 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | 991 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | 992 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << 993 MPI2_SGE_FLAGS_SHIFT); 994 ioc->base_add_sg_single(paddr, flags_length, -1); 995} 996 997/** 998 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. 999 * @paddr: virtual address for SGE 1000 * @flags_length: SGE flags and data transfer length 1001 * @dma_addr: Physical address 1002 * 1003 * Return nothing. 1004 */ 1005static void 1006_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) 1007{ 1008 Mpi2SGESimple32_t *sgel = paddr; 1009 1010 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | 1011 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; 1012 sgel->FlagsLength = cpu_to_le32(flags_length); 1013 sgel->Address = cpu_to_le32(dma_addr); 1014} 1015 1016 1017/** 1018 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. 1019 * @paddr: virtual address for SGE 1020 * @flags_length: SGE flags and data transfer length 1021 * @dma_addr: Physical address 1022 * 1023 * Return nothing. 
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/* pages -> kilobytes, for the meminfo banner below */
#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))

/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Prefers 64-bit DMA masks when dma_addr_t is wide enough and the
 * platform requires >32-bit addressing; otherwise falls back to 32-bit.
 * Also selects the matching SGE builder and SGE size.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	char *desc = NULL;

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
		    DMA_BIT_MASK(64))) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			desc = "64";
			goto out;
		}
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		desc = "32";
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));

	return 0;
}

/**
 * _base_save_msix_table - backup msix vector table
 * @ioc: per adapter object
 *
 * This address an errata where diag reset clears out the table
 */
static void
_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
{
	int i;

	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
		return;

	for (i = 0; i < ioc->msix_vector_count; i++)
		ioc->msix_table_backup[i] = ioc->msix_table[i];
}

/**
 * _base_restore_msix_table - this restores the msix vector table
 * @ioc: per adapter object
 *
 */
static void
_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
{
	int i;

	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
		return;

	for (i = 0; i < ioc->msix_vector_count; i++)
		ioc->msix_table[i] = ioc->msix_table_backup[i];
}

/**
 * _base_check_enable_msix - checks MSIX capabable.
 * @ioc: per adapter object
 *
 * Check to see if card is capable of MSIX, and set number
 * of avaliable msix vectors
 */
static int
_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;
	u32 msix_table_offset;

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
		    "supported\n", ioc->name));
		return -EINVAL;
	}

	/* get msix vector count */
	pci_read_config_word(ioc->pdev, base + 2, &message_control);
	ioc->msix_vector_count = (message_control & 0x3FF) + 1;

	/* get msix table; mask low 3 bits (BIR) off the table offset */
	pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
	msix_table_offset &= 0xFFFFFFF8;
	ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
	    "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
	    ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
	return 0;
}

/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 *
 */
static void
_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
{
	if (ioc->msix_enable) {
		pci_disable_msix(ioc->pdev);
		kfree(ioc->msix_table_backup);
		ioc->msix_table_backup = NULL;
		ioc->msix_enable = 0;
	}
}

/**
 * _base_enable_msix - enables msix, failback to io_apic
 * @ioc: per adapter object
 *
 * Tries a single MSI-X vector first (unless msix_disable requests
 * otherwise), and falls back to legacy io_apic interrupt routing on
 * any failure.
 */
static int
_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
{
	struct msix_entry entries;
	int r;
	u8 try_msix = 0;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
	    sizeof(u32), GFP_KERNEL);
	if (!ioc->msix_table_backup) {
		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
		    "msix_table_backup failed!!!\n", ioc->name));
		goto try_ioapic;
	}

	memset(&entries, 0, sizeof(struct msix_entry));
	r = pci_enable_msix(ioc->pdev, &entries, 1);
	if (r) {
		/* NOTE(review): msix_table_backup is not freed on this and
		 * the request_irq failure path below; it appears to be
		 * released later via _base_disable_msix only when
		 * msix_enable was set — verify for a potential leak */
		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
		    "failed (r=%d) !!!\n", ioc->name, r));
		goto try_ioapic;
	}

	r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
	    ioc->name, ioc);
	if (r) {
		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
		    "interrupt %d !!!\n", ioc->name, entries.vector));
		pci_disable_msix(ioc->pdev);
		goto try_ioapic;
	}

	ioc->pci_irq = entries.vector;
	ioc->msix_enable = 1;
	return 0;

/* failback to io_apic interrupt routing */
 try_ioapic:

	r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
	    ioc->name, ioc);
	if (r) {
		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
		    ioc->name, ioc->pdev->irq);
		r = -EBUSY;
		goto out_fail;
	}

	ioc->pci_irq = ioc->pdev->irq;
	return 0;

 out_fail:
	return r;
}

/**
 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc:
per adapter object 1239 * 1240 * Returns 0 for success, non-zero for failure. 1241 */ 1242int 1243mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) 1244{ 1245 struct pci_dev *pdev = ioc->pdev; 1246 u32 memap_sz; 1247 u32 pio_sz; 1248 int i, r = 0; 1249 u64 pio_chip = 0; 1250 u64 chip_phys = 0; 1251 1252 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", 1253 ioc->name, __func__)); 1254 1255 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 1256 if (pci_enable_device_mem(pdev)) { 1257 printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: " 1258 "failed\n", ioc->name); 1259 return -ENODEV; 1260 } 1261 1262 1263 if (pci_request_selected_regions(pdev, ioc->bars, 1264 MPT2SAS_DRIVER_NAME)) { 1265 printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: " 1266 "failed\n", ioc->name); 1267 r = -ENODEV; 1268 goto out_fail; 1269 } 1270 1271 /* AER (Advanced Error Reporting) hooks */ 1272 pci_enable_pcie_error_reporting(pdev); 1273 1274 pci_set_master(pdev); 1275 1276 if (_base_config_dma_addressing(ioc, pdev) != 0) { 1277 printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n", 1278 ioc->name, pci_name(pdev)); 1279 r = -ENODEV; 1280 goto out_fail; 1281 } 1282 1283 for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) { 1284 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 1285 if (pio_sz) 1286 continue; 1287 pio_chip = (u64)pci_resource_start(pdev, i); 1288 pio_sz = pci_resource_len(pdev, i); 1289 } else { 1290 if (memap_sz) 1291 continue; 1292 /* verify memory resource is valid before using */ 1293 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 1294 ioc->chip_phys = pci_resource_start(pdev, i); 1295 chip_phys = (u64)ioc->chip_phys; 1296 memap_sz = pci_resource_len(pdev, i); 1297 ioc->chip = ioremap(ioc->chip_phys, memap_sz); 1298 if (ioc->chip == NULL) { 1299 printk(MPT2SAS_ERR_FMT "unable to map " 1300 "adapter memory!\n", ioc->name); 1301 r = -EINVAL; 1302 goto out_fail; 1303 } 1304 } 1305 } 1306 } 1307 1308 _base_mask_interrupts(ioc); 1309 r = 
_base_enable_msix(ioc); 1310 if (r) 1311 goto out_fail; 1312 1313 printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", 1314 ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 1315 "IO-APIC enabled"), ioc->pci_irq); 1316 printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 1317 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); 1318 printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", 1319 ioc->name, (unsigned long long)pio_chip, pio_sz); 1320 1321 /* Save PCI configuration state for recovery from PCI AER/EEH errors */ 1322 pci_save_state(pdev); 1323 1324 return 0; 1325 1326 out_fail: 1327 if (ioc->chip_phys) 1328 iounmap(ioc->chip); 1329 ioc->chip_phys = 0; 1330 ioc->pci_irq = -1; 1331 pci_release_selected_regions(ioc->pdev, ioc->bars); 1332 pci_disable_pcie_error_reporting(pdev); 1333 pci_disable_device(pdev); 1334 return r; 1335} 1336 1337/** 1338 * mpt2sas_base_get_msg_frame - obtain request mf pointer 1339 * @ioc: per adapter object 1340 * @smid: system request message index(smid zero is invalid) 1341 * 1342 * Returns virt pointer to message frame. 1343 */ 1344void * 1345mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1346{ 1347 return (void *)(ioc->request + (smid * ioc->request_sz)); 1348} 1349 1350/** 1351 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request 1352 * @ioc: per adapter object 1353 * @smid: system request message index 1354 * 1355 * Returns virt pointer to sense buffer. 1356 */ 1357void * 1358mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1359{ 1360 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); 1361} 1362 1363/** 1364 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request 1365 * @ioc: per adapter object 1366 * @smid: system request message index 1367 * 1368 * Returns phys pointer to the low 32bit address of the sense buffer. 
1369 */ 1370__le32 1371mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1372{ 1373 return cpu_to_le32(ioc->sense_dma + 1374 ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); 1375} 1376 1377/** 1378 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address 1379 * @ioc: per adapter object 1380 * @phys_addr: lower 32 physical addr of the reply 1381 * 1382 * Converts 32bit lower physical addr into a virt address. 1383 */ 1384void * 1385mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr) 1386{ 1387 if (!phys_addr) 1388 return NULL; 1389 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); 1390} 1391 1392/** 1393 * mpt2sas_base_get_smid - obtain a free smid from internal queue 1394 * @ioc: per adapter object 1395 * @cb_idx: callback index 1396 * 1397 * Returns smid (zero is invalid) 1398 */ 1399u16 1400mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx) 1401{ 1402 unsigned long flags; 1403 struct request_tracker *request; 1404 u16 smid; 1405 1406 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1407 if (list_empty(&ioc->internal_free_list)) { 1408 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1409 printk(MPT2SAS_ERR_FMT "%s: smid not available\n", 1410 ioc->name, __func__); 1411 return 0; 1412 } 1413 1414 request = list_entry(ioc->internal_free_list.next, 1415 struct request_tracker, tracker_list); 1416 request->cb_idx = cb_idx; 1417 smid = request->smid; 1418 list_del(&request->tracker_list); 1419 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1420 return smid; 1421} 1422 1423/** 1424 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue 1425 * @ioc: per adapter object 1426 * @cb_idx: callback index 1427 * @scmd: pointer to scsi command object 1428 * 1429 * Returns smid (zero is invalid) 1430 */ 1431u16 1432mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx, 1433 struct scsi_cmnd *scmd) 1434{ 1435 unsigned long flags; 1436 struct request_tracker *request; 
1437 u16 smid; 1438 1439 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1440 if (list_empty(&ioc->free_list)) { 1441 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1442 printk(MPT2SAS_ERR_FMT "%s: smid not available\n", 1443 ioc->name, __func__); 1444 return 0; 1445 } 1446 1447 request = list_entry(ioc->free_list.next, 1448 struct request_tracker, tracker_list); 1449 request->scmd = scmd; 1450 request->cb_idx = cb_idx; 1451 smid = request->smid; 1452 list_del(&request->tracker_list); 1453 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1454 return smid; 1455} 1456 1457/** 1458 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue 1459 * @ioc: per adapter object 1460 * @cb_idx: callback index 1461 * 1462 * Returns smid (zero is invalid) 1463 */ 1464u16 1465mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx) 1466{ 1467 unsigned long flags; 1468 struct request_tracker *request; 1469 u16 smid; 1470 1471 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1472 if (list_empty(&ioc->hpr_free_list)) { 1473 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1474 return 0; 1475 } 1476 1477 request = list_entry(ioc->hpr_free_list.next, 1478 struct request_tracker, tracker_list); 1479 request->cb_idx = cb_idx; 1480 smid = request->smid; 1481 list_del(&request->tracker_list); 1482 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1483 return smid; 1484} 1485 1486 1487/** 1488 * mpt2sas_base_free_smid - put smid back on free_list 1489 * @ioc: per adapter object 1490 * @smid: system request message index 1491 * 1492 * Return nothing. 
1493 */ 1494void 1495mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1496{ 1497 unsigned long flags; 1498 int i; 1499 struct chain_tracker *chain_req, *next; 1500 1501 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1502 if (smid >= ioc->hi_priority_smid) { 1503 if (smid < ioc->internal_smid) { 1504 /* hi-priority */ 1505 i = smid - ioc->hi_priority_smid; 1506 ioc->hpr_lookup[i].cb_idx = 0xFF; 1507 list_add_tail(&ioc->hpr_lookup[i].tracker_list, 1508 &ioc->hpr_free_list); 1509 } else { 1510 /* internal queue */ 1511 i = smid - ioc->internal_smid; 1512 ioc->internal_lookup[i].cb_idx = 0xFF; 1513 list_add_tail(&ioc->internal_lookup[i].tracker_list, 1514 &ioc->internal_free_list); 1515 } 1516 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1517 return; 1518 } 1519 1520 /* scsiio queue */ 1521 i = smid - 1; 1522 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { 1523 list_for_each_entry_safe(chain_req, next, 1524 &ioc->scsi_lookup[i].chain_list, tracker_list) { 1525 list_del_init(&chain_req->tracker_list); 1526 list_add_tail(&chain_req->tracker_list, 1527 &ioc->free_chain_list); 1528 } 1529 } 1530 ioc->scsi_lookup[i].cb_idx = 0xFF; 1531 ioc->scsi_lookup[i].scmd = NULL; 1532 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 1533 &ioc->free_list); 1534 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1535 1536 /* 1537 * See _wait_for_commands_to_complete() call with regards to this code. 1538 */ 1539 if (ioc->shost_recovery && ioc->pending_io_count) { 1540 if (ioc->pending_io_count == 1) 1541 wake_up(&ioc->reset_wq); 1542 ioc->pending_io_count--; 1543 } 1544} 1545 1546/** 1547 * _base_writeq - 64 bit write to MMIO 1548 * @ioc: per adapter object 1549 * @b: data payload 1550 * @addr: address in MMIO space 1551 * @writeq_lock: spin lock 1552 * 1553 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes 1554 * care of 32 bit environment where its not quarenteed to send the entire word 1555 * in one transfer. 
1556 */ 1557#ifndef writeq 1558static inline void _base_writeq(__u64 b, volatile void __iomem *addr, 1559 spinlock_t *writeq_lock) 1560{ 1561 unsigned long flags; 1562 __u64 data_out = cpu_to_le64(b); 1563 1564 spin_lock_irqsave(writeq_lock, flags); 1565 writel((u32)(data_out), addr); 1566 writel((u32)(data_out >> 32), (addr + 4)); 1567 spin_unlock_irqrestore(writeq_lock, flags); 1568} 1569#else 1570static inline void _base_writeq(__u64 b, volatile void __iomem *addr, 1571 spinlock_t *writeq_lock) 1572{ 1573 writeq(cpu_to_le64(b), addr); 1574} 1575#endif 1576 1577/** 1578 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware 1579 * @ioc: per adapter object 1580 * @smid: system request message index 1581 * @handle: device handle 1582 * 1583 * Return nothing. 1584 */ 1585void 1586mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle) 1587{ 1588 Mpi2RequestDescriptorUnion_t descriptor; 1589 u64 *request = (u64 *)&descriptor; 1590 1591 1592 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1593 descriptor.SCSIIO.MSIxIndex = 0; /* TODO */ 1594 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 1595 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 1596 descriptor.SCSIIO.LMID = 0; 1597 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 1598 &ioc->scsi_lookup_lock); 1599} 1600 1601 1602/** 1603 * mpt2sas_base_put_smid_hi_priority - send Task Managment request to firmware 1604 * @ioc: per adapter object 1605 * @smid: system request message index 1606 * 1607 * Return nothing. 
1608 */ 1609void 1610mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1611{ 1612 Mpi2RequestDescriptorUnion_t descriptor; 1613 u64 *request = (u64 *)&descriptor; 1614 1615 descriptor.HighPriority.RequestFlags = 1616 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1617 descriptor.HighPriority.MSIxIndex = 0; /* TODO */ 1618 descriptor.HighPriority.SMID = cpu_to_le16(smid); 1619 descriptor.HighPriority.LMID = 0; 1620 descriptor.HighPriority.Reserved1 = 0; 1621 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 1622 &ioc->scsi_lookup_lock); 1623} 1624 1625/** 1626 * mpt2sas_base_put_smid_default - Default, primarily used for config pages 1627 * @ioc: per adapter object 1628 * @smid: system request message index 1629 * 1630 * Return nothing. 1631 */ 1632void 1633mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid) 1634{ 1635 Mpi2RequestDescriptorUnion_t descriptor; 1636 u64 *request = (u64 *)&descriptor; 1637 1638 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1639 descriptor.Default.MSIxIndex = 0; /* TODO */ 1640 descriptor.Default.SMID = cpu_to_le16(smid); 1641 descriptor.Default.LMID = 0; 1642 descriptor.Default.DescriptorTypeDependent = 0; 1643 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 1644 &ioc->scsi_lookup_lock); 1645} 1646 1647/** 1648 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware 1649 * @ioc: per adapter object 1650 * @smid: system request message index 1651 * @io_index: value used to track the IO 1652 * 1653 * Return nothing. 
1654 */ 1655void 1656mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid, 1657 u16 io_index) 1658{ 1659 Mpi2RequestDescriptorUnion_t descriptor; 1660 u64 *request = (u64 *)&descriptor; 1661 1662 descriptor.SCSITarget.RequestFlags = 1663 MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET; 1664 descriptor.SCSITarget.MSIxIndex = 0; /* TODO */ 1665 descriptor.SCSITarget.SMID = cpu_to_le16(smid); 1666 descriptor.SCSITarget.LMID = 0; 1667 descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index); 1668 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 1669 &ioc->scsi_lookup_lock); 1670} 1671 1672/** 1673 * _base_display_dell_branding - Disply branding string 1674 * @ioc: per adapter object 1675 * 1676 * Return nothing. 1677 */ 1678static void 1679_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc) 1680{ 1681 char dell_branding[MPT2SAS_DELL_BRANDING_SIZE]; 1682 1683 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL) 1684 return; 1685 1686 memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE); 1687 switch (ioc->pdev->subsystem_device) { 1688 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: 1689 strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING, 1690 MPT2SAS_DELL_BRANDING_SIZE - 1); 1691 break; 1692 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: 1693 strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING, 1694 MPT2SAS_DELL_BRANDING_SIZE - 1); 1695 break; 1696 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: 1697 strncpy(dell_branding, 1698 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING, 1699 MPT2SAS_DELL_BRANDING_SIZE - 1); 1700 break; 1701 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: 1702 strncpy(dell_branding, 1703 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING, 1704 MPT2SAS_DELL_BRANDING_SIZE - 1); 1705 break; 1706 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: 1707 strncpy(dell_branding, 1708 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING, 1709 MPT2SAS_DELL_BRANDING_SIZE - 1); 1710 break; 1711 case MPT2SAS_DELL_PERC_H200_SSDID: 1712 strncpy(dell_branding, 
MPT2SAS_DELL_PERC_H200_BRANDING, 1713 MPT2SAS_DELL_BRANDING_SIZE - 1); 1714 break; 1715 case MPT2SAS_DELL_6GBPS_SAS_SSDID: 1716 strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING, 1717 MPT2SAS_DELL_BRANDING_SIZE - 1); 1718 break; 1719 default: 1720 sprintf(dell_branding, "0x%4X", ioc->pdev->subsystem_device); 1721 break; 1722 } 1723 1724 printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X)," 1725 " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding, 1726 ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor, 1727 ioc->pdev->subsystem_device); 1728} 1729 1730/** 1731 * _base_display_ioc_capabilities - Disply IOC's capabilities. 1732 * @ioc: per adapter object 1733 * 1734 * Return nothing. 1735 */ 1736static void 1737_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) 1738{ 1739 int i = 0; 1740 char desc[16]; 1741 u8 revision; 1742 u32 iounit_pg1_flags; 1743 1744 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); 1745 strncpy(desc, ioc->manu_pg0.ChipName, 16); 1746 printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " 1747 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", 1748 ioc->name, desc, 1749 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 1750 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 1751 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 1752 ioc->facts.FWVersion.Word & 0x000000FF, 1753 revision, 1754 (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24, 1755 (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16, 1756 (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8, 1757 ioc->bios_pg3.BiosVersion & 0x000000FF); 1758 1759 _base_display_dell_branding(ioc); 1760 1761 printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); 1762 1763 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { 1764 printk("Initiator"); 1765 i++; 1766 } 1767 1768 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { 1769 printk("%sTarget", i ? 
"," : ""); 1770 i++; 1771 } 1772 1773 i = 0; 1774 printk("), "); 1775 printk("Capabilities=("); 1776 1777 if (ioc->facts.IOCCapabilities & 1778 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 1779 printk("Raid"); 1780 i++; 1781 } 1782 1783 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 1784 printk("%sTLR", i ? "," : ""); 1785 i++; 1786 } 1787 1788 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { 1789 printk("%sMulticast", i ? "," : ""); 1790 i++; 1791 } 1792 1793 if (ioc->facts.IOCCapabilities & 1794 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { 1795 printk("%sBIDI Target", i ? "," : ""); 1796 i++; 1797 } 1798 1799 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { 1800 printk("%sEEDP", i ? "," : ""); 1801 i++; 1802 } 1803 1804 if (ioc->facts.IOCCapabilities & 1805 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { 1806 printk("%sSnapshot Buffer", i ? "," : ""); 1807 i++; 1808 } 1809 1810 if (ioc->facts.IOCCapabilities & 1811 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { 1812 printk("%sDiag Trace Buffer", i ? "," : ""); 1813 i++; 1814 } 1815 1816 if (ioc->facts.IOCCapabilities & 1817 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { 1818 printk(KERN_INFO "%sDiag Extended Buffer", i ? "," : ""); 1819 i++; 1820 } 1821 1822 if (ioc->facts.IOCCapabilities & 1823 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { 1824 printk("%sTask Set Full", i ? "," : ""); 1825 i++; 1826 } 1827 1828 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 1829 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { 1830 printk("%sNCQ", i ? "," : ""); 1831 i++; 1832 } 1833 1834 printk(")\n"); 1835} 1836 1837/** 1838 * _base_update_missing_delay - change the missing delay timers 1839 * @ioc: per adapter object 1840 * @device_missing_delay: amount of time till device is reported missing 1841 * @io_missing_delay: interval IO is returned when there is a missing device 1842 * 1843 * Return nothing. 
1844 * 1845 * Passed on the command line, this function will modify the device missing 1846 * delay, as well as the io missing delay. This should be called at driver 1847 * load time. 1848 */ 1849static void 1850_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc, 1851 u16 device_missing_delay, u8 io_missing_delay) 1852{ 1853 u16 dmd, dmd_new, dmd_orignal; 1854 u8 io_missing_delay_original; 1855 u16 sz; 1856 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 1857 Mpi2ConfigReply_t mpi_reply; 1858 u8 num_phys = 0; 1859 u16 ioc_status; 1860 1861 mpt2sas_config_get_number_hba_phys(ioc, &num_phys); 1862 if (!num_phys) 1863 return; 1864 1865 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * 1866 sizeof(Mpi2SasIOUnit1PhyData_t)); 1867 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 1868 if (!sas_iounit_pg1) { 1869 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1870 ioc->name, __FILE__, __LINE__, __func__); 1871 goto out; 1872 } 1873 if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 1874 sas_iounit_pg1, sz))) { 1875 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1876 ioc->name, __FILE__, __LINE__, __func__); 1877 goto out; 1878 } 1879 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 1880 MPI2_IOCSTATUS_MASK; 1881 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 1882 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 1883 ioc->name, __FILE__, __LINE__, __func__); 1884 goto out; 1885 } 1886 1887 /* device missing delay */ 1888 dmd = sas_iounit_pg1->ReportDeviceMissingDelay; 1889 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 1890 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 1891 else 1892 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 1893 dmd_orignal = dmd; 1894 if (device_missing_delay > 0x7F) { 1895 dmd = (device_missing_delay > 0x7F0) ? 
0x7F0 : 1896 device_missing_delay; 1897 dmd = dmd / 16; 1898 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; 1899 } else 1900 dmd = device_missing_delay; 1901 sas_iounit_pg1->ReportDeviceMissingDelay = dmd; 1902 1903 /* io missing delay */ 1904 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; 1905 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; 1906 1907 if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, 1908 sz)) { 1909 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 1910 dmd_new = (dmd & 1911 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 1912 else 1913 dmd_new = 1914 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 1915 printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), " 1916 "new(%d)\n", ioc->name, dmd_orignal, dmd_new); 1917 printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), " 1918 "new(%d)\n", ioc->name, io_missing_delay_original, 1919 io_missing_delay); 1920 ioc->device_missing_delay = dmd_new; 1921 ioc->io_missing_delay = io_missing_delay; 1922 } 1923 1924out: 1925 kfree(sas_iounit_pg1); 1926} 1927 1928/** 1929 * _base_static_config_pages - static start of day config pages 1930 * @ioc: per adapter object 1931 * 1932 * Return nothing. 
1933 */ 1934static void 1935_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) 1936{ 1937 Mpi2ConfigReply_t mpi_reply; 1938 u32 iounit_pg1_flags; 1939 1940 mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); 1941 if (ioc->ir_firmware) 1942 mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply, 1943 &ioc->manu_pg10); 1944 mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); 1945 mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); 1946 mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); 1947 mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); 1948 mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 1949 _base_display_ioc_capabilities(ioc); 1950 1951 /* 1952 * Enable task_set_full handling in iounit_pg1 when the 1953 * facts capabilities indicate that its supported. 1954 */ 1955 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 1956 if ((ioc->facts.IOCCapabilities & 1957 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) 1958 iounit_pg1_flags &= 1959 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 1960 else 1961 iounit_pg1_flags |= 1962 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 1963 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); 1964 mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 1965 1966} 1967 1968/** 1969 * _base_release_memory_pools - release memory 1970 * @ioc: per adapter object 1971 * 1972 * Free memory allocated from _base_allocate_memory_pools. 1973 * 1974 * Return nothing. 
1975 */ 1976static void 1977_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) 1978{ 1979 int i; 1980 1981 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1982 __func__)); 1983 1984 if (ioc->request) { 1985 pci_free_consistent(ioc->pdev, ioc->request_dma_sz, 1986 ioc->request, ioc->request_dma); 1987 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)" 1988 ": free\n", ioc->name, ioc->request)); 1989 ioc->request = NULL; 1990 } 1991 1992 if (ioc->sense) { 1993 pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); 1994 if (ioc->sense_dma_pool) 1995 pci_pool_destroy(ioc->sense_dma_pool); 1996 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)" 1997 ": free\n", ioc->name, ioc->sense)); 1998 ioc->sense = NULL; 1999 } 2000 2001 if (ioc->reply) { 2002 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); 2003 if (ioc->reply_dma_pool) 2004 pci_pool_destroy(ioc->reply_dma_pool); 2005 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)" 2006 ": free\n", ioc->name, ioc->reply)); 2007 ioc->reply = NULL; 2008 } 2009 2010 if (ioc->reply_free) { 2011 pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, 2012 ioc->reply_free_dma); 2013 if (ioc->reply_free_dma_pool) 2014 pci_pool_destroy(ioc->reply_free_dma_pool); 2015 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool" 2016 "(0x%p): free\n", ioc->name, ioc->reply_free)); 2017 ioc->reply_free = NULL; 2018 } 2019 2020 if (ioc->reply_post_free) { 2021 pci_pool_free(ioc->reply_post_free_dma_pool, 2022 ioc->reply_post_free, ioc->reply_post_free_dma); 2023 if (ioc->reply_post_free_dma_pool) 2024 pci_pool_destroy(ioc->reply_post_free_dma_pool); 2025 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT 2026 "reply_post_free_pool(0x%p): free\n", ioc->name, 2027 ioc->reply_post_free)); 2028 ioc->reply_post_free = NULL; 2029 } 2030 2031 if (ioc->config_page) { 2032 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT 2033 "config_page(0x%p): free\n", ioc->name, 2034 ioc->config_page)); 2035 
		/* free the DMA-coherent config page buffer; allocated by
		 * _base_allocate_memory_pools() below
		 */
		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	/* scsi_lookup was allocated with __get_free_pages(), not kmalloc */
	if (ioc->scsi_lookup) {
		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
		ioc->scsi_lookup = NULL;
	}
	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	if (ioc->chain_lookup) {
		/* return every chain frame to the pool before destroying it */
		for (i = 0; i < ioc->chain_depth; i++) {
			if (ioc->chain_lookup[i].chain_buffer)
				pci_pool_free(ioc->chain_dma_pool,
				    ioc->chain_lookup[i].chain_buffer,
				    ioc->chain_lookup[i].chain_buffer_dma);
		}
		if (ioc->chain_dma_pool)
			pci_pool_destroy(ioc->chain_dma_pool);
	}
	if (ioc->chain_lookup) {
		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
		ioc->chain_lookup = NULL;
	}
}


/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Sizes the request/reply queues from the IOCFacts data (clamped by the
 * max_queue_depth/max_sgl_entries module parameters), then allocates the
 * request frame pool, lookup tables, chain frames, sense buffers, reply
 * frames, reply free/post queues and the config page.  On request-pool
 * allocation failure the queue depth is stepped down and the whole sizing
 * is retried (see retry_allocation label).
 *
 * Returns 0 success, anything else error
 */
static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCFactsReply_t *facts;
	u32 queue_size, queue_diff;
	u16 max_sge_elements;
	u16 num_of_reply_frames;
	u16 chains_needed_per_io;
	u32 sz, total_sz;
	u32 retry_sz;
	u16 max_request_credit;
	int i;

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1) {
		ioc->shost->sg_tablesize = (max_sgl_entries <
		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
		    MPT2SAS_SG_DEPTH;
	} else {
		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
	}

	/* command line tunables for max controller queue depth;
	 * never exceed the credit reported by the firmware
	 */
	if (max_queue_depth != -1)
		max_request_credit = (max_queue_depth < facts->RequestCredit)
		    ? max_queue_depth : facts->RequestCredit;
	else
		max_request_credit = facts->RequestCredit;

	ioc->hba_queue_depth = max_request_credit;
	ioc->hi_priority_depth = facts->HighPriorityCredit;
	ioc->internal_depth = ioc->hi_priority_depth + 5;

	/* request frame size (IOCFacts reports it in dword units) */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size (also reported in dword units) */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

 retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->request_sz - ioc->sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;

	/* dword offset within the frame where chain elements begin */
	ioc->chain_offset_value_for_main_message =
	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;

	/*
	 * MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		/* firmware limits the chain depth; shrink sg_tablesize so
		 * an IO can never need more chains than allowed
		 */
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		* chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;

	/* reply free queue sizing - taking into account for events */
	num_of_reply_frames = ioc->hba_queue_depth + 32;

	/* number of replies frames can't be a multiple of 16 */
	/* decrease number of reply frames by 1 */
	if (!(num_of_reply_frames % 16))
		num_of_reply_frames--;

	/* calculate number of reply free queue entries
	 *  (must be multiple of 16)
	 */

	/* (we know reply_free_queue_depth is not a multiple of 16) */
	queue_size = num_of_reply_frames;
	queue_size += 16 - (queue_size % 16);
	ioc->reply_free_queue_depth = queue_size;

	/* reply descriptor post queue sizing */
	/* this size should be the number of request frames + number of reply
	 * frames
	 */

	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
	/* round up to 16 byte boundary */
	if (queue_size % 16)
		queue_size += 16 - (queue_size % 16);

	/* check against IOC maximum reply post queue depth */
	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
		queue_diff = queue_size -
		    facts->MaxReplyDescriptorPostQueueDepth;

		/* round queue_diff up to multiple of 16 */
		if (queue_diff % 16)
			queue_diff += 16 - (queue_diff % 16);

		/* adjust hba_queue_depth, reply_free_queue_depth,
		 * and queue_size
		 */
		ioc->hba_queue_depth -= queue_diff;
		ioc->reply_free_queue_depth -= queue_diff;
		queue_size -= queue_diff;
	}
	ioc->reply_post_queue_depth = queue_size;

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io));

	ioc->scsiio_depth = ioc->hba_queue_depth -
	    ioc->hi_priority_depth - ioc->internal_depth;

	/* set the scsi host can_queue depth
	 * with some internal commands that could be outstanding
	 */
	ioc->shost->can_queue = ioc->scsiio_depth - (2);
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));

	/* contiguous pool for request and chains, 16 byte align, one extra "
	 * "frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

	/* hi-priority queue */
	sz += (ioc->hi_priority_depth * ioc->request_sz);

	/* internal queue */
	sz += (ioc->internal_depth * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
	if (!ioc->request) {
		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
		/* step the queue depth down by 64 and retry the whole
		 * sizing, but never below the minimum usable depth
		 */
		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
			goto out;
		retry_sz += 64;
		ioc->hba_queue_depth = max_request_credit - retry_sz;
		goto retry_allocation;
	}

	if (retry_sz)
		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);


	/* hi-priority queue */
	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);
	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);

	/* internal queue */
	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
	    ioc->request_sz);
	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
	    ioc->request_sz);


	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
	    ioc->name, (unsigned long long) ioc->request_dma));
	total_sz += sz;

	sz = ioc->scsiio_depth * sizeof(struct request_tracker);
	ioc->scsi_lookup_pages = get_order(sz);
	ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
	    GFP_KERNEL, ioc->scsi_lookup_pages);
	if (!ioc->scsi_lookup) {
		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
		    "sz(%d)\n", ioc->name, (int)sz);
		goto out;
	}

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
	    "depth(%d)\n", ioc->name, ioc->request,
	    ioc->scsiio_depth));

	/* loop till the allocation succeeds */
	/* NOTE(review): chain_depth is decremented by 100 each failed pass;
	 * if it ever dropped to/below zero this would misbehave -- in
	 * practice depth is large, but worth confirming a lower bound.
	 */
	do {
		sz = ioc->chain_depth * sizeof(struct chain_tracker);
		ioc->chain_pages = get_order(sz);
		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
		    GFP_KERNEL, ioc->chain_pages);
		if (ioc->chain_lookup == NULL)
			ioc->chain_depth -= 100;
	} while (ioc->chain_lookup == NULL);
	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
	    ioc->request_sz, 16, 0);
	if (!ioc->chain_dma_pool) {
		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
		    "failed\n", ioc->name);
		goto out;
	}
	/* best-effort: on a failed chain alloc, keep whatever depth we got */
	for (i = 0; i < ioc->chain_depth; i++) {
		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
		    ioc->chain_dma_pool , GFP_KERNEL,
		    &ioc->chain_lookup[i].chain_buffer_dma);
		if (!ioc->chain_lookup[i].chain_buffer) {
			ioc->chain_depth = i;
			goto chain_done;
		}
		total_sz += ioc->request_sz;
	}
chain_done:
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
	    ioc->request_sz))/1024));

	/* initialize hi-priority queue smid's */
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}
	/* hi-priority smids start right after the scsiio range (+1 because
	 * smid 0 is reserved)
	 */
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
	    ioc->hi_priority_depth, ioc->hi_priority_smid));

	/* initialize internal queue smid's */
	ioc->internal_lookup = kcalloc(ioc->internal_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->internal_lookup) {
		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
	    ioc->internal_depth, ioc->internal_smid));

	/* sense buffers, 4 byte align */
	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->sense_dma_pool) {
		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
	    &ioc->sense_dma);
	if (!ioc->sense) {
		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
	    SCSI_SENSE_BUFFERSIZE, sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->sense_dma));
	total_sz += sz;

	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->reply_dma_pool) {
		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply) {
		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	/* remember the 32-bit window of valid reply addresses; used to
	 * validate reply frame addresses coming back from the firmware
	 */
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->reply_dma));
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool) {
		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
		    "failed\n", ioc->name);
		goto out;
	}
	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
	    &ioc->reply_free_dma);
	if (!ioc->reply_free) {
		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
		    "failed\n", ioc->name);
		goto out;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;

	/* reply post queue, 16 byte align */
	sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool) {
		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
		    "failed\n", ioc->name);
		goto out;
	}
	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
	    GFP_KERNEL, &ioc->reply_post_free_dma);
	if (!ioc->reply_post_free) {
		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
		    "failed\n", ioc->name);
		goto out;
	}
	memset(ioc->reply_post_free, 0, sz);
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
	    sz/1024));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
	    "(0x%llx)\n", ioc->name, (unsigned long long)
	    ioc->reply_post_free_dma));
	total_sz += sz;

	ioc->config_page_sz = 512;
	ioc->config_page = pci_alloc_consistent(ioc->pdev,
	    ioc->config_page_sz, &ioc->config_page_dma);
	if (!ioc->config_page) {
		printk(MPT2SAS_ERR_FMT "config page: pci_pool_alloc "
		    "failed\n", ioc->name);
		goto out;
	}
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
	total_sz += ioc->config_page_sz;

	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
	    ioc->name, total_sz/1024);
	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
	    "Max Controller Queue Depth(%d)\n",
	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
	    ioc->name, ioc->shost->sg_tablesize);
	return 0;

 out:
	/* caller is expected to release whatever was allocated so far */
	return -ENOMEM;
}


/**
 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2459 * @ioc: Pointer to MPT_ADAPTER structure 2460 * @cooked: Request raw or cooked IOC state 2461 * 2462 * Returns all IOC Doorbell register bits if cooked==0, else just the 2463 * Doorbell bits in MPI_IOC_STATE_MASK. 2464 */ 2465u32 2466mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked) 2467{ 2468 u32 s, sc; 2469 2470 s = readl(&ioc->chip->Doorbell); 2471 sc = s & MPI2_IOC_STATE_MASK; 2472 return cooked ? sc : s; 2473} 2474 2475/** 2476 * _base_wait_on_iocstate - waiting on a particular ioc state 2477 * @ioc_state: controller state { READY, OPERATIONAL, or RESET } 2478 * @timeout: timeout in second 2479 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2480 * 2481 * Returns 0 for success, non-zero for failure. 2482 */ 2483static int 2484_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout, 2485 int sleep_flag) 2486{ 2487 u32 count, cntdn; 2488 u32 current_state; 2489 2490 count = 0; 2491 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; 2492 do { 2493 current_state = mpt2sas_base_get_iocstate(ioc, 1); 2494 if (current_state == ioc_state) 2495 return 0; 2496 if (count && current_state == MPI2_IOC_STATE_FAULT) 2497 break; 2498 if (sleep_flag == CAN_SLEEP) 2499 msleep(1); 2500 else 2501 udelay(500); 2502 count++; 2503 } while (--cntdn); 2504 2505 return current_state; 2506} 2507 2508/** 2509 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by 2510 * a write to the doorbell) 2511 * @ioc: per adapter object 2512 * @timeout: timeout in second 2513 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2514 * 2515 * Returns 0 for success, non-zero for failure. 2516 * 2517 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 2518 */ 2519static int 2520_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout, 2521 int sleep_flag) 2522{ 2523 u32 cntdn, count; 2524 u32 int_status; 2525 2526 count = 0; 2527 cntdn = (sleep_flag == CAN_SLEEP) ? 
1000*timeout : 2000*timeout; 2528 do { 2529 int_status = readl(&ioc->chip->HostInterruptStatus); 2530 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 2531 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 2532 "successfull count(%d), timeout(%d)\n", ioc->name, 2533 __func__, count, timeout)); 2534 return 0; 2535 } 2536 if (sleep_flag == CAN_SLEEP) 2537 msleep(1); 2538 else 2539 udelay(500); 2540 count++; 2541 } while (--cntdn); 2542 2543 printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), " 2544 "int_status(%x)!\n", ioc->name, __func__, count, int_status); 2545 return -EFAULT; 2546} 2547 2548/** 2549 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. 2550 * @ioc: per adapter object 2551 * @timeout: timeout in second 2552 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2553 * 2554 * Returns 0 for success, non-zero for failure. 2555 * 2556 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to 2557 * doorbell. 2558 */ 2559static int 2560_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout, 2561 int sleep_flag) 2562{ 2563 u32 cntdn, count; 2564 u32 int_status; 2565 u32 doorbell; 2566 2567 count = 0; 2568 cntdn = (sleep_flag == CAN_SLEEP) ? 
1000*timeout : 2000*timeout; 2569 do { 2570 int_status = readl(&ioc->chip->HostInterruptStatus); 2571 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 2572 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 2573 "successfull count(%d), timeout(%d)\n", ioc->name, 2574 __func__, count, timeout)); 2575 return 0; 2576 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 2577 doorbell = readl(&ioc->chip->Doorbell); 2578 if ((doorbell & MPI2_IOC_STATE_MASK) == 2579 MPI2_IOC_STATE_FAULT) { 2580 mpt2sas_base_fault_info(ioc , doorbell); 2581 return -EFAULT; 2582 } 2583 } else if (int_status == 0xFFFFFFFF) 2584 goto out; 2585 2586 if (sleep_flag == CAN_SLEEP) 2587 msleep(1); 2588 else 2589 udelay(500); 2590 count++; 2591 } while (--cntdn); 2592 2593 out: 2594 printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), " 2595 "int_status(%x)!\n", ioc->name, __func__, count, int_status); 2596 return -EFAULT; 2597} 2598 2599/** 2600 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use 2601 * @ioc: per adapter object 2602 * @timeout: timeout in second 2603 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2604 * 2605 * Returns 0 for success, non-zero for failure. 2606 * 2607 */ 2608static int 2609_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout, 2610 int sleep_flag) 2611{ 2612 u32 cntdn, count; 2613 u32 doorbell_reg; 2614 2615 count = 0; 2616 cntdn = (sleep_flag == CAN_SLEEP) ? 
1000*timeout : 2000*timeout; 2617 do { 2618 doorbell_reg = readl(&ioc->chip->Doorbell); 2619 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 2620 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 2621 "successfull count(%d), timeout(%d)\n", ioc->name, 2622 __func__, count, timeout)); 2623 return 0; 2624 } 2625 if (sleep_flag == CAN_SLEEP) 2626 msleep(1); 2627 else 2628 udelay(500); 2629 count++; 2630 } while (--cntdn); 2631 2632 printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), " 2633 "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg); 2634 return -EFAULT; 2635} 2636 2637/** 2638 * _base_send_ioc_reset - send doorbell reset 2639 * @ioc: per adapter object 2640 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET 2641 * @timeout: timeout in second 2642 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2643 * 2644 * Returns 0 for success, non-zero for failure. 2645 */ 2646static int 2647_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout, 2648 int sleep_flag) 2649{ 2650 u32 ioc_state; 2651 int r = 0; 2652 2653 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { 2654 printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n", 2655 ioc->name, __func__); 2656 return -EFAULT; 2657 } 2658 2659 if (!(ioc->facts.IOCCapabilities & 2660 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) 2661 return -EFAULT; 2662 2663 printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name); 2664 2665 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, 2666 &ioc->chip->Doorbell); 2667 if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) { 2668 r = -EFAULT; 2669 goto out; 2670 } 2671 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 2672 timeout, sleep_flag); 2673 if (ioc_state) { 2674 printk(MPT2SAS_ERR_FMT "%s: failed going to ready state " 2675 " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); 2676 r = -EFAULT; 2677 goto out; 2678 } 2679 out: 2680 printk(MPT2SAS_INFO_FMT "message unit reset: %s\n", 2681 ioc->name, ((r == 
0) ? "SUCCESS" : "FAILED")); 2682 return r; 2683} 2684 2685/** 2686 * _base_handshake_req_reply_wait - send request thru doorbell interface 2687 * @ioc: per adapter object 2688 * @request_bytes: request length 2689 * @request: pointer having request payload 2690 * @reply_bytes: reply length 2691 * @reply: pointer to reply payload 2692 * @timeout: timeout in second 2693 * @sleep_flag: CAN_SLEEP or NO_SLEEP 2694 * 2695 * Returns 0 for success, non-zero for failure. 2696 */ 2697static int 2698_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes, 2699 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) 2700{ 2701 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 2702 int i; 2703 u8 failed; 2704 u16 dummy; 2705 u32 *mfp; 2706 2707 /* make sure doorbell is not in use */ 2708 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 2709 printk(MPT2SAS_ERR_FMT "doorbell is in use " 2710 " (line=%d)\n", ioc->name, __LINE__); 2711 return -EFAULT; 2712 } 2713 2714 /* clear pending doorbell interrupts from previous state changes */ 2715 if (readl(&ioc->chip->HostInterruptStatus) & 2716 MPI2_HIS_IOC2SYS_DB_STATUS) 2717 writel(0, &ioc->chip->HostInterruptStatus); 2718 2719 /* send message to ioc */ 2720 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | 2721 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 2722 &ioc->chip->Doorbell); 2723 2724 if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) { 2725 printk(MPT2SAS_ERR_FMT "doorbell handshake " 2726 "int failed (line=%d)\n", ioc->name, __LINE__); 2727 return -EFAULT; 2728 } 2729 writel(0, &ioc->chip->HostInterruptStatus); 2730 2731 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) { 2732 printk(MPT2SAS_ERR_FMT "doorbell handshake " 2733 "ack failed (line=%d)\n", ioc->name, __LINE__); 2734 return -EFAULT; 2735 } 2736 2737 /* send message 32-bits at a time */ 2738 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 2739 
writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 2740 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) 2741 failed = 1; 2742 } 2743 2744 if (failed) { 2745 printk(MPT2SAS_ERR_FMT "doorbell handshake " 2746 "sending request failed (line=%d)\n", ioc->name, __LINE__); 2747 return -EFAULT; 2748 } 2749 2750 /* now wait for the reply */ 2751 if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) { 2752 printk(MPT2SAS_ERR_FMT "doorbell handshake " 2753 "int failed (line=%d)\n", ioc->name, __LINE__); 2754 return -EFAULT; 2755 } 2756 2757 /* read the first two 16-bits, it gives the total length of the reply */ 2758 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 2759 & MPI2_DOORBELL_DATA_MASK); 2760 writel(0, &ioc->chip->HostInterruptStatus); 2761 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 2762 printk(MPT2SAS_ERR_FMT "doorbell handshake " 2763 "int failed (line=%d)\n", ioc->name, __LINE__); 2764 return -EFAULT; 2765 } 2766 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 2767 & MPI2_DOORBELL_DATA_MASK); 2768 writel(0, &ioc->chip->HostInterruptStatus); 2769 2770 for (i = 2; i < default_reply->MsgLength * 2; i++) { 2771 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 2772 printk(MPT2SAS_ERR_FMT "doorbell " 2773 "handshake int failed (line=%d)\n", ioc->name, 2774 __LINE__); 2775 return -EFAULT; 2776 } 2777 if (i >= reply_bytes/2) /* overflow case */ 2778 dummy = readl(&ioc->chip->Doorbell); 2779 else 2780 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 2781 & MPI2_DOORBELL_DATA_MASK); 2782 writel(0, &ioc->chip->HostInterruptStatus); 2783 } 2784 2785 _base_wait_for_doorbell_int(ioc, 5, sleep_flag); 2786 if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) { 2787 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use " 2788 " (line=%d)\n", ioc->name, __LINE__)); 2789 } 2790 writel(0, &ioc->chip->HostInterruptStatus); 2791 2792 if (ioc->logging_level & MPT_DEBUG_INIT) { 2793 mfp = (u32 *)reply; 2794 printk(KERN_INFO 
"\toffset:data\n"); 2795 for (i = 0; i < reply_bytes/4; i++) 2796 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, 2797 le32_to_cpu(mfp[i])); 2798 } 2799 return 0; 2800} 2801 2802/** 2803 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW 2804 * @ioc: per adapter object 2805 * @mpi_reply: the reply payload from FW 2806 * @mpi_request: the request payload sent to FW 2807 * 2808 * The SAS IO Unit Control Request message allows the host to perform low-level 2809 * operations, such as resets on the PHYs of the IO Unit, also allows the host 2810 * to obtain the IOC assigned device handles for a device if it has other 2811 * identifying information about the device, in addition allows the host to 2812 * remove IOC resources associated with the device. 2813 * 2814 * Returns 0 for success, non-zero for failure. 2815 */ 2816int 2817mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc, 2818 Mpi2SasIoUnitControlReply_t *mpi_reply, 2819 Mpi2SasIoUnitControlRequest_t *mpi_request) 2820{ 2821 u16 smid; 2822 u32 ioc_state; 2823 unsigned long timeleft; 2824 u8 issue_reset; 2825 int rc; 2826 void *request; 2827 u16 wait_state_count; 2828 2829 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 2830 __func__)); 2831 2832 mutex_lock(&ioc->base_cmds.mutex); 2833 2834 if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) { 2835 printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n", 2836 ioc->name, __func__); 2837 rc = -EAGAIN; 2838 goto out; 2839 } 2840 2841 wait_state_count = 0; 2842 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 2843 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 2844 if (wait_state_count++ == 10) { 2845 printk(MPT2SAS_ERR_FMT 2846 "%s: failed due to ioc not operational\n", 2847 ioc->name, __func__); 2848 rc = -EFAULT; 2849 goto out; 2850 } 2851 ssleep(1); 2852 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 2853 printk(MPT2SAS_INFO_FMT "%s: waiting for " 2854 "operational state(count=%d)\n", ioc->name, 2855 __func__, wait_state_count); 2856 } 2857 2858 
smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx); 2859 if (!smid) { 2860 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 2861 ioc->name, __func__); 2862 rc = -EAGAIN; 2863 goto out; 2864 } 2865 2866 rc = 0; 2867 ioc->base_cmds.status = MPT2_CMD_PENDING; 2868 request = mpt2sas_base_get_msg_frame(ioc, smid); 2869 ioc->base_cmds.smid = smid; 2870 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); 2871 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 2872 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 2873 ioc->ioc_link_reset_in_progress = 1; 2874 mpt2sas_base_put_smid_default(ioc, smid); 2875 init_completion(&ioc->base_cmds.done); 2876 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 2877 msecs_to_jiffies(10000)); 2878 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 2879 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && 2880 ioc->ioc_link_reset_in_progress) 2881 ioc->ioc_link_reset_in_progress = 0; 2882 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 2883 printk(MPT2SAS_ERR_FMT "%s: timeout\n", 2884 ioc->name, __func__); 2885 _debug_dump_mf(mpi_request, 2886 sizeof(Mpi2SasIoUnitControlRequest_t)/4); 2887 if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) 2888 issue_reset = 1; 2889 goto issue_host_reset; 2890 } 2891 if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) 2892 memcpy(mpi_reply, ioc->base_cmds.reply, 2893 sizeof(Mpi2SasIoUnitControlReply_t)); 2894 else 2895 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); 2896 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 2897 goto out; 2898 2899 issue_host_reset: 2900 if (issue_reset) 2901 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2902 FORCE_BIG_HAMMER); 2903 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 2904 rc = -EFAULT; 2905 out: 2906 mutex_unlock(&ioc->base_cmds.mutex); 2907 return rc; 2908} 2909 2910 2911/** 2912 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device 2913 * @ioc: per adapter object 2914 * @mpi_reply: 
the reply payload from FW 2915 * @mpi_request: the request payload sent to FW 2916 * 2917 * The SCSI Enclosure Processor request message causes the IOC to 2918 * communicate with SES devices to control LED status signals. 2919 * 2920 * Returns 0 for success, non-zero for failure. 2921 */ 2922int 2923mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, 2924 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) 2925{ 2926 u16 smid; 2927 u32 ioc_state; 2928 unsigned long timeleft; 2929 u8 issue_reset; 2930 int rc; 2931 void *request; 2932 u16 wait_state_count; 2933 2934 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 2935 __func__)); 2936 2937 mutex_lock(&ioc->base_cmds.mutex); 2938 2939 if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) { 2940 printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n", 2941 ioc->name, __func__); 2942 rc = -EAGAIN; 2943 goto out; 2944 } 2945 2946 wait_state_count = 0; 2947 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 2948 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 2949 if (wait_state_count++ == 10) { 2950 printk(MPT2SAS_ERR_FMT 2951 "%s: failed due to ioc not operational\n", 2952 ioc->name, __func__); 2953 rc = -EFAULT; 2954 goto out; 2955 } 2956 ssleep(1); 2957 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 2958 printk(MPT2SAS_INFO_FMT "%s: waiting for " 2959 "operational state(count=%d)\n", ioc->name, 2960 __func__, wait_state_count); 2961 } 2962 2963 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx); 2964 if (!smid) { 2965 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 2966 ioc->name, __func__); 2967 rc = -EAGAIN; 2968 goto out; 2969 } 2970 2971 rc = 0; 2972 ioc->base_cmds.status = MPT2_CMD_PENDING; 2973 request = mpt2sas_base_get_msg_frame(ioc, smid); 2974 ioc->base_cmds.smid = smid; 2975 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 2976 mpt2sas_base_put_smid_default(ioc, smid); 2977 init_completion(&ioc->base_cmds.done); 2978 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 
2979 msecs_to_jiffies(10000)); 2980 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 2981 printk(MPT2SAS_ERR_FMT "%s: timeout\n", 2982 ioc->name, __func__); 2983 _debug_dump_mf(mpi_request, 2984 sizeof(Mpi2SepRequest_t)/4); 2985 if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) 2986 issue_reset = 1; 2987 goto issue_host_reset; 2988 } 2989 if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) 2990 memcpy(mpi_reply, ioc->base_cmds.reply, 2991 sizeof(Mpi2SepReply_t)); 2992 else 2993 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); 2994 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 2995 goto out; 2996 2997 issue_host_reset: 2998 if (issue_reset) 2999 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 3000 FORCE_BIG_HAMMER); 3001 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 3002 rc = -EFAULT; 3003 out: 3004 mutex_unlock(&ioc->base_cmds.mutex); 3005 return rc; 3006} 3007 3008/** 3009 * _base_get_port_facts - obtain port facts reply and save in ioc 3010 * @ioc: per adapter object 3011 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3012 * 3013 * Returns 0 for success, non-zero for failure. 
 */
static int
_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
{
	Mpi2PortFactsRequest_t mpi_request;
	Mpi2PortFactsReply_t mpi_reply, *pfacts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
	mpi_request.PortNumber = port;
	/* NOTE(review): sleep_flag parameter is not used here; the
	 * handshake is always invoked with CAN_SLEEP -- confirm intended.
	 */
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);

	if (r != 0) {
		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		return r;
	}

	/* cache the subset of the reply the driver cares about */
	pfacts = &ioc->pfacts[port];
	memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
	pfacts->PortNumber = mpi_reply.PortNumber;
	pfacts->VP_ID = mpi_reply.VP_ID;
	pfacts->VF_ID = mpi_reply.VF_ID;
	pfacts->MaxPostedCmdBuffers =
	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);

	return 0;
}

/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply, *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);

	if (r != 0) {
		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		return r;
	}

	/* byte-swap and cache the facts; these drive queue sizing in
	 * _base_allocate_memory_pools()
	 */
	facts = &ioc->facts;
	memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	/* -1 wraps to the maximum unsigned id -- presumably "no limit";
	 * confirm against scsi_host max_id usage
	 */
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
	    facts->MaxChainDepth));
	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
	    "reply frame size(%d)\n", ioc->name,
	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
	return 0;
}

/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int r;
	struct timeval current_time;
	u16 ioc_status;

	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
	    __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);

	/* In MPI Revision I (0xA), the SystemReplyFrameSize(offset 0x18) was
	 * removed and made reserved.  For those with older firmware will need
	 * this fix. It was decided that the Reply and Request frame sizes are
	 * the same.
3151 */ 3152 if ((ioc->facts.HeaderVersion >> 8) < 0xA) { 3153 mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz); 3154/* mpi_request.SystemReplyFrameSize = 3155 * cpu_to_le16(ioc->reply_sz); 3156 */ 3157 } 3158 3159 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 3160 mpi_request.ReplyDescriptorPostQueueDepth = 3161 cpu_to_le16(ioc->reply_post_queue_depth); 3162 mpi_request.ReplyFreeQueueDepth = 3163 cpu_to_le16(ioc->reply_free_queue_depth); 3164 3165#if BITS_PER_LONG > 32 3166 mpi_request.SenseBufferAddressHigh = 3167 cpu_to_le32(ioc->sense_dma >> 32); 3168 mpi_request.SystemReplyAddressHigh = 3169 cpu_to_le32(ioc->reply_dma >> 32); 3170 mpi_request.SystemRequestFrameBaseAddress = 3171 cpu_to_le64(ioc->request_dma); 3172 mpi_request.ReplyFreeQueueAddress = 3173 cpu_to_le64(ioc->reply_free_dma); 3174 mpi_request.ReplyDescriptorPostQueueAddress = 3175 cpu_to_le64(ioc->reply_post_free_dma); 3176#else 3177 mpi_request.SystemRequestFrameBaseAddress = 3178 cpu_to_le32(ioc->request_dma); 3179 mpi_request.ReplyFreeQueueAddress = 3180 cpu_to_le32(ioc->reply_free_dma); 3181 mpi_request.ReplyDescriptorPostQueueAddress = 3182 cpu_to_le32(ioc->reply_post_free_dma); 3183#endif 3184 3185 /* This time stamp specifies number of milliseconds 3186 * since epoch ~ midnight January 1, 1970. 
3187 */ 3188 do_gettimeofday(¤t_time); 3189 mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + 3190 (current_time.tv_usec / 1000)); 3191 3192 if (ioc->logging_level & MPT_DEBUG_INIT) { 3193 u32 *mfp; 3194 int i; 3195 3196 mfp = (u32 *)&mpi_request; 3197 printk(KERN_INFO "\toffset:data\n"); 3198 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) 3199 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, 3200 le32_to_cpu(mfp[i])); 3201 } 3202 3203 r = _base_handshake_req_reply_wait(ioc, 3204 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, 3205 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10, 3206 sleep_flag); 3207 3208 if (r != 0) { 3209 printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n", 3210 ioc->name, __func__, r); 3211 return r; 3212 } 3213 3214 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 3215 if (ioc_status != MPI2_IOCSTATUS_SUCCESS || 3216 mpi_reply.IOCLogInfo) { 3217 printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__); 3218 r = -EIO; 3219 } 3220 3221 return 0; 3222} 3223 3224/** 3225 * _base_send_port_enable - send port_enable(discovery stuff) to firmware 3226 * @ioc: per adapter object 3227 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3228 * 3229 * Returns 0 for success, non-zero for failure. 
3230 */ 3231static int 3232_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 3233{ 3234 Mpi2PortEnableRequest_t *mpi_request; 3235 u32 ioc_state; 3236 unsigned long timeleft; 3237 int r = 0; 3238 u16 smid; 3239 3240 printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name); 3241 3242 if (ioc->base_cmds.status & MPT2_CMD_PENDING) { 3243 printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", 3244 ioc->name, __func__); 3245 return -EAGAIN; 3246 } 3247 3248 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx); 3249 if (!smid) { 3250 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 3251 ioc->name, __func__); 3252 return -EAGAIN; 3253 } 3254 3255 ioc->base_cmds.status = MPT2_CMD_PENDING; 3256 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 3257 ioc->base_cmds.smid = smid; 3258 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 3259 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 3260 mpi_request->VF_ID = 0; /* TODO */ 3261 mpi_request->VP_ID = 0; 3262 3263 mpt2sas_base_put_smid_default(ioc, smid); 3264 init_completion(&ioc->base_cmds.done); 3265 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 3266 300*HZ); 3267 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 3268 printk(MPT2SAS_ERR_FMT "%s: timeout\n", 3269 ioc->name, __func__); 3270 _debug_dump_mf(mpi_request, 3271 sizeof(Mpi2PortEnableRequest_t)/4); 3272 if (ioc->base_cmds.status & MPT2_CMD_RESET) 3273 r = -EFAULT; 3274 else 3275 r = -ETIME; 3276 goto out; 3277 } else 3278 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n", 3279 ioc->name, __func__)); 3280 3281 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL, 3282 60, sleep_flag); 3283 if (ioc_state) { 3284 printk(MPT2SAS_ERR_FMT "%s: failed going to operational state " 3285 " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); 3286 r = -EFAULT; 3287 } 3288 out: 3289 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 3290 printk(MPT2SAS_INFO_FMT "port enable: %s\n", 3291 ioc->name, 
((r == 0) ? "SUCCESS" : "FAILED")); 3292 return r; 3293} 3294 3295/** 3296 * _base_unmask_events - turn on notification for this event 3297 * @ioc: per adapter object 3298 * @event: firmware event 3299 * 3300 * The mask is stored in ioc->event_masks. 3301 */ 3302static void 3303_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event) 3304{ 3305 u32 desired_event; 3306 3307 if (event >= 128) 3308 return; 3309 3310 desired_event = (1 << (event % 32)); 3311 3312 if (event < 32) 3313 ioc->event_masks[0] &= ~desired_event; 3314 else if (event < 64) 3315 ioc->event_masks[1] &= ~desired_event; 3316 else if (event < 96) 3317 ioc->event_masks[2] &= ~desired_event; 3318 else if (event < 128) 3319 ioc->event_masks[3] &= ~desired_event; 3320} 3321 3322/** 3323 * _base_event_notification - send event notification 3324 * @ioc: per adapter object 3325 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3326 * 3327 * Returns 0 for success, non-zero for failure. 3328 */ 3329static int 3330_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 3331{ 3332 Mpi2EventNotificationRequest_t *mpi_request; 3333 unsigned long timeleft; 3334 u16 smid; 3335 int r = 0; 3336 int i; 3337 3338 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3339 __func__)); 3340 3341 if (ioc->base_cmds.status & MPT2_CMD_PENDING) { 3342 printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", 3343 ioc->name, __func__); 3344 return -EAGAIN; 3345 } 3346 3347 smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx); 3348 if (!smid) { 3349 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 3350 ioc->name, __func__); 3351 return -EAGAIN; 3352 } 3353 ioc->base_cmds.status = MPT2_CMD_PENDING; 3354 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 3355 ioc->base_cmds.smid = smid; 3356 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); 3357 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 3358 mpi_request->VF_ID = 0; /* TODO */ 3359 mpi_request->VP_ID = 0; 3360 for (i = 0; i < 
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3361 mpi_request->EventMasks[i] = 3362 cpu_to_le32(ioc->event_masks[i]); 3363 mpt2sas_base_put_smid_default(ioc, smid); 3364 init_completion(&ioc->base_cmds.done); 3365 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 3366 if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { 3367 printk(MPT2SAS_ERR_FMT "%s: timeout\n", 3368 ioc->name, __func__); 3369 _debug_dump_mf(mpi_request, 3370 sizeof(Mpi2EventNotificationRequest_t)/4); 3371 if (ioc->base_cmds.status & MPT2_CMD_RESET) 3372 r = -EFAULT; 3373 else 3374 r = -ETIME; 3375 } else 3376 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n", 3377 ioc->name, __func__)); 3378 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 3379 return r; 3380} 3381 3382/** 3383 * mpt2sas_base_validate_event_type - validating event types 3384 * @ioc: per adapter object 3385 * @event: firmware event 3386 * 3387 * This will turn on firmware event notification when application 3388 * ask for that event. We don't mask events that are already enabled. 
3389 */ 3390void 3391mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type) 3392{ 3393 int i, j; 3394 u32 event_mask, desired_event; 3395 u8 send_update_to_fw; 3396 3397 for (i = 0, send_update_to_fw = 0; i < 3398 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { 3399 event_mask = ~event_type[i]; 3400 desired_event = 1; 3401 for (j = 0; j < 32; j++) { 3402 if (!(event_mask & desired_event) && 3403 (ioc->event_masks[i] & desired_event)) { 3404 ioc->event_masks[i] &= ~desired_event; 3405 send_update_to_fw = 1; 3406 } 3407 desired_event = (desired_event << 1); 3408 } 3409 } 3410 3411 if (!send_update_to_fw) 3412 return; 3413 3414 mutex_lock(&ioc->base_cmds.mutex); 3415 _base_event_notification(ioc, CAN_SLEEP); 3416 mutex_unlock(&ioc->base_cmds.mutex); 3417} 3418 3419/** 3420 * _base_diag_reset - the "big hammer" start of day reset 3421 * @ioc: per adapter object 3422 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3423 * 3424 * Returns 0 for success, non-zero for failure. 3425 */ 3426static int 3427_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 3428{ 3429 u32 host_diagnostic; 3430 u32 ioc_state; 3431 u32 count; 3432 u32 hcb_size; 3433 3434 printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name); 3435 3436 _base_save_msix_table(ioc); 3437 3438 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n", 3439 ioc->name)); 3440 3441 count = 0; 3442 do { 3443 /* Write magic sequence to WriteSequence register 3444 * Loop until in diagnostic mode 3445 */ 3446 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "write magic " 3447 "sequence\n", ioc->name)); 3448 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); 3449 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); 3450 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); 3451 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); 3452 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); 3453 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); 3454 
writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); 3455 3456 /* wait 100 msec */ 3457 if (sleep_flag == CAN_SLEEP) 3458 msleep(100); 3459 else 3460 mdelay(100); 3461 3462 if (count++ > 20) 3463 goto out; 3464 3465 host_diagnostic = readl(&ioc->chip->HostDiagnostic); 3466 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "wrote magic " 3467 "sequence: count(%d), host_diagnostic(0x%08x)\n", 3468 ioc->name, count, host_diagnostic)); 3469 3470 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); 3471 3472 hcb_size = readl(&ioc->chip->HCBSize); 3473 3474 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "diag reset: issued\n", 3475 ioc->name)); 3476 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, 3477 &ioc->chip->HostDiagnostic); 3478 3479 /* don't access any registers for 50 milliseconds */ 3480 msleep(50); 3481 3482 /* 300 second max wait */ 3483 for (count = 0; count < 3000000 ; count++) { 3484 3485 host_diagnostic = readl(&ioc->chip->HostDiagnostic); 3486 3487 if (host_diagnostic == 0xFFFFFFFF) 3488 goto out; 3489 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) 3490 break; 3491 3492 /* wait 100 msec */ 3493 if (sleep_flag == CAN_SLEEP) 3494 msleep(1); 3495 else 3496 mdelay(1); 3497 } 3498 3499 if (host_diagnostic & MPI2_DIAG_HCB_MODE) { 3500 3501 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter " 3502 "assuming the HCB Address points to good F/W\n", 3503 ioc->name)); 3504 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; 3505 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; 3506 writel(host_diagnostic, &ioc->chip->HostDiagnostic); 3507 3508 drsprintk(ioc, printk(MPT2SAS_INFO_FMT 3509 "re-enable the HCDW\n", ioc->name)); 3510 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, 3511 &ioc->chip->HCBSize); 3512 } 3513 3514 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "restart the adapter\n", 3515 ioc->name)); 3516 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, 3517 &ioc->chip->HostDiagnostic); 3518 3519 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "disable 
writes to the " 3520 "diagnostic register\n", ioc->name)); 3521 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); 3522 3523 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "Wait for FW to go to the " 3524 "READY state\n", ioc->name)); 3525 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20, 3526 sleep_flag); 3527 if (ioc_state) { 3528 printk(MPT2SAS_ERR_FMT "%s: failed going to ready state " 3529 " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); 3530 goto out; 3531 } 3532 3533 _base_restore_msix_table(ioc); 3534 printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name); 3535 return 0; 3536 3537 out: 3538 printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name); 3539 return -EFAULT; 3540} 3541 3542/** 3543 * _base_make_ioc_ready - put controller in READY state 3544 * @ioc: per adapter object 3545 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3546 * @type: FORCE_BIG_HAMMER or SOFT_RESET 3547 * 3548 * Returns 0 for success, non-zero for failure. 3549 */ 3550static int 3551_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, 3552 enum reset_type type) 3553{ 3554 u32 ioc_state; 3555 int rc; 3556 3557 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3558 __func__)); 3559 3560 if (ioc->pci_error_recovery) 3561 return 0; 3562 3563 ioc_state = mpt2sas_base_get_iocstate(ioc, 0); 3564 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n", 3565 ioc->name, __func__, ioc_state)); 3566 3567 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) 3568 return 0; 3569 3570 if (ioc_state & MPI2_DOORBELL_USED) { 3571 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell " 3572 "active!\n", ioc->name)); 3573 goto issue_diag_reset; 3574 } 3575 3576 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 3577 mpt2sas_base_fault_info(ioc, ioc_state & 3578 MPI2_DOORBELL_DATA_MASK); 3579 goto issue_diag_reset; 3580 } 3581 3582 if (type == FORCE_BIG_HAMMER) 3583 goto issue_diag_reset; 3584 3585 if ((ioc_state & 
MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 3586 if (!(_base_send_ioc_reset(ioc, 3587 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) { 3588 ioc->ioc_reset_count++; 3589 return 0; 3590 } 3591 3592 issue_diag_reset: 3593 rc = _base_diag_reset(ioc, CAN_SLEEP); 3594 ioc->ioc_reset_count++; 3595 return rc; 3596} 3597 3598/** 3599 * _base_make_ioc_operational - put controller in OPERATIONAL state 3600 * @ioc: per adapter object 3601 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3602 * 3603 * Returns 0 for success, non-zero for failure. 3604 */ 3605static int 3606_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 3607{ 3608 int r, i; 3609 unsigned long flags; 3610 u32 reply_address; 3611 u16 smid; 3612 struct _tr_list *delayed_tr, *delayed_tr_next; 3613 3614 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3615 __func__)); 3616 3617 /* clean the delayed target reset list */ 3618 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 3619 &ioc->delayed_tr_list, list) { 3620 list_del(&delayed_tr->list); 3621 kfree(delayed_tr); 3622 } 3623 3624 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 3625 &ioc->delayed_tr_volume_list, list) { 3626 list_del(&delayed_tr->list); 3627 kfree(delayed_tr); 3628 } 3629 3630 /* initialize the scsi lookup free list */ 3631 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3632 INIT_LIST_HEAD(&ioc->free_list); 3633 smid = 1; 3634 for (i = 0; i < ioc->scsiio_depth; i++, smid++) { 3635 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list); 3636 ioc->scsi_lookup[i].cb_idx = 0xFF; 3637 ioc->scsi_lookup[i].smid = smid; 3638 ioc->scsi_lookup[i].scmd = NULL; 3639 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 3640 &ioc->free_list); 3641 } 3642 3643 /* hi-priority queue */ 3644 INIT_LIST_HEAD(&ioc->hpr_free_list); 3645 smid = ioc->hi_priority_smid; 3646 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { 3647 ioc->hpr_lookup[i].cb_idx = 0xFF; 3648 ioc->hpr_lookup[i].smid = smid; 3649 
list_add_tail(&ioc->hpr_lookup[i].tracker_list, 3650 &ioc->hpr_free_list); 3651 } 3652 3653 /* internal queue */ 3654 INIT_LIST_HEAD(&ioc->internal_free_list); 3655 smid = ioc->internal_smid; 3656 for (i = 0; i < ioc->internal_depth; i++, smid++) { 3657 ioc->internal_lookup[i].cb_idx = 0xFF; 3658 ioc->internal_lookup[i].smid = smid; 3659 list_add_tail(&ioc->internal_lookup[i].tracker_list, 3660 &ioc->internal_free_list); 3661 } 3662 3663 /* chain pool */ 3664 INIT_LIST_HEAD(&ioc->free_chain_list); 3665 for (i = 0; i < ioc->chain_depth; i++) 3666 list_add_tail(&ioc->chain_lookup[i].tracker_list, 3667 &ioc->free_chain_list); 3668 3669 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3670 3671 /* initialize Reply Free Queue */ 3672 for (i = 0, reply_address = (u32)ioc->reply_dma ; 3673 i < ioc->reply_free_queue_depth ; i++, reply_address += 3674 ioc->reply_sz) 3675 ioc->reply_free[i] = cpu_to_le32(reply_address); 3676 3677 /* initialize Reply Post Free Queue */ 3678 for (i = 0; i < ioc->reply_post_queue_depth; i++) 3679 ioc->reply_post_free[i].Words = ULLONG_MAX; 3680 3681 r = _base_send_ioc_init(ioc, sleep_flag); 3682 if (r) 3683 return r; 3684 3685 /* initialize the index's */ 3686 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; 3687 ioc->reply_post_host_index = 0; 3688 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); 3689 writel(0, &ioc->chip->ReplyPostHostIndex); 3690 3691 _base_unmask_interrupts(ioc); 3692 r = _base_event_notification(ioc, sleep_flag); 3693 if (r) 3694 return r; 3695 3696 if (sleep_flag == CAN_SLEEP) 3697 _base_static_config_pages(ioc); 3698 3699 if (ioc->wait_for_port_enable_to_complete) { 3700 if (diag_buffer_enable != 0) 3701 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); 3702 if (disable_discovery > 0) 3703 return r; 3704 } 3705 3706 r = _base_send_port_enable(ioc, sleep_flag); 3707 if (r) 3708 return r; 3709 3710 return r; 3711} 3712 3713/** 3714 * mpt2sas_base_free_resources - free resources 
controller resources (io/irq/memap) 3715 * @ioc: per adapter object 3716 * 3717 * Return nothing. 3718 */ 3719void 3720mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) 3721{ 3722 struct pci_dev *pdev = ioc->pdev; 3723 3724 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3725 __func__)); 3726 3727 _base_mask_interrupts(ioc); 3728 ioc->shost_recovery = 1; 3729 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 3730 ioc->shost_recovery = 0; 3731 if (ioc->pci_irq) { 3732 synchronize_irq(pdev->irq); 3733 free_irq(ioc->pci_irq, ioc); 3734 } 3735 _base_disable_msix(ioc); 3736 if (ioc->chip_phys) 3737 iounmap(ioc->chip); 3738 ioc->pci_irq = -1; 3739 ioc->chip_phys = 0; 3740 pci_release_selected_regions(ioc->pdev, ioc->bars); 3741 pci_disable_pcie_error_reporting(pdev); 3742 pci_disable_device(pdev); 3743 return; 3744} 3745 3746/** 3747 * mpt2sas_base_attach - attach controller instance 3748 * @ioc: per adapter object 3749 * 3750 * Returns 0 for success, non-zero for failure. 3751 */ 3752int 3753mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) 3754{ 3755 int r, i; 3756 3757 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3758 __func__)); 3759 3760 r = mpt2sas_base_map_resources(ioc); 3761 if (r) 3762 return r; 3763 3764 pci_set_drvdata(ioc->pdev, ioc->shost); 3765 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 3766 if (r) 3767 goto out_free_resources; 3768 3769 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 3770 if (r) 3771 goto out_free_resources; 3772 3773 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, 3774 sizeof(Mpi2PortFactsReply_t), GFP_KERNEL); 3775 if (!ioc->pfacts) { 3776 r = -ENOMEM; 3777 goto out_free_resources; 3778 } 3779 3780 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { 3781 r = _base_get_port_facts(ioc, i, CAN_SLEEP); 3782 if (r) 3783 goto out_free_resources; 3784 } 3785 3786 r = _base_allocate_memory_pools(ioc, CAN_SLEEP); 3787 if (r) 3788 goto out_free_resources; 3789 3790 init_waitqueue_head(&ioc->reset_wq); 3791 3792 
/* allocate memory pd handle bitmask list */ 3793 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); 3794 if (ioc->facts.MaxDevHandle % 8) 3795 ioc->pd_handles_sz++; 3796 ioc->pd_handles = kzalloc(ioc->pd_handles_sz, 3797 GFP_KERNEL); 3798 if (!ioc->pd_handles) { 3799 r = -ENOMEM; 3800 goto out_free_resources; 3801 } 3802 3803 ioc->fwfault_debug = mpt2sas_fwfault_debug; 3804 3805 /* base internal command bits */ 3806 mutex_init(&ioc->base_cmds.mutex); 3807 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3808 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 3809 3810 /* transport internal command bits */ 3811 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3812 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 3813 mutex_init(&ioc->transport_cmds.mutex); 3814 3815 /* scsih internal command bits */ 3816 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3817 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; 3818 mutex_init(&ioc->scsih_cmds.mutex); 3819 3820 /* task management internal command bits */ 3821 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3822 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 3823 mutex_init(&ioc->tm_cmds.mutex); 3824 3825 /* config page internal command bits */ 3826 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3827 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 3828 mutex_init(&ioc->config_cmds.mutex); 3829 3830 /* ctl module internal command bits */ 3831 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3832 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); 3833 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED; 3834 mutex_init(&ioc->ctl_cmds.mutex); 3835 3836 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || 3837 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || 3838 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply || 3839 !ioc->ctl_cmds.sense) { 3840 r = -ENOMEM; 3841 goto out_free_resources; 3842 } 3843 3844 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || 3845 
!ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || 3846 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) { 3847 r = -ENOMEM; 3848 goto out_free_resources; 3849 } 3850 3851 init_completion(&ioc->shost_recovery_done); 3852 3853 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3854 ioc->event_masks[i] = -1; 3855 3856 /* here we enable the events we care about */ 3857 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); 3858 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); 3859 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 3860 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); 3861 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 3862 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); 3863 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); 3864 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); 3865 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 3866 _base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL); 3867 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 3868 r = _base_make_ioc_operational(ioc, CAN_SLEEP); 3869 if (r) 3870 goto out_free_resources; 3871 3872 if (missing_delay[0] != -1 && missing_delay[1] != -1) 3873 _base_update_missing_delay(ioc, missing_delay[0], 3874 missing_delay[1]); 3875 3876 mpt2sas_base_start_watchdog(ioc); 3877 return 0; 3878 3879 out_free_resources: 3880 3881 ioc->remove_host = 1; 3882 mpt2sas_base_free_resources(ioc); 3883 _base_release_memory_pools(ioc); 3884 pci_set_drvdata(ioc->pdev, NULL); 3885 kfree(ioc->pd_handles); 3886 kfree(ioc->tm_cmds.reply); 3887 kfree(ioc->transport_cmds.reply); 3888 kfree(ioc->scsih_cmds.reply); 3889 kfree(ioc->config_cmds.reply); 3890 kfree(ioc->base_cmds.reply); 3891 kfree(ioc->ctl_cmds.reply); 3892 kfree(ioc->ctl_cmds.sense); 3893 kfree(ioc->pfacts); 3894 ioc->ctl_cmds.reply = NULL; 3895 ioc->base_cmds.reply = NULL; 3896 ioc->tm_cmds.reply = NULL; 3897 ioc->scsih_cmds.reply = NULL; 3898 ioc->transport_cmds.reply 
= NULL; 3899 ioc->config_cmds.reply = NULL; 3900 ioc->pfacts = NULL; 3901 return r; 3902} 3903 3904 3905/** 3906 * mpt2sas_base_detach - remove controller instance 3907 * @ioc: per adapter object 3908 * 3909 * Return nothing. 3910 */ 3911void 3912mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) 3913{ 3914 3915 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3916 __func__)); 3917 3918 mpt2sas_base_stop_watchdog(ioc); 3919 mpt2sas_base_free_resources(ioc); 3920 _base_release_memory_pools(ioc); 3921 pci_set_drvdata(ioc->pdev, NULL); 3922 kfree(ioc->pd_handles); 3923 kfree(ioc->pfacts); 3924 kfree(ioc->ctl_cmds.reply); 3925 kfree(ioc->ctl_cmds.sense); 3926 kfree(ioc->base_cmds.reply); 3927 kfree(ioc->tm_cmds.reply); 3928 kfree(ioc->transport_cmds.reply); 3929 kfree(ioc->scsih_cmds.reply); 3930 kfree(ioc->config_cmds.reply); 3931} 3932 3933/** 3934 * _base_reset_handler - reset callback handler (for base) 3935 * @ioc: per adapter object 3936 * @reset_phase: phase 3937 * 3938 * The handler for doing any required cleanup or initialization. 3939 * 3940 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET, 3941 * MPT2_IOC_DONE_RESET 3942 * 3943 * Return nothing. 
3944 */ 3945static void 3946_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) 3947{ 3948 switch (reset_phase) { 3949 case MPT2_IOC_PRE_RESET: 3950 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 3951 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); 3952 break; 3953 case MPT2_IOC_AFTER_RESET: 3954 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 3955 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); 3956 if (ioc->transport_cmds.status & MPT2_CMD_PENDING) { 3957 ioc->transport_cmds.status |= MPT2_CMD_RESET; 3958 mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid); 3959 complete(&ioc->transport_cmds.done); 3960 } 3961 if (ioc->base_cmds.status & MPT2_CMD_PENDING) { 3962 ioc->base_cmds.status |= MPT2_CMD_RESET; 3963 mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid); 3964 complete(&ioc->base_cmds.done); 3965 } 3966 if (ioc->config_cmds.status & MPT2_CMD_PENDING) { 3967 ioc->config_cmds.status |= MPT2_CMD_RESET; 3968 mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); 3969 ioc->config_cmds.smid = USHRT_MAX; 3970 complete(&ioc->config_cmds.done); 3971 } 3972 break; 3973 case MPT2_IOC_DONE_RESET: 3974 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 3975 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 3976 break; 3977 } 3978 mpt2sas_scsih_reset_handler(ioc, reset_phase); 3979 mpt2sas_ctl_reset_handler(ioc, reset_phase); 3980} 3981 3982/** 3983 * _wait_for_commands_to_complete - reset controller 3984 * @ioc: Pointer to MPT_ADAPTER structure 3985 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3986 * 3987 * This function waiting(3s) for all pending commands to complete 3988 * prior to putting controller in reset. 
3989 */ 3990static void 3991_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 3992{ 3993 u32 ioc_state; 3994 unsigned long flags; 3995 u16 i; 3996 3997 ioc->pending_io_count = 0; 3998 if (sleep_flag != CAN_SLEEP) 3999 return; 4000 4001 ioc_state = mpt2sas_base_get_iocstate(ioc, 0); 4002 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) 4003 return; 4004 4005 /* pending command count */ 4006 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4007 for (i = 0; i < ioc->scsiio_depth; i++) 4008 if (ioc->scsi_lookup[i].cb_idx != 0xFF) 4009 ioc->pending_io_count++; 4010 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4011 4012 if (!ioc->pending_io_count) 4013 return; 4014 4015 /* wait for pending commands to complete */ 4016 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); 4017} 4018 4019/** 4020 * mpt2sas_base_hard_reset_handler - reset controller 4021 * @ioc: Pointer to MPT_ADAPTER structure 4022 * @sleep_flag: CAN_SLEEP or NO_SLEEP 4023 * @type: FORCE_BIG_HAMMER or SOFT_RESET 4024 * 4025 * Returns 0 for success, non-zero for failure. 4026 */ 4027int 4028mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, 4029 enum reset_type type) 4030{ 4031 int r; 4032 unsigned long flags; 4033 4034 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 4035 __func__)); 4036 4037 if (ioc->pci_error_recovery) { 4038 printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n", 4039 ioc->name, __func__); 4040 r = 0; 4041 goto out; 4042 } 4043 4044 if (mpt2sas_fwfault_debug) 4045 mpt2sas_halt_firmware(ioc); 4046 4047 /* TODO - What we really should be doing is pulling 4048 * out all the code associated with NO_SLEEP; its never used. 4049 * That is legacy code from mpt fusion driver, ported over. 4050 * I will leave this BUG_ON here for now till its been resolved. 
4051 */ 4052 BUG_ON(sleep_flag == NO_SLEEP); 4053 4054 /* wait for an active reset in progress to complete */ 4055 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { 4056 do { 4057 ssleep(1); 4058 } while (ioc->shost_recovery == 1); 4059 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name, 4060 __func__)); 4061 return ioc->ioc_reset_in_progress_status; 4062 } 4063 4064 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 4065 ioc->shost_recovery = 1; 4066 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 4067 4068 _base_reset_handler(ioc, MPT2_IOC_PRE_RESET); 4069 _wait_for_commands_to_complete(ioc, sleep_flag); 4070 _base_mask_interrupts(ioc); 4071 r = _base_make_ioc_ready(ioc, sleep_flag, type); 4072 if (r) 4073 goto out; 4074 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); 4075 r = _base_make_ioc_operational(ioc, sleep_flag); 4076 if (!r) 4077 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); 4078 out: 4079 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %s\n", 4080 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); 4081 4082 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 4083 ioc->ioc_reset_in_progress_status = r; 4084 ioc->shost_recovery = 0; 4085 complete(&ioc->shost_recovery_done); 4086 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 4087 mutex_unlock(&ioc->reset_in_progress_mutex); 4088 4089 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name, 4090 __func__)); 4091 return r; 4092} 4093