Lines Matching refs:ioa_cfg

550 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
552 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
556 if (ipr_cmd->ioa_cfg->sis64)
588 if (ipr_cmd->ioa_cfg->sis64) {
624 * @ioa_cfg: ioa config struct
630 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
634 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
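A minimal sketch of the free-list pattern those fragments show: ipr_get_free_ipr_cmnd pops the head of ioa_cfg->free_q under the host lock. The re-initialization the real function performs on the block is elided here.

/* Pop the first free command block; caller holds the host lock.
 * The pool is sized to the adapter queue depth, so free_q is
 * assumed non-empty at this point. */
static struct ipr_cmnd *get_free_cmnd_sketch(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);	/* off the free list */
	return ipr_cmd;
}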
643 * @ioa_cfg: ioa config struct
652 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
658 ioa_cfg->allow_interrupts = 0;
661 if (ioa_cfg->sis64)
662 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
664 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
667 if (ioa_cfg->sis64)
668 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
669 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
670 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
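The fragments at 652-670 are the interrupt quiesce path. A hedged reconstruction of its shape: mask everything (64-bit register on SIS64 chips), clear latched bits, then read a sense register back so the posted MMIO writes actually reach the adapter before the function returns.

static void mask_and_clear_sketch(struct ipr_ioa_cfg *ioa_cfg, u32 clr_ints)
{
	ioa_cfg->allow_interrupts = 0;	/* stop servicing the ISR */

	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);

	/* Read back to flush the posted writes to the adapter. */
	readl(ioa_cfg->regs.sense_interrupt_reg);
}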
675 * @ioa_cfg: ioa config struct
680 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
682 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
687 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
688 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
689 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
693 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
699 * @ioa_cfg: ioa config struct
704 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
706 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
709 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
710 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
711 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
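ipr_save_pcix_cmd_reg and ipr_set_pcix_cmd_reg (680-711) exist because a chip reset wipes PCI-X config space. A sketch of the save half, assuming the driver's behavior of treating a missing capability as success:

static int save_pcix_cmd_sketch(struct ipr_ioa_cfg *ioa_cfg)
{
	int pos = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (!pos)
		return 0;	/* not PCI-X; nothing to preserve */

	if (pci_read_config_word(ioa_cfg->pdev, pos + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL)
		return -EIO;

	/* Force data parity error recovery and relaxed ordering on
	 * when the value is written back after the reset. */
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}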
731 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
737 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
753 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
760 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
765 * @ioa_cfg: ioa config struct
772 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
777 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
809 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
812 if (ioa_cfg->sis64) {
820 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
822 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
842 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
844 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
898 if (ipr_cmd->ioa_cfg->sis64) {
935 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
940 spin_unlock_irq(ioa_cfg->host->host_lock);
942 spin_lock_irq(ioa_cfg->host->host_lock);
947 * @ioa_cfg: ioa config struct
958 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
964 if (ioa_cfg->allow_cmds) {
965 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
966 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
967 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
991 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1033 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1044 if (ioa_cfg->sis64) {
1059 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1067 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1068 ioa_cfg->max_devs_supported);
1069 set_bit(res->target, ioa_cfg->target_ids);
1076 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1077 ioa_cfg->max_devs_supported);
1078 set_bit(res->target, ioa_cfg->array_ids);
1081 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1082 ioa_cfg->max_devs_supported);
1083 set_bit(res->target, ioa_cfg->vset_ids);
1085 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1086 ioa_cfg->max_devs_supported);
1087 set_bit(res->target, ioa_cfg->target_ids);
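Lines 1067-1087 are SIS64 target-id assignment: virtual sets, arrays, and generic targets each draw ids from their own bitmap. The allocation step reduces to find-first-zero plus set, as in this sketch; ipr_clear_res_target (1228-1244) is the matching clear_bit() on removal.

static u32 alloc_res_target_sketch(unsigned long *ids, u32 max_devs)
{
	u32 target = find_first_zero_bit(ids, max_devs);

	set_bit(target, ids);	/* claim the id */
	return target;
}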
1118 if (res->ioa_cfg->sis64) {
1171 if (res->ioa_cfg->sis64) {
1228 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1230 if (!ioa_cfg->sis64)
1234 clear_bit(res->target, ioa_cfg->array_ids);
1236 clear_bit(res->target, ioa_cfg->vset_ids);
1238 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1241 clear_bit(res->target, ioa_cfg->target_ids);
1244 clear_bit(res->target, ioa_cfg->target_ids);
1249 * @ioa_cfg: ioa config struct
1255 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1264 if (ioa_cfg->sis64) {
1272 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1280 if (list_empty(&ioa_cfg->free_res_q)) {
1281 ipr_send_hcam(ioa_cfg,
1287 res = list_entry(ioa_cfg->free_res_q.next,
1292 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1301 if (ioa_cfg->allow_ml_add_del)
1302 schedule_work(&ioa_cfg->work_q);
1305 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1309 if (ioa_cfg->allow_ml_add_del)
1310 schedule_work(&ioa_cfg->work_q);
1313 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1333 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1337 dev_err(&ioa_cfg->pdev->dev,
1340 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1342 ipr_handle_config_change(ioa_cfg, hostrcb);
1449 * @ioa_cfg: ioa config struct
1455 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1460 if (ioa_cfg->sis64)
1485 * @ioa_cfg: ioa config struct
1491 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1517 * @ioa_cfg: ioa config struct
1523 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1541 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1557 * @ioa_cfg: ioa config struct
1563 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1600 * @ioa_cfg: ioa config struct
1606 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1624 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1647 * @ioa_cfg: ioa config struct
1653 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1667 ioa_cfg->host->host_no,
1688 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1689 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1698 * @ioa_cfg: ioa config struct
1704 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1718 ioa_cfg->host->host_no,
1738 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1739 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1753 * @ioa_cfg: ioa config struct
1760 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1767 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1781 * @ioa_cfg: ioa config struct
1787 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1792 if (ioa_cfg->sis64)
1803 ipr_log_hex_data(ioa_cfg, error->data,
1811 * @ioa_cfg: ioa config struct
1817 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1829 ipr_log_hex_data(ioa_cfg, error->data,
2094 * @ioa_cfg: ioa config struct
2100 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2126 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2131 * @ioa_cfg: ioa config struct
2137 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2185 * @ioa_cfg: ioa config struct
2191 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2218 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2223 * @ioa_cfg: ioa config struct
2229 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2232 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2260 * @ioa_cfg: ioa config struct
2268 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2278 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2280 if (ioa_cfg->sis64)
2285 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2288 scsi_report_bus_reset(ioa_cfg->host,
2300 ioa_cfg->errors_logged++;
2302 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2309 ipr_log_cache_error(ioa_cfg, hostrcb);
2312 ipr_log_config_error(ioa_cfg, hostrcb);
2316 ipr_log_array_error(ioa_cfg, hostrcb);
2319 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2322 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2325 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2329 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2332 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2335 ipr_log_fabric_error(ioa_cfg, hostrcb);
2338 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2342 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2345 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2350 ipr_log_generic_error(ioa_cfg, hostrcb);
2368 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2373 if (ioa_cfg->sis64)
2379 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2382 ipr_handle_log_data(ioa_cfg, hostrcb);
2384 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2386 dev_err(&ioa_cfg->pdev->dev,
2390 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2406 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2411 ioa_cfg->errors_logged++;
2412 dev_err(&ioa_cfg->pdev->dev,
2415 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2416 ioa_cfg->sdt_state = GET_DUMP;
2418 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2419 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2438 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443 ioa_cfg->errors_logged++;
2444 dev_err(&ioa_cfg->pdev->dev,
2447 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2448 ioa_cfg->sdt_state = GET_DUMP;
2450 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2452 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2453 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2456 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2462 * @ioa_cfg: ioa config struct
2470 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2473 if (!ioa_cfg->in_reset_reload)
2474 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2476 spin_unlock_irq(ioa_cfg->host->host_lock);
2477 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2478 spin_lock_irq(ioa_cfg->host->host_lock);
2482 if (ioa_cfg->ioa_is_dead) {
2525 * @ioa_cfg: ioa config struct
2535 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2542 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2560 * @ioa_cfg: ioa config struct
2568 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2575 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2593 * @ioa_cfg: ioa config struct
2601 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2608 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2609 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
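The SIS64 dump fetch (2601-2609) is a simple indexed-register protocol: write the word's address to dump_addr_reg, read its value from dump_data_reg. A sketch of the loop, byte-swapping into the big-endian dump buffer:

static void sis64_dump_read_sketch(struct ipr_ioa_cfg *ioa_cfg,
				   u32 start_addr, __be32 *dest, u32 nwords)
{
	u32 i;

	for (i = 0; i < nwords; i++, dest++) {
		writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
	}
}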
2618 * @ioa_cfg: ioa config struct
2626 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2633 if (ioa_cfg->sis64)
2634 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2639 ioa_cfg->regs.set_uproc_interrupt_reg32);
2642 if (ipr_wait_iodbg_ack(ioa_cfg,
2644 dev_err(&ioa_cfg->pdev->dev,
2651 ioa_cfg->regs.clr_interrupt_reg);
2654 writel(start_addr, ioa_cfg->ioa_mailbox);
2658 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2662 if (ipr_wait_iodbg_ack(ioa_cfg,
2664 dev_err(&ioa_cfg->pdev->dev,
2670 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2677 ioa_cfg->regs.clr_interrupt_reg);
2683 ioa_cfg->regs.set_uproc_interrupt_reg32);
2686 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2690 ioa_cfg->regs.clr_interrupt_reg);
2695 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2710 * @ioa_cfg: ioa config struct
2719 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2726 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2728 if (ioa_cfg->sis64)
2754 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2755 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2758 rc = ipr_get_ldump_data_section(ioa_cfg,
2763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2795 * @ioa_cfg: ioa config struct
2801 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2804 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2812 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2821 * @ioa_cfg: ioa config struct
2827 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2842 * @ioa_cfg: ioa config struct
2848 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2857 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2863 * @ioa_cfg: ioa config struct
2869 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2878 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2884 * @ioa_cfg: ioa config struct
2890 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2904 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2906 if (ioa_cfg->sdt_state != READ_DUMP) {
2907 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2911 if (ioa_cfg->sis64) {
2912 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2914 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2917 start_addr = readl(ioa_cfg->ioa_mailbox);
2919 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2920 dev_err(&ioa_cfg->pdev->dev,
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2926 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2938 ipr_dump_version_data(ioa_cfg, driver_dump);
2939 ipr_dump_location_data(ioa_cfg, driver_dump);
2940 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2941 ipr_dump_trace_data(ioa_cfg, driver_dump);
2958 if (ioa_cfg->sis64) {
2968 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2974 dev_err(&ioa_cfg->pdev->dev,
2978 ioa_cfg->sdt_state = DUMP_OBTAINED;
2979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2990 if (ioa_cfg->sis64)
2995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3005 if (ioa_cfg->sis64)
3023 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3036 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3041 ioa_cfg->sdt_state = DUMP_OBTAINED;
3046 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3059 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3065 ioa_cfg->dump = NULL;
3066 ioa_cfg->sdt_state = INACTIVE;
3067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3094 struct ipr_ioa_cfg *ioa_cfg =
3100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3102 if (ioa_cfg->sdt_state == READ_DUMP) {
3103 dump = ioa_cfg->dump;
3105 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3110 ipr_get_ioa_dump(ioa_cfg, dump);
3113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3114 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3115 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3123 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3124 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3128 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3134 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3140 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3147 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3154 scsi_add_device(ioa_cfg->host, bus, target, lun);
3155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3160 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3161 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3184 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3191 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3218 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3219 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3228 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3252 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3256 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3258 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
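Lines 3252-3258 show the driver's standard sysfs show handler: recover ioa_cfg from the Scsi_Host's hostdata, format one value under the host lock, unlock. The same shape recurs for fw_version and the sis64 flag. A self-contained sketch:

static ssize_t show_log_level_sketch(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}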
3275 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3278 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3279 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3310 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3317 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3318 while (ioa_cfg->in_reset_reload) {
3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3320 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3321 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3324 ioa_cfg->errors_logged = 0;
3325 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3327 if (ioa_cfg->in_reset_reload) {
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3334 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3338 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3339 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
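The loop at 3318-3321 is the driver's reset-wait idiom, repeated at most entry points that must not race an adapter reset: drop the host lock, sleep on reset_wait_q until in_reset_reload clears, retake the lock, and re-check. As a hypothetical helper (the driver open-codes it at each site):

static void wait_reset_done_sketch(struct ipr_ioa_cfg *ioa_cfg,
				   unsigned long *lock_flags)
{
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, *lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, *lock_flags);
	}
}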
3366 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3370 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3371 if (ioa_cfg->ioa_is_dead)
3375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3403 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3404 ioa_cfg->ioa_is_dead = 0;
3405 ioa_cfg->reset_retries = 0;
3406 ioa_cfg->in_ioa_bringdown = 0;
3407 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3410 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3447 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3448 if (!ioa_cfg->in_reset_reload)
3449 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3451 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3671 * @ioa_cfg: ioa config struct
3679 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3685 while (ioa_cfg->in_reset_reload) {
3686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3687 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3688 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3691 if (ioa_cfg->ucode_sglist) {
3692 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3693 dev_err(&ioa_cfg->pdev->dev,
3698 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3702 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3703 dev_err(&ioa_cfg->pdev->dev,
3708 ioa_cfg->ucode_sglist = sglist;
3709 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3711 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3713 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3714 ioa_cfg->ucode_sglist = NULL;
3715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3735 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3749 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3750 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3761 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3769 dev_err(&ioa_cfg->pdev->dev,
3776 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3806 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3810 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3811 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3854 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3864 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3865 dump = ioa_cfg->dump;
3867 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3868 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3872 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3898 if (ioa_cfg->sis64)
3939 * @ioa_cfg: ioa config struct
3944 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3957 if (ioa_cfg->sis64)
3971 dump->ioa_cfg = ioa_cfg;
3973 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3975 if (INACTIVE != ioa_cfg->sdt_state) {
3976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3982 ioa_cfg->dump = dump;
3983 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3984 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3985 ioa_cfg->dump_taken = 1;
3986 schedule_work(&ioa_cfg->work_q);
3988 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3995 * @ioa_cfg: ioa config struct
4000 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4007 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4008 dump = ioa_cfg->dump;
4010 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4014 ioa_cfg->dump = NULL;
4015 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4041 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4048 rc = ipr_alloc_dump(ioa_cfg);
4050 rc = ipr_free_dump(ioa_cfg);
4070 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4085 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4092 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4113 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4117 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4153 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4158 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4187 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4193 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4195 if (res && ioa_cfg->sis64)
4200 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4203 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4227 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4232 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4234 if (res && ioa_cfg->sis64)
4239 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4263 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4268 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4342 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4367 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4373 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4383 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4385 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386 sata_port->ioa_cfg = ioa_cfg;
4398 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4415 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4417 if (ioa_cfg->sis64) {
4420 clear_bit(starget->id, ioa_cfg->array_ids);
4422 clear_bit(starget->id, ioa_cfg->vset_ids);
4424 clear_bit(starget->id, ioa_cfg->target_ids);
4444 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4447 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4467 struct ipr_ioa_cfg *ioa_cfg;
4470 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4495 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4524 if (ioa_cfg->sis64)
4530 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4579 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4586 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4598 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4603 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4617 struct ipr_ioa_cfg *ioa_cfg;
4621 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4623 if (!ioa_cfg->in_reset_reload) {
4624 dev_err(&ioa_cfg->pdev->dev,
4627 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4628 ioa_cfg->sdt_state = GET_DUMP;
4631 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4650 * @ioa_cfg: ioa config struct
4662 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4672 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4676 if (ipr_cmd->ioa_cfg->sis64) {
4693 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4695 if (ipr_cmd->ioa_cfg->sis64)
4721 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4727 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4728 while (ioa_cfg->in_reset_reload) {
4729 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4730 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4731 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4736 rc = ipr_device_reset(ioa_cfg, res);
4740 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4759 struct ipr_ioa_cfg *ioa_cfg;
4765 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4776 if (ioa_cfg->in_reset_reload)
4778 if (ioa_cfg->ioa_is_dead)
4781 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4803 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4810 rc = ipr_device_reset(ioa_cfg, res);
4839 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4843 if (!ioa_cfg->sis64)
4844 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4846 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4860 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4878 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4883 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4884 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4885 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4890 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4916 struct ipr_ioa_cfg *ioa_cfg;
4923 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4930 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4940 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4945 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4956 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4977 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5009 * @ioa_cfg: ioa config struct
5015 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5021 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5028 if (ioa_cfg->sis64) {
5029 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5030 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5034 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5035 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5036 list_del(&ioa_cfg->reset_cmd->queue);
5037 del_timer(&ioa_cfg->reset_cmd->timer);
5038 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5048 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5051 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5052 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5054 list_del(&ioa_cfg->reset_cmd->queue);
5055 del_timer(&ioa_cfg->reset_cmd->timer);
5056 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5058 if (ioa_cfg->clear_isr) {
5060 dev_err(&ioa_cfg->pdev->dev,
5062 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5063 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5068 ioa_cfg->ioa_unit_checked = 1;
5070 dev_err(&ioa_cfg->pdev->dev,
5073 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5074 ioa_cfg->sdt_state = GET_DUMP;
5076 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5077 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5085 * @ioa_cfg: ioa config struct
5091 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5093 ioa_cfg->errors_logged++;
5094 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5096 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5097 ioa_cfg->sdt_state = GET_DUMP;
5099 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5112 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5122 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5125 if (!ioa_cfg->allow_interrupts) {
5126 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5133 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5134 ioa_cfg->toggle_bit) {
5136 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5140 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5145 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5157 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5158 ioa_cfg->hrrq_curr++;
5160 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5161 ioa_cfg->toggle_bit ^= 1u;
5165 if (ipr_cmd && !ioa_cfg->clear_isr)
5172 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5173 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5178 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5182 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5190 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5192 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
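The ISR core (5133-5161) walks the Host RRQ with a toggle bit: an entry is valid while its toggle bit matches the host's copy, and when the ring wraps the adapter flips the bit, so the host flips its expectation too. A condensed shape, run under the host lock (handle extraction is abbreviated):

static void drain_hrrq_sketch(struct ipr_ioa_cfg *ioa_cfg)
{
	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       ioa_cfg->toggle_bit) {
		u32 cmd_index = be32_to_cpu(*ioa_cfg->hrrq_curr) &
				IPR_HRRQ_REQ_RESP_HANDLE_MASK;
		/* The real code shifts the handle down, bounds-checks
		 * it, and completes ioa_cfg->ipr_cmnd_list[cmd_index]. */

		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
			ioa_cfg->hrrq_curr++;		/* next slot */
		} else {
			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
			ioa_cfg->toggle_bit ^= 1u;	/* ring wrapped */
		}
	}
}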
5198 * @ioa_cfg: ioa config struct
5204 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5222 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5250 * @ioa_cfg: ioa config struct
5256 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5273 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5352 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5370 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5395 if (ipr_cmd->ioa_cfg->sis64)
5478 * @ioa_cfg: ioa config struct
5489 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5505 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5513 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5525 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5528 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5530 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5653 if (ipr_cmd->ioa_cfg->sis64)
5666 * @ioa_cfg: ioa config struct
5675 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5691 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5725 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5754 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5770 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5778 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5781 ipr_erp_start(ioa_cfg, ipr_cmd);
5799 struct ipr_ioa_cfg *ioa_cfg;
5806 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5815 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5822 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5832 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5834 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5863 if (ioa_cfg->sis64)
5864 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5866 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5870 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5913 struct ipr_ioa_cfg *ioa_cfg;
5916 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5919 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5963 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5967 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5968 while (ioa_cfg->in_reset_reload) {
5969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5970 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5971 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5974 if (!ioa_cfg->allow_cmds)
5977 rc = ipr_device_reset(ioa_cfg, res);
5989 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6003 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6007 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6008 while (ioa_cfg->in_reset_reload) {
6009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6010 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6011 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6014 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
6016 ipr_device_reset(ioa_cfg, sata_port->res);
6020 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6061 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6067 if (ipr_cmd->ioa_cfg->sis64)
6073 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6076 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6082 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6189 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6194 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6197 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6200 if (ioa_cfg->sis64) {
6209 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6218 if (ioa_cfg->sis64)
6319 * @ioa_cfg: ioa cfg struct
6328 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6332 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6341 #define ipr_invalid_adapter(ioa_cfg) 0
6356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6359 ioa_cfg->in_reset_reload = 0;
6360 ioa_cfg->reset_retries = 0;
6361 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6362 wake_up_all(&ioa_cfg->reset_wait_q);
6364 spin_unlock_irq(ioa_cfg->host->host_lock);
6365 scsi_unblock_requests(ioa_cfg->host);
6366 spin_lock_irq(ioa_cfg->host->host_lock);
6385 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6391 ioa_cfg->in_reset_reload = 0;
6392 ioa_cfg->allow_cmds = 1;
6393 ioa_cfg->reset_cmd = NULL;
6394 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6396 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6397 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6402 schedule_work(&ioa_cfg->work_q);
6404 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6407 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6409 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6412 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6413 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6415 ioa_cfg->reset_retries = 0;
6416 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6417 wake_up_all(&ioa_cfg->reset_wait_q);
6419 spin_unlock(ioa_cfg->host->host_lock);
6420 scsi_unblock_requests(ioa_cfg->host);
6421 spin_lock(ioa_cfg->host->host_lock);
6423 if (!ioa_cfg->allow_cmds)
6424 scsi_block_requests(ioa_cfg->host);
6460 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6461 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6467 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6484 ioa_cfg->vpd_cbs_dma +
6492 if (!ioa_cfg->sis64)
6541 * @ioa_cfg: ioa config struct
6549 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6566 dev_err(&ioa_cfg->pdev->dev,
6577 * @ioa_cfg: ioa config struct
6586 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6592 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6593 ioa_cfg->bus_attr[i].bus_width);
6595 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6596 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6602 * @ioa_cfg: ioa config struct
6610 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6628 dev_err(&ioa_cfg->pdev->dev,
6634 bus_attr = &ioa_cfg->bus_attr[i];
6685 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6686 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6690 ipr_scsi_bus_speed_limit(ioa_cfg);
6691 ipr_check_term_power(ioa_cfg, mode_pages);
6692 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6697 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6701 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6746 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6749 dev_err(&ioa_cfg->pdev->dev,
6753 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6754 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6770 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6775 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6795 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6799 0x28, ioa_cfg->vpd_cbs_dma +
6823 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6824 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6839 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6883 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6887 0x24, ioa_cfg->vpd_cbs_dma +
6914 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6921 if (ioa_cfg->sis64)
6922 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6924 flag = ioa_cfg->u.cfg_table->hdr.flags;
6927 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6929 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6932 if (ioa_cfg->sis64)
6933 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6935 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6938 if (ioa_cfg->sis64)
6939 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6941 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6946 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6953 if (list_empty(&ioa_cfg->free_res_q)) {
6954 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6959 res = list_entry(ioa_cfg->free_res_q.next,
6961 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6975 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6981 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6984 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7005 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7007 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7008 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7012 ioa_cfg->dual_raid = 1;
7013 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7020 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7021 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7022 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7024 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7097 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7098 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7099 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7107 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7135 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7154 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7160 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7162 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7167 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7185 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7191 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7210 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7214 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7220 if (ioa_cfg->sis64)
7223 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7225 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7227 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7229 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7235 if (ioa_cfg->sis64) {
7237 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7239 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7241 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7243 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
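The Identify-HRRQ setup (7220-7243) hands the RRQ's DMA address to the adapter one byte at a time through the CDB: bits 31..0 always, bits 63..32 only on SIS64. A generic packing sketch (the CDB byte offsets here are illustrative, not the driver's actual layout):

static void pack_dma_addr_sketch(u8 *cdb, u64 addr, int sis64)
{
	int shift, i = 0;

	if (sis64)
		for (shift = 56; shift >= 32; shift -= 8)
			cdb[i++] = (addr >> shift) & 0xff;	/* high word */
	for (shift = 24; shift >= 0; shift -= 8)
		cdb[i++] = (addr >> shift) & 0xff;	/* low word */
}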
7269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7274 if (ioa_cfg->reset_cmd == ipr_cmd) {
7279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7299 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7309 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7310 * @ioa_cfg: ioa cfg struct
7315 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7317 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7320 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7321 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7322 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7323 ioa_cfg->toggle_bit = 1;
7326 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
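ipr_init_ioa_mem (7315-7326) resets the ring state to match the ISR's expectations: queue memory zeroed, curr back to start, and toggle_bit set to 1 so the first pass accepts entries the adapter writes with the bit set. Sketch:

static void init_hrrq_sketch(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	ioa_cfg->toggle_bit = 1;	/* expected bit for pass one */
}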
7341 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7344 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7359 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7360 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7361 stage_time = ioa_cfg->transop_timeout;
7364 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7369 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7370 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7380 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7397 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7403 ipr_init_ioa_mem(ioa_cfg);
7405 ioa_cfg->allow_interrupts = 1;
7406 if (ioa_cfg->sis64) {
7408 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7409 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7412 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7416 ioa_cfg->regs.clr_interrupt_mask_reg32);
7417 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7422 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7424 if (ioa_cfg->sis64) {
7427 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7429 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7431 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7433 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7435 if (ioa_cfg->sis64) {
7441 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7445 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7465 if (ioa_cfg->sdt_state == GET_DUMP)
7466 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7467 else if (ioa_cfg->sdt_state == READ_DUMP)
7468 ioa_cfg->sdt_state = ABORT_DUMP;
7470 ioa_cfg->dump_timeout = 1;
7478 * @ioa_cfg: ioa config struct
7486 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7488 ioa_cfg->errors_logged++;
7489 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7494 * @ioa_cfg: ioa config struct
7502 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7510 mailbox = readl(ioa_cfg->ioa_mailbox);
7512 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7513 ipr_unit_check_no_data(ioa_cfg);
7518 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7524 ipr_unit_check_no_data(ioa_cfg);
7536 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7541 rc = ipr_get_ldump_data_section(ioa_cfg,
7547 ipr_handle_log_data(ioa_cfg, hostrcb);
7550 ioa_cfg->sdt_state == GET_DUMP)
7551 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7553 ipr_unit_check_no_data(ioa_cfg);
7555 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7569 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7572 ioa_cfg->ioa_unit_checked = 0;
7573 ipr_get_unit_check_buffer(ioa_cfg);
7594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7598 ioa_cfg->pdev->state_saved = true;
7599 pci_restore_state(ioa_cfg->pdev);
7601 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7606 ipr_fail_all_ops(ioa_cfg);
7608 if (ioa_cfg->sis64) {
7610 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7611 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7614 if (ioa_cfg->ioa_unit_checked) {
7615 if (ioa_cfg->sis64) {
7620 ioa_cfg->ioa_unit_checked = 0;
7621 ipr_get_unit_check_buffer(ioa_cfg);
7628 if (ioa_cfg->in_ioa_bringdown) {
7633 if (GET_DUMP == ioa_cfg->sdt_state) {
7634 ioa_cfg->sdt_state = READ_DUMP;
7635 ioa_cfg->dump_timeout = 0;
7636 if (ioa_cfg->sis64)
7641 schedule_work(&ioa_cfg->work_q);
7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7664 if (ioa_cfg->cfg_locked)
7665 pci_cfg_access_unlock(ioa_cfg->pdev);
7666 ioa_cfg->cfg_locked = 0;
7683 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7687 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7689 ioa_cfg->regs.set_uproc_interrupt_reg32);
7691 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7698 if (ioa_cfg->cfg_locked)
7699 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7700 ioa_cfg->cfg_locked = 0;
7721 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7740 struct pci_dev *pdev = ioa_cfg->pdev;
7761 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7764 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
7765 ioa_cfg->cfg_locked = 1;
7766 ipr_cmd->job_step = ioa_cfg->reset;
7774 ipr_cmd->job_step = ioa_cfg->reset;
7775 dev_err(&ioa_cfg->pdev->dev,
7794 ipr_cmd->ioa_cfg->cfg_locked = 0;
7802 * @ioa_cfg: ioa config struct
7807 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7811 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7835 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7860 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7865 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7868 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7869 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7893 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7894 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7896 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7915 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7916 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7932 if (ioa_cfg->sis64)
7958 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7964 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7974 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8002 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8007 if (ioa_cfg->reset_cmd != ipr_cmd) {
8012 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8030 * @ioa_cfg: ioa config struct
8042 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8048 ioa_cfg->in_reset_reload = 1;
8049 ioa_cfg->allow_cmds = 0;
8050 scsi_block_requests(ioa_cfg->host);
8052 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8053 ioa_cfg->reset_cmd = ipr_cmd;
8062 * @ioa_cfg: ioa config struct
8072 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8075 if (ioa_cfg->ioa_is_dead)
8078 if (ioa_cfg->in_reset_reload) {
8079 if (ioa_cfg->sdt_state == GET_DUMP)
8080 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8081 else if (ioa_cfg->sdt_state == READ_DUMP)
8082 ioa_cfg->sdt_state = ABORT_DUMP;
8085 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8086 dev_err(&ioa_cfg->pdev->dev,
8089 ioa_cfg->reset_retries = 0;
8090 ioa_cfg->ioa_is_dead = 1;
8092 if (ioa_cfg->in_ioa_bringdown) {
8093 ioa_cfg->reset_cmd = NULL;
8094 ioa_cfg->in_reset_reload = 0;
8095 ipr_fail_all_ops(ioa_cfg);
8096 wake_up_all(&ioa_cfg->reset_wait_q);
8098 spin_unlock_irq(ioa_cfg->host->host_lock);
8099 scsi_unblock_requests(ioa_cfg->host);
8100 spin_lock_irq(ioa_cfg->host->host_lock);
8103 ioa_cfg->in_ioa_bringdown = 1;
8108 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8123 ipr_cmd->ioa_cfg->allow_interrupts = 0;
8124 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
8140 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8142 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8143 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8158 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8160 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8161 if (ioa_cfg->needs_warm_reset)
8162 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8164 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8180 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8182 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8183 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8184 ioa_cfg->sdt_state = ABORT_DUMP;
8185 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8186 ioa_cfg->in_ioa_bringdown = 1;
8187 ioa_cfg->allow_cmds = 0;
8188 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8189 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8221 * @ioa_cfg: ioa cfg struct
8230 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8236 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8237 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8238 if (ioa_cfg->needs_hard_reset) {
8239 ioa_cfg->needs_hard_reset = 0;
8240 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8242 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8246 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8247 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8249 if (ioa_cfg->ioa_is_dead) {
8251 } else if (ipr_invalid_adapter(ioa_cfg)) {
8255 dev_err(&ioa_cfg->pdev->dev,
8259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8267 * @ioa_cfg: ioa config struct
8272 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8277 if (ioa_cfg->ipr_cmnd_list[i])
8278 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8279 ioa_cfg->ipr_cmnd_list[i],
8280 ioa_cfg->ipr_cmnd_list_dma[i]);
8282 ioa_cfg->ipr_cmnd_list[i] = NULL;
8285 if (ioa_cfg->ipr_cmd_pool)
8286 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8288 kfree(ioa_cfg->ipr_cmnd_list);
8289 kfree(ioa_cfg->ipr_cmnd_list_dma);
8290 ioa_cfg->ipr_cmnd_list = NULL;
8291 ioa_cfg->ipr_cmnd_list_dma = NULL;
8292 ioa_cfg->ipr_cmd_pool = NULL;
8297 * @ioa_cfg: ioa cfg struct
8302 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8306 kfree(ioa_cfg->res_entries);
8307 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8308 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8309 ipr_free_cmd_blks(ioa_cfg);
8310 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8311 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8312 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8313 ioa_cfg->u.cfg_table,
8314 ioa_cfg->cfg_table_dma);
8317 pci_free_consistent(ioa_cfg->pdev,
8319 ioa_cfg->hostrcb[i],
8320 ioa_cfg->hostrcb_dma[i]);
8323 ipr_free_dump(ioa_cfg);
8324 kfree(ioa_cfg->trace);
8337 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8339 struct pci_dev *pdev = ioa_cfg->pdev;
8342 free_irq(pdev->irq, ioa_cfg);
8344 iounmap(ioa_cfg->hdw_dma_regs);
8346 ipr_free_mem(ioa_cfg);
8347 scsi_host_put(ioa_cfg->host);
8354 * @ioa_cfg: ioa config struct
8359 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8366 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8369 if (!ioa_cfg->ipr_cmd_pool)
8372 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8373 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8375 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8376 ipr_free_cmd_blks(ioa_cfg);
8381 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8384 ipr_free_cmd_blks(ioa_cfg);
8389 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8390 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8394 if (ioa_cfg->sis64)
8400 if (ioa_cfg->sis64) {
8414 ipr_cmd->ioa_cfg = ioa_cfg;
8418 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
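Command blocks (8359-8418) come from one pci_pool: allocate IPR_NUM_CMD_BLKS blocks, record each block's virtual and bus address side by side, and park every block on free_q. Condensed sketch; the list-array allocations and IOARCB setup are elided, and the alignment argument is illustrative:

static int alloc_cmd_blks_sketch(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 16, 0);
	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool,
					 GFP_KERNEL, &dma_addr);
		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);	/* unwind partial */
			return -ENOMEM;
		}
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
		ipr_cmd->ioa_cfg = ioa_cfg;
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}
	return 0;
}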
8426 * @ioa_cfg: ioa config struct
8431 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8433 struct pci_dev *pdev = ioa_cfg->pdev;
8437 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8438 ioa_cfg->max_devs_supported, GFP_KERNEL);
8440 if (!ioa_cfg->res_entries)
8443 if (ioa_cfg->sis64) {
8444 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8445 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8446 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8447 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8448 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8449 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8452 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8453 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8454 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8457 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8459 &ioa_cfg->vpd_cbs_dma);
8461 if (!ioa_cfg->vpd_cbs)
8464 if (ipr_alloc_cmd_blks(ioa_cfg))
8467 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8469 &ioa_cfg->host_rrq_dma);
8471 if (!ioa_cfg->host_rrq)
8474 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8475 ioa_cfg->cfg_table_size,
8476 &ioa_cfg->cfg_table_dma);
8478 if (!ioa_cfg->u.cfg_table)
8482 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8484 &ioa_cfg->hostrcb_dma[i]);
8486 if (!ioa_cfg->hostrcb[i])
8489 ioa_cfg->hostrcb[i]->hostrcb_dma =
8490 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8491 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8492 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8495 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8498 if (!ioa_cfg->trace)
8509 ioa_cfg->hostrcb[i],
8510 ioa_cfg->hostrcb_dma[i]);
8512 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8513 ioa_cfg->u.cfg_table,
8514 ioa_cfg->cfg_table_dma);
8517 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8519 ipr_free_cmd_blks(ioa_cfg);
8522 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8524 kfree(ioa_cfg->res_entries);
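The error path at 8509-8524 is the kernel's standard goto-unwind: each allocation failure jumps to a label that frees only what already succeeded, in reverse order. The shape, with hypothetical alloc_a/alloc_b/alloc_c helpers standing in for the pci_alloc_consistent calls:

static int alloc_unwind_shape(struct ipr_ioa_cfg *ioa_cfg)
{
	if (alloc_a(ioa_cfg))
		goto out;
	if (alloc_b(ioa_cfg))
		goto out_free_a;
	if (alloc_c(ioa_cfg))
		goto out_free_b;
	return 0;

out_free_b:
	free_b(ioa_cfg);	/* reverse order of allocation */
out_free_a:
	free_a(ioa_cfg);
out:
	return -ENOMEM;
}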
8530 * @ioa_cfg: ioa config struct
8535 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8540 ioa_cfg->bus_attr[i].bus = i;
8541 ioa_cfg->bus_attr[i].qas_enabled = 0;
8542 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8544 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8546 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8552 * @ioa_cfg: ioa config struct
8559 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8566 ioa_cfg->host = host;
8567 ioa_cfg->pdev = pdev;
8568 ioa_cfg->log_level = ipr_log_level;
8569 ioa_cfg->doorbell = IPR_DOORBELL;
8570 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8571 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8572 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8573 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8574 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8575 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8576 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8577 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8579 INIT_LIST_HEAD(&ioa_cfg->free_q);
8580 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8581 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8582 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8583 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8584 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8585 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8586 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8587 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8588 ioa_cfg->sdt_state = INACTIVE;
8590 ipr_initialize_bus_attr(ioa_cfg);
8591 ioa_cfg->max_devs_supported = ipr_max_devs;
8593 if (ioa_cfg->sis64) {
8597 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8602 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8607 host->can_queue = ioa_cfg->max_cmds;
8608 pci_set_drvdata(pdev, ioa_cfg);
8610 p = &ioa_cfg->chip_cfg->regs;
8611 t = &ioa_cfg->regs;
8612 base = ioa_cfg->hdw_dma_regs;
8631 if (ioa_cfg->sis64) {
8670 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8674 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8676 ioa_cfg->msi_received = 1;
8677 wake_up(&ioa_cfg->msi_wait_q);
8679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8695 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8704 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8705 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8706 ioa_cfg->msi_received = 0;
8707 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8708 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8709 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8712 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8719 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8720 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8721 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8722 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8724 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8725 if (!ioa_cfg->msi_received) {
8732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8734 free_irq(pdev->irq, ioa_cfg);
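ipr_test_msi (8695-8734) verifies MSI delivery before committing to it: install a throwaway handler, poke the adapter's IO-debug-acknowledge interrupt, and wait up to a second for the handler to set msi_received. A condensed sketch; on failure the probe falls back to legacy interrupts:

static int test_msi_sketch(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);

	if (rc)
		return rc;

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.sense_interrupt_reg32);	/* provoke one MSI */
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	free_irq(pdev->irq, ioa_cfg);

	return ioa_cfg->msi_received ? 0 : -EOPNOTSUPP;
}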
8752 struct ipr_ioa_cfg *ioa_cfg;
8768 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8776 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8777 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8778 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8781 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8783 if (!ioa_cfg->ipr_chip) {
8790 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8791 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8792 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
8793 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
8796 ioa_cfg->transop_timeout = ipr_transop_timeout;
8798 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8800 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8802 ioa_cfg->revid = pdev->revision;
8822 ioa_cfg->hdw_dma_regs = ipr_regs;
8823 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8824 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8826 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8830 if (ioa_cfg->sis64) {
8846 ioa_cfg->chip_cfg->cache_line_size);
8855 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8856 rc = ipr_test_msi(ioa_cfg, pdev);
8875 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8878 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8881 if (ioa_cfg->sis64)
8882 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8884 * ioa_cfg->max_devs_supported)));
8886 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8888 * ioa_cfg->max_devs_supported)));
8890 rc = ipr_alloc_mem(ioa_cfg);
8901 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8902 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8903 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8905 ioa_cfg->needs_hard_reset = 1;
8907 ioa_cfg->needs_hard_reset = 1;
8909 ioa_cfg->ioa_unit_checked = 1;
8911 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8913 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8914 IPR_NAME, ioa_cfg);
8923 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8924 ioa_cfg->needs_warm_reset = 1;
8925 ioa_cfg->reset = ipr_reset_slot_reset;
8927 ioa_cfg->reset = ipr_reset_start_bist;
8930 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8938 ipr_free_mem(ioa_cfg);
8954 * @ioa_cfg: ioa config struct
8962 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8968 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8973 * @ioa_cfg: ioa config struct
8985 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8989 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8990 ioa_cfg->sdt_state = ABORT_DUMP;
8991 ioa_cfg->reset_retries = 0;
8992 ioa_cfg->in_ioa_bringdown = 1;
8993 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9009 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9012 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9013 while (ioa_cfg->in_reset_reload) {
9014 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9015 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9016 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9019 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9022 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9023 flush_work_sync(&ioa_cfg->work_q);
9024 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9027 list_del(&ioa_cfg->queue);
9030 if (ioa_cfg->sdt_state == ABORT_DUMP)
9031 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9032 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9034 ipr_free_all_resources(ioa_cfg);
9050 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9054 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9056 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9058 scsi_remove_host(ioa_cfg->host);
9074 struct ipr_ioa_cfg *ioa_cfg;
9082 ioa_cfg = pci_get_drvdata(pdev);
9083 rc = ipr_probe_ioa_part2(ioa_cfg);
9090 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9097 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9101 scsi_remove_host(ioa_cfg->host);
9106 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9110 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9112 scsi_remove_host(ioa_cfg->host);
9117 scsi_scan_host(ioa_cfg->host);
9118 ipr_scan_vsets(ioa_cfg);
9119 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9120 ioa_cfg->allow_ml_add_del = 1;
9121 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9122 schedule_work(&ioa_cfg->work_q);
9138 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9142 while (ioa_cfg->in_reset_reload) {
9143 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9144 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9145 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9148 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9150 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9253 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9255 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9267 struct ipr_ioa_cfg *ioa_cfg;
9275 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9276 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9277 if (!ioa_cfg->allow_cmds) {
9278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9282 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);