Lines Matching refs:hba

175 static void ufshcd_tmc_handler(struct ufs_hba *hba);
177 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
178 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
179 static void ufshcd_hba_exit(struct ufs_hba *hba);
180 static int ufshcd_probe_hba(struct ufs_hba *hba);
181 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
183 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
184 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
185 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
186 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
188 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
191 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
195 if (!hba->is_irq_enabled) {
196 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
197 hba);
199 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
201 hba->is_irq_enabled = true;
207 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
209 if (hba->is_irq_enabled) {
210 free_irq(hba->irq, hba);
211 hba->is_irq_enabled = false;
217 * @hba: per-adapter interface
226 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
235 while ((ufshcd_readl(hba, reg) & mask) != val) {
240 if ((ufshcd_readl(hba, reg) & mask) != val)
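
The helper at source lines 226-240 re-reads a masked register value until it matches an expected value or a timeout elapses. Below is a minimal standalone sketch of that polling pattern; all names are illustrative, and nanosleep() merely stands in for the kernel's per-iteration delay.

/*
 * Standalone sketch (not driver code): poll a register until the
 * masked value matches, or give up after a timeout.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool poll_register(uint32_t (*read_reg)(void), uint32_t mask,
			  uint32_t expected, unsigned int interval_ms,
			  unsigned int timeout_ms)
{
	struct timespec delay = {
		.tv_sec = interval_ms / 1000,
		.tv_nsec = (long)(interval_ms % 1000) * 1000000L,
	};
	unsigned int waited = 0;

	while ((read_reg() & mask) != expected) {
		if (waited >= timeout_ms)
			return false;		/* timed out */
		nanosleep(&delay, NULL);
		waited += interval_ms;
	}
	return true;
}

static uint32_t fake_status;

static uint32_t read_fake_status(void)
{
	fake_status |= 0x1;			/* pretend the ready bit comes up */
	return fake_status;
}

int main(void)
{
	return poll_register(read_fake_status, 0x1, 0x1, 10, 50) ? 0 : 1;
}

The driver uses this helper, for example, to confirm that a cleared doorbell slot actually dropped to zero (source lines 1363-1378).
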
251 * @hba: Pointer to adapter instance
255 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
257 if (hba->ufs_version == UFSHCI_VERSION_10)
265 * @hba: Pointer to adapter instance
269 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
271 return ufshcd_readl(hba, REG_UFS_VERSION);
277 * @hba: pointer to adapter instance
281 static inline int ufshcd_is_device_present(struct ufs_hba *hba)
283 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
314 * @hba: per adapter instance
321 static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
330 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
331 if (tag >= hba->nutmrs)
333 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
341 static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
343 clear_bit_unlock(slot, &hba->tm_slots_in_use);
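
Source lines 321-343 show the lock-free slot allocation used for task-management requests: scan for a clear bit, try to claim it atomically, and retry if another context raced in; clear_bit_unlock() releases the slot. A standalone C11 sketch of the same claim-and-retry idea, with atomic_fetch_or()/atomic_fetch_and() standing in for the kernel's test_and_set_bit_lock()/clear_bit_unlock() and NR_SLOTS as a made-up stand-in for hba->nutmrs:

#include <stdatomic.h>
#include <stdbool.h>

#define NR_SLOTS 8

static atomic_uint slots_in_use;

static bool get_free_slot(int *free_slot)
{
	unsigned int map, prev;
	int tag;

	do {
		map = atomic_load(&slots_in_use);
		for (tag = 0; tag < NR_SLOTS; tag++)	/* find_first_zero_bit() */
			if (!(map & (1u << tag)))
				break;
		if (tag >= NR_SLOTS)
			return false;			/* every slot is busy */
		prev = atomic_fetch_or(&slots_in_use, 1u << tag);
	} while (prev & (1u << tag));			/* raced, pick another bit */

	*free_slot = tag;
	return true;
}

static void put_slot(int slot)
{
	atomic_fetch_and(&slots_in_use, ~(1u << slot));
}

int main(void)
{
	int a = -1, b = -1;

	get_free_slot(&a);	/* claims slot 0 */
	get_free_slot(&b);	/* claims slot 1 */
	put_slot(a);
	put_slot(b);
	return 0;
}

The device-management tag allocator at source lines 1478-1502 uses the same claim-and-retry loop, only scanning from the top of the map with find_last_bit().
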
348 * @hba: per adapter instance
351 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
353 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
380 * @hba: Pointer to adapter instance
385 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
387 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
393 * @hba: Pointer to adapter instance
398 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
400 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
457 * @hba: per adapter instance
460 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
462 ufshcd_writel(hba, INT_AGGR_ENABLE |
469 * @hba: per adapter instance
474 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
476 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
486 * @hba: per adapter instance
488 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
490 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
492 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
498 * @hba: per adapter instance
500 static inline void ufshcd_hba_start(struct ufs_hba *hba)
502 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
507 * @hba: per adapter instance
511 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
513 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
520 struct ufs_hba *hba = container_of(work, struct ufs_hba,
523 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
525 spin_lock_irqsave(hba->host->host_lock, flags);
526 if (hba->clk_gating.state == CLKS_ON) {
527 spin_unlock_irqrestore(hba->host->host_lock, flags);
531 spin_unlock_irqrestore(hba->host->host_lock, flags);
532 ufshcd_setup_clocks(hba, true);
535 if (ufshcd_can_hibern8_during_gating(hba)) {
537 hba->clk_gating.is_suspended = true;
538 if (ufshcd_is_link_hibern8(hba)) {
539 ret = ufshcd_uic_hibern8_exit(hba);
541 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
544 ufshcd_set_link_active(hba);
546 hba->clk_gating.is_suspended = false;
549 if (ufshcd_is_clkscaling_enabled(hba))
550 devfreq_resume_device(hba->devfreq);
551 scsi_unblock_requests(hba->host);
557 * @hba: per adapter instance
560 int ufshcd_hold(struct ufs_hba *hba, bool async)
565 if (!ufshcd_is_clkgating_allowed(hba))
567 spin_lock_irqsave(hba->host->host_lock, flags);
568 hba->clk_gating.active_reqs++;
571 switch (hba->clk_gating.state) {
575 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
576 hba->clk_gating.state = CLKS_ON;
585 scsi_block_requests(hba->host);
586 hba->clk_gating.state = REQ_CLKS_ON;
587 schedule_work(&hba->clk_gating.ungate_work);
595 hba->clk_gating.active_reqs--;
599 spin_unlock_irqrestore(hba->host->host_lock, flags);
600 flush_work(&hba->clk_gating.ungate_work);
602 spin_lock_irqsave(hba->host->host_lock, flags);
605 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
606 __func__, hba->clk_gating.state);
609 spin_unlock_irqrestore(hba->host->host_lock, flags);
616 struct ufs_hba *hba = container_of(work, struct ufs_hba,
620 spin_lock_irqsave(hba->host->host_lock, flags);
621 if (hba->clk_gating.is_suspended) {
622 hba->clk_gating.state = CLKS_ON;
626 if (hba->clk_gating.active_reqs
627 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
628 || hba->lrb_in_use || hba->outstanding_tasks
629 || hba->active_uic_cmd || hba->uic_async_done)
632 spin_unlock_irqrestore(hba->host->host_lock, flags);
635 if (ufshcd_can_hibern8_during_gating(hba)) {
636 if (ufshcd_uic_hibern8_enter(hba)) {
637 hba->clk_gating.state = CLKS_ON;
640 ufshcd_set_link_hibern8(hba);
643 if (ufshcd_is_clkscaling_enabled(hba)) {
644 devfreq_suspend_device(hba->devfreq);
645 hba->clk_scaling.window_start_t = 0;
648 if (!ufshcd_is_link_active(hba))
649 ufshcd_setup_clocks(hba, false);
652 __ufshcd_setup_clocks(hba, false, true);
663 spin_lock_irqsave(hba->host->host_lock, flags);
664 if (hba->clk_gating.state == REQ_CLKS_OFF)
665 hba->clk_gating.state = CLKS_OFF;
668 spin_unlock_irqrestore(hba->host->host_lock, flags);
674 static void __ufshcd_release(struct ufs_hba *hba)
676 if (!ufshcd_is_clkgating_allowed(hba))
679 hba->clk_gating.active_reqs--;
681 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
682 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
683 || hba->lrb_in_use || hba->outstanding_tasks
684 || hba->active_uic_cmd || hba->uic_async_done)
687 hba->clk_gating.state = REQ_CLKS_OFF;
688 schedule_delayed_work(&hba->clk_gating.gate_work,
689 msecs_to_jiffies(hba->clk_gating.delay_ms));
692 void ufshcd_release(struct ufs_hba *hba)
696 spin_lock_irqsave(hba->host->host_lock, flags);
697 __ufshcd_release(hba);
698 spin_unlock_irqrestore(hba->host->host_lock, flags);
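
Taken together, source lines 560-698 implement reference-counted clock gating: ufshcd_hold() bumps active_reqs and makes sure the clocks are on (ungating via a work item when needed), ufshcd_release() drops the count and, once nothing is pending, moves the state to REQ_CLKS_OFF and schedules delayed gate work, and the gate work turns the clocks off only if the state is still REQ_CLKS_OFF when the delay expires. A much-simplified, single-threaded model of that bookkeeping follows; all names are illustrative, there is no locking or deferred work here, and the real code also refuses to gate while commands, tasks, or UIC traffic are outstanding.

#include <stdio.h>

enum clk_state { CLKS_OFF, CLKS_ON, REQ_CLKS_OFF, REQ_CLKS_ON };

struct gating {
	enum clk_state state;
	int active_reqs;
};

static void hold(struct gating *g)
{
	g->active_reqs++;
	g->state = CLKS_ON;		/* real code ungates via a work item if needed */
}

static void release(struct gating *g)
{
	if (--g->active_reqs > 0)
		return;
	g->state = REQ_CLKS_OFF;	/* real code schedules delayed gate work */
}

/* Runs after the gating delay (clk_gating.delay_ms in the driver). */
static void gate_work(struct gating *g)
{
	if (g->state == REQ_CLKS_OFF && g->active_reqs == 0)
		g->state = CLKS_OFF;
}

int main(void)
{
	struct gating g = { CLKS_ON, 0 };

	hold(&g);		/* command issued */
	release(&g);		/* command completed */
	gate_work(&g);		/* gating delay expired with nothing pending */
	printf("state=%d (0 == CLKS_OFF)\n", g.state);
	return 0;
}
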
704 struct ufs_hba *hba = dev_get_drvdata(dev);
706 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
712 struct ufs_hba *hba = dev_get_drvdata(dev);
718 spin_lock_irqsave(hba->host->host_lock, flags);
719 hba->clk_gating.delay_ms = value;
720 spin_unlock_irqrestore(hba->host->host_lock, flags);
724 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
726 if (!ufshcd_is_clkgating_allowed(hba))
729 hba->clk_gating.delay_ms = 150;
730 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
731 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
733 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
734 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
735 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
736 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
737 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
738 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
739 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
742 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
744 if (!ufshcd_is_clkgating_allowed(hba))
746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
747 cancel_work_sync(&hba->clk_gating.ungate_work);
748 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
752 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
754 if (!ufshcd_is_clkscaling_enabled(hba))
757 if (!hba->clk_scaling.is_busy_started) {
758 hba->clk_scaling.busy_start_t = ktime_get();
759 hba->clk_scaling.is_busy_started = true;
763 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
765 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
767 if (!ufshcd_is_clkscaling_enabled(hba))
770 if (!hba->outstanding_reqs && scaling->is_busy_started) {
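
Source lines 752-770 account for how long the controller stays busy so devfreq can decide when to scale clocks: a busy window opens when the first request goes out and closes once the queue drains. A standalone sketch of that accounting, with clock_gettime() standing in for ktime_get() and all names illustrative:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct clk_scaling {
	bool is_busy_started;
	uint64_t busy_start_ns;
	uint64_t total_busy_ns;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void scaling_start_busy(struct clk_scaling *s)
{
	if (!s->is_busy_started) {		/* first outstanding request */
		s->busy_start_ns = now_ns();
		s->is_busy_started = true;
	}
}

static void scaling_update_busy(struct clk_scaling *s, unsigned long outstanding)
{
	if (!outstanding && s->is_busy_started) {	/* doorbell drained */
		s->total_busy_ns += now_ns() - s->busy_start_ns;
		s->is_busy_started = false;
	}
}

int main(void)
{
	struct clk_scaling s = { 0 };

	scaling_start_busy(&s);		/* request submitted */
	scaling_update_busy(&s, 0);	/* last request completed */
	printf("busy for %llu ns\n", (unsigned long long)s.total_busy_ns);
	return 0;
}
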
779 * @hba: per adapter instance
783 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
785 ufshcd_clk_scaling_start_busy(hba);
786 __set_bit(task_tag, &hba->outstanding_reqs);
787 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
809 * @hba: per adapter instance
813 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
815 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
830 hba->dev_cmd.query.request.upiu_req.length);
832 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
834 dev_warn(hba->dev,
846 * @hba: per adapter instance
848 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
850 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
853 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
854 hba->nutmrs =
855 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
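
Source lines 848-855 derive the transfer-request and task-management slot counts from the capabilities register. A small sketch of that decode; the two mask values are assumptions about the UFSHCI layout (transfer-request slots in bits 4:0, task-management slots in bits 18:16), and the driver takes the real definitions from ufshci.h.

#include <stdint.h>
#include <stdio.h>

#define MASK_TRANSFER_REQUESTS_SLOTS		0x0000001F	/* assumed */
#define MASK_TASK_MANAGEMENT_REQUEST_SLOTS	0x00070000	/* assumed */

int main(void)
{
	uint32_t capabilities = 0x0707001F;	/* example register value */
	int nutrs  = (capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	int nutmrs = ((capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;

	printf("nutrs=%d nutmrs=%d\n", nutrs, nutmrs);	/* 32 and 8 */
	return 0;
}
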
861 * @hba: per adapter instance
864 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
866 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
874 * @hba: Pointer to adapter instance
879 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
881 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
886 * @hba: per adapter instance
892 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
894 WARN_ON(hba->active_uic_cmd);
896 hba->active_uic_cmd = uic_cmd;
899 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
900 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
901 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
904 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
910 * @hba: per adapter instance
917 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
928 spin_lock_irqsave(hba->host->host_lock, flags);
929 hba->active_uic_cmd = NULL;
930 spin_unlock_irqrestore(hba->host->host_lock, flags);
937 * @hba: per adapter instance
945 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
947 if (!ufshcd_ready_for_uic_cmd(hba)) {
948 dev_err(hba->dev,
955 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
962 * @hba: per adapter instance
968 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
973 ufshcd_hold(hba, false);
974 mutex_lock(&hba->uic_cmd_mutex);
975 spin_lock_irqsave(hba->host->host_lock, flags);
976 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
977 spin_unlock_irqrestore(hba->host->host_lock, flags);
979 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
981 mutex_unlock(&hba->uic_cmd_mutex);
983 ufshcd_release(hba);
1029 * @hba: per adapter instance
1032 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
1034 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1036 if (hba->ufs_version == UFSHCI_VERSION_10) {
1044 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1049 * @hba: per adapter instance
1052 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
1054 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
1056 if (hba->ufs_version == UFSHCI_VERSION_10) {
1066 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
1142 * @hba: UFS hba
1146 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
1150 struct ufs_query *query = &hba->dev_cmd.query;
1189 * @hba: per adapter instance
1192 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1209 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
1211 hba, lrbp, upiu_flags);
1212 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
1220 dev_err(hba->dev, "%s: UFS native commands are not supported\n",
1225 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
1269 struct ufs_hba *hba;
1274 hba = shost_priv(host);
1278 spin_lock_irqsave(hba->host->host_lock, flags);
1279 switch (hba->ufshcd_state) {
1290 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1291 __func__, hba->ufshcd_state);
1296 spin_unlock_irqrestore(hba->host->host_lock, flags);
1299 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1310 err = ufshcd_hold(hba, true);
1313 clear_bit_unlock(tag, &hba->lrb_in_use);
1316 WARN_ON(hba->clk_gating.state != CLKS_ON);
1318 lrbp = &hba->lrb[tag];
1330 ufshcd_compose_upiu(hba, lrbp);
1334 clear_bit_unlock(tag, &hba->lrb_in_use);
1339 spin_lock_irqsave(hba->host->host_lock, flags);
1340 ufshcd_send_command(hba, tag);
1342 spin_unlock_irqrestore(hba->host->host_lock, flags);
1347 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1357 hba->dev_cmd.type = cmd_type;
1359 return ufshcd_compose_upiu(hba, lrbp);
1363 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1370 spin_lock_irqsave(hba->host->host_lock, flags);
1371 ufshcd_utrl_clear(hba, tag);
1372 spin_unlock_irqrestore(hba->host->host_lock, flags);
1378 err = ufshcd_wait_for_register(hba,
1386 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1388 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1398 * @hba: per adapter instance
1402 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1411 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1413 dev_err(hba->dev, "%s: unexpected response %x\n",
1418 err = ufshcd_check_query_response(hba, lrbp);
1420 err = ufshcd_copy_query_response(hba, lrbp);
1425 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1430 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1438 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1445 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1448 spin_lock_irqsave(hba->host->host_lock, flags);
1449 hba->dev_cmd.complete = NULL;
1453 err = ufshcd_dev_cmd_completion(hba, lrbp);
1455 spin_unlock_irqrestore(hba->host->host_lock, flags);
1459 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
1469 * @hba: per-adapter instance
1478 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1488 tmp = ~hba->lrb_in_use;
1489 tag = find_last_bit(&tmp, hba->nutrs);
1490 if (tag >= hba->nutrs)
1492 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1500 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1502 clear_bit_unlock(tag, &hba->lrb_in_use);
1507 * @hba: UFS hba
1512 * it is expected you hold the hba->dev_cmd.lock mutex.
1514 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1528 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1531 lrbp = &hba->lrb[tag];
1533 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1537 hba->dev_cmd.complete = &wait;
1539 spin_lock_irqsave(hba->host->host_lock, flags);
1540 ufshcd_send_command(hba, tag);
1541 spin_unlock_irqrestore(hba->host->host_lock, flags);
1543 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1546 ufshcd_put_dev_cmd_tag(hba, tag);
1547 wake_up(&hba->dev_cmd.tag_wq);
1553 * @hba: per-adapter instance
1561 static inline void ufshcd_init_query(struct ufs_hba *hba,
1565 *request = &hba->dev_cmd.query.request;
1566 *response = &hba->dev_cmd.query.response;
1577 * @hba: per-adapter instance
1584 static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1591 BUG_ON(!hba);
1593 ufshcd_hold(hba, false);
1594 mutex_lock(&hba->dev_cmd.lock);
1595 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1608 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1615 dev_err(hba->dev,
1622 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1625 dev_err(hba->dev,
1636 mutex_unlock(&hba->dev_cmd.lock);
1637 ufshcd_release(hba);
1643 * @hba: per-adapter instance
1652 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1659 BUG_ON(!hba);
1661 ufshcd_hold(hba, false);
1663 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1669 mutex_lock(&hba->dev_cmd.lock);
1670 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1682 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1688 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1691 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1699 mutex_unlock(&hba->dev_cmd.lock);
1701 ufshcd_release(hba);
1707 * @hba: per-adapter instance
1719 static int ufshcd_query_descriptor(struct ufs_hba *hba,
1727 BUG_ON(!hba);
1729 ufshcd_hold(hba, false);
1731 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1738 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1744 mutex_lock(&hba->dev_cmd.lock);
1745 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1747 hba->dev_cmd.query.descriptor = desc_buf;
1758 dev_err(hba->dev,
1765 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1768 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1773 hba->dev_cmd.query.descriptor = NULL;
1777 mutex_unlock(&hba->dev_cmd.lock);
1779 ufshcd_release(hba);
1785 * @hba: Pointer to adapter instance
1794 static int ufshcd_read_desc_param(struct ufs_hba *hba,
1825 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
1833 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
1849 static inline int ufshcd_read_desc(struct ufs_hba *hba,
1855 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1858 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
1862 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
1867 * @hba: Pointer to adapter instance
1875 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
1888 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
1894 * @hba: per adapter instance
1905 static int ufshcd_memory_alloc(struct ufs_hba *hba)
1910 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
1911 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
1913 &hba->ucdl_dma_addr,
1918 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
1919 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
1922 if (!hba->ucdl_base_addr ||
1923 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
1924 dev_err(hba->dev,
1933 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
1934 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
1936 &hba->utrdl_dma_addr,
1938 if (!hba->utrdl_base_addr ||
1939 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
1940 dev_err(hba->dev,
1949 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
1950 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
1952 &hba->utmrdl_dma_addr,
1954 if (!hba->utmrdl_base_addr ||
1955 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
1956 dev_err(hba->dev,
1962 hba->lrb = devm_kzalloc(hba->dev,
1963 hba->nutrs * sizeof(struct ufshcd_lrb),
1965 if (!hba->lrb) {
1966 dev_err(hba->dev, "LRB Memory allocation failed\n");
1977 * @hba: per adapter instance
1987 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
1998 utrdlp = hba->utrdl_base_addr;
1999 cmd_descp = hba->ucdl_base_addr;
2007 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2009 for (i = 0; i < hba->nutrs; i++) {
2026 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2027 hba->lrb[i].ucd_req_ptr =
2029 hba->lrb[i].ucd_rsp_ptr =
2031 hba->lrb[i].ucd_prdt_ptr =
2038 * @hba: per adapter instance
2047 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2054 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2056 dev_err(hba->dev,
2063 * @hba: per adapter instance
2071 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2088 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2090 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2099 * @hba: per adapter instance
2106 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2121 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2123 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
2139 * @hba: per adapter instance
2151 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2158 mutex_lock(&hba->uic_cmd_mutex);
2161 spin_lock_irqsave(hba->host->host_lock, flags);
2162 hba->uic_async_done = &uic_async_done;
2163 ret = __ufshcd_send_uic_cmd(hba, cmd);
2164 spin_unlock_irqrestore(hba->host->host_lock, flags);
2166 dev_err(hba->dev,
2171 ret = ufshcd_wait_for_uic_cmd(hba, cmd);
2173 dev_err(hba->dev,
2179 if (!wait_for_completion_timeout(hba->uic_async_done,
2181 dev_err(hba->dev,
2188 status = ufshcd_get_upmcrs(hba);
2190 dev_err(hba->dev,
2196 spin_lock_irqsave(hba->host->host_lock, flags);
2197 hba->uic_async_done = NULL;
2198 spin_unlock_irqrestore(hba->host->host_lock, flags);
2199 mutex_unlock(&hba->uic_cmd_mutex);
2207 * @hba: per adapter instance
2212 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2220 ufshcd_hold(hba, false);
2221 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2222 ufshcd_release(hba);
2227 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2233 return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2236 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2242 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2244 ufshcd_set_link_off(hba);
2245 ret = ufshcd_host_reset_and_restore(hba);
2253 * values in hba power info
2254 * @hba: per-adapter instance
2256 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2258 hba->pwr_info.gear_rx = UFS_PWM_G1;
2259 hba->pwr_info.gear_tx = UFS_PWM_G1;
2260 hba->pwr_info.lane_rx = 1;
2261 hba->pwr_info.lane_tx = 1;
2262 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2263 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2264 hba->pwr_info.hs_rate = 0;
2269 * @hba: per-adapter instance
2271 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2273 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2275 if (hba->max_pwr_info.is_valid)
2283 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2285 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2289 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2301 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2303 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2306 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2313 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2316 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2319 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2326 hba->max_pwr_info.is_valid = true;
2330 static int ufshcd_change_power_mode(struct ufs_hba *hba,
2336 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2337 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2338 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2339 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2340 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2341 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2342 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2343 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2353 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2354 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2358 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2360 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2362 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2363 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2367 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2369 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2375 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2378 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2382 dev_err(hba->dev,
2385 if (hba->vops && hba->vops->pwr_change_notify)
2386 hba->vops->pwr_change_notify(hba,
2389 memcpy(&hba->pwr_info, pwr_mode,
2398 * @hba: per-adapter instance
2401 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2407 if (hba->vops && hba->vops->pwr_change_notify)
2408 hba->vops->pwr_change_notify(hba,
2413 ret = ufshcd_change_power_mode(hba, &final_params);
2420 * @hba: per-adapter instance
2424 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2431 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2435 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2438 dev_err(hba->dev,
2447 err = ufshcd_query_flag(hba,
2452 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
2457 dev_err(hba->dev,
2461 dev_err(hba->dev,
2471 * @hba: per adapter instance
2481 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2487 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2490 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2493 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2495 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2497 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2499 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2506 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
2508 ufshcd_enable_run_stop_reg(hba);
2510 dev_err(hba->dev,
2522 * @hba: per adapter instance
2530 static int ufshcd_hba_enable(struct ufs_hba *hba)
2540 if (!ufshcd_is_hba_active(hba)) {
2543 ufshcd_hba_stop(hba);
2554 ufshcd_set_link_off(hba);
2556 if (hba->vops && hba->vops->hce_enable_notify)
2557 hba->vops->hce_enable_notify(hba, PRE_CHANGE);
2560 ufshcd_hba_start(hba);
2576 while (ufshcd_is_hba_active(hba)) {
2580 dev_err(hba->dev,
2588 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
2590 if (hba->vops && hba->vops->hce_enable_notify)
2591 hba->vops->hce_enable_notify(hba, POST_CHANGE);
2598 * @hba: per adapter instance
2602 static int ufshcd_link_startup(struct ufs_hba *hba)
2608 if (hba->vops && hba->vops->link_startup_notify)
2609 hba->vops->link_startup_notify(hba, PRE_CHANGE);
2611 ret = ufshcd_dme_link_startup(hba);
2614 if (!ret && !ufshcd_is_device_present(hba)) {
2615 dev_err(hba->dev, "%s: Device not present\n", __func__);
2625 if (ret && ufshcd_hba_enable(hba))
2634 if (hba->vops && hba->vops->link_startup_notify) {
2635 ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
2640 ret = ufshcd_make_hba_operational(hba);
2643 dev_err(hba->dev, "link startup failed %d\n", ret);
2649 * @hba: per-adapter instance
2657 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
2662 ufshcd_hold(hba, false);
2663 mutex_lock(&hba->dev_cmd.lock);
2665 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
2671 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2673 mutex_unlock(&hba->dev_cmd.lock);
2674 ufshcd_release(hba);
2677 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
2694 struct ufs_hba *hba;
2696 hba = shost_priv(sdev->host);
2698 lun_qdepth = hba->nutrs;
2699 ret = ufshcd_read_unit_desc_param(hba,
2710 lun_qdepth = hba->nutrs;
2712 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
2714 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2721 * @hba: per-adapter instance
2730 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
2746 ret = ufshcd_read_unit_desc_param(hba,
2757 * @hba: per-adapter instance
2761 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
2764 if (hba->dev_info.f_power_on_wp_en &&
2765 !hba->dev_info.is_lu_power_on_wp) {
2768 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
2771 hba->dev_info.is_lu_power_on_wp = true;
2783 struct ufs_hba *hba;
2785 hba = shost_priv(sdev->host);
2801 ufshcd_get_lu_power_on_wp_status(hba, sdev);
2818 struct ufs_hba *hba = shost_priv(sdev->host);
2820 if (depth > hba->nutrs)
2821 depth = hba->nutrs;
2860 struct ufs_hba *hba;
2862 hba = shost_priv(sdev->host);
2863 scsi_deactivate_tcq(sdev, hba->nutrs);
2868 spin_lock_irqsave(hba->host->host_lock, flags);
2869 hba->sdev_ufs_device = NULL;
2870 spin_unlock_irqrestore(hba->host->host_lock, flags);
2876 * @hba: per adapter instance
2882 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
2890 spin_lock_irqsave(hba->host->host_lock, flags);
2893 __clear_bit(index, &hba->outstanding_tasks);
2895 task_req_descp = hba->utmrdl_base_addr;
2906 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
2909 spin_unlock_irqrestore(hba->host->host_lock, flags);
2950 * @hba: per adapter instance
2956 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2985 schedule_work(&hba->eeh_work);
2990 dev_err(hba->dev,
2995 dev_err(hba->dev,
3015 dev_err(hba->dev,
3025 * @hba: per adapter instance
3028 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
3030 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
3031 hba->active_uic_cmd->argument2 |=
3032 ufshcd_get_uic_cmd_result(hba);
3033 hba->active_uic_cmd->argument3 =
3034 ufshcd_get_dme_attr_val(hba);
3035 complete(&hba->active_uic_cmd->done);
3038 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3039 complete(hba->uic_async_done);
3044 * @hba: per adapter instance
3046 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3062 ufshcd_reset_intr_aggr(hba);
3064 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3065 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3067 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3068 lrbp = &hba->lrb[index];
3071 result = ufshcd_transfer_rsp_status(hba, lrbp);
3076 clear_bit_unlock(index, &hba->lrb_in_use);
3079 __ufshcd_release(hba);
3081 if (hba->dev_cmd.complete)
3082 complete(hba->dev_cmd.complete);
3087 hba->outstanding_reqs ^= completed_reqs;
3089 ufshcd_clk_scaling_update_busy(hba);
3092 wake_up(&hba->dev_cmd.tag_wq);
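
The completion path at source lines 3046-3092 finds finished requests by XOR-ing the transfer-request doorbell with the driver's outstanding_reqs mask: bits the controller has already cleared but the driver still tracks are the freshly completed tags, and the interrupt handler holds host_lock (source line 3542) so no submission can race with the scan. A standalone sketch of that bookkeeping, with a plain 32-bit word in place of the UTRLDBR register and illustrative names:

#include <stdint.h>
#include <stdio.h>

static void complete_requests(uint32_t *outstanding, uint32_t doorbell, int nutrs)
{
	uint32_t completed = doorbell ^ *outstanding;
	int tag;

	for (tag = 0; tag < nutrs; tag++)
		if (completed & (1u << tag))
			printf("tag %d completed\n", tag);

	*outstanding ^= completed;		/* drop the completed bits */
}

int main(void)
{
	uint32_t outstanding = 0x0000002D;	/* tags 0, 2, 3, 5 in flight */
	uint32_t doorbell    = 0x00000024;	/* controller still owns 2 and 5 */

	complete_requests(&outstanding, doorbell, 32);
	printf("outstanding now 0x%08x\n", (unsigned)outstanding);	/* 0x00000024 */
	return 0;
}

Clearing a slot by hand goes the other way: ufshcd_utrl_clear() at source line 353 writes the complement mask ~(1 << pos) to the doorbell-clear register.
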
3097 * @hba: per-adapter instance
3105 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3110 if (!(hba->ee_ctrl_mask & mask))
3113 val = hba->ee_ctrl_mask & ~mask;
3115 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3118 hba->ee_ctrl_mask &= ~mask;
3125 * @hba: per-adapter instance
3133 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3138 if (hba->ee_ctrl_mask & mask)
3141 val = hba->ee_ctrl_mask | mask;
3143 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3146 hba->ee_ctrl_mask |= mask;
3153 * @hba: per-adapter instance
3162 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3166 if (hba->auto_bkops_enabled)
3169 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3172 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3177 hba->auto_bkops_enabled = true;
3180 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3182 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3190 * @hba: per-adapter instance
3200 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3204 if (!hba->auto_bkops_enabled)
3211 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3213 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3218 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
3221 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3223 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3227 hba->auto_bkops_enabled = false;
3234 * @hba: per adapter instance
3240 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3242 hba->auto_bkops_enabled = false;
3243 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3244 ufshcd_enable_auto_bkops(hba);
3247 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3249 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3255 * @hba: per-adapter instance
3265 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3269 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3275 err = ufshcd_get_bkops_status(hba, &curr_status);
3277 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3281 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3288 err = ufshcd_enable_auto_bkops(hba);
3290 err = ufshcd_disable_auto_bkops(hba);
3297 * @hba: per-adapter instance
3305 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3307 return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
3310 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3312 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3325 struct ufs_hba *hba;
3328 hba = container_of(work, struct ufs_hba, eeh_work);
3330 pm_runtime_get_sync(hba->dev);
3331 err = ufshcd_get_ee_status(hba, &status);
3333 dev_err(hba->dev, "%s: failed to get exception status %d\n",
3338 status &= hba->ee_ctrl_mask;
3340 err = ufshcd_urgent_bkops(hba);
3342 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
3346 pm_runtime_put_sync(hba->dev);
3356 struct ufs_hba *hba;
3363 hba = container_of(work, struct ufs_hba, eh_work);
3365 pm_runtime_get_sync(hba->dev);
3366 ufshcd_hold(hba, false);
3368 spin_lock_irqsave(hba->host->host_lock, flags);
3369 if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
3370 spin_unlock_irqrestore(hba->host->host_lock, flags);
3374 hba->ufshcd_state = UFSHCD_STATE_RESET;
3375 ufshcd_set_eh_in_progress(hba);
3378 ufshcd_transfer_req_compl(hba);
3379 ufshcd_tmc_handler(hba);
3380 spin_unlock_irqrestore(hba->host->host_lock, flags);
3383 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
3384 if (ufshcd_clear_cmd(hba, tag))
3388 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
3389 if (ufshcd_clear_tm_cmd(hba, tag))
3393 spin_lock_irqsave(hba->host->host_lock, flags);
3394 ufshcd_transfer_req_compl(hba);
3395 ufshcd_tmc_handler(hba);
3396 spin_unlock_irqrestore(hba->host->host_lock, flags);
3399 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
3400 ((hba->saved_err & UIC_ERROR) &&
3401 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
3402 err = ufshcd_reset_and_restore(hba);
3404 dev_err(hba->dev, "%s: reset and restore failed\n",
3406 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3412 scsi_report_bus_reset(hba->host, 0);
3413 hba->saved_err = 0;
3414 hba->saved_uic_err = 0;
3416 ufshcd_clear_eh_in_progress(hba);
3419 scsi_unblock_requests(hba->host);
3420 ufshcd_release(hba);
3421 pm_runtime_put_sync(hba->dev);
3426 * @hba: per-adapter instance
3428 static void ufshcd_update_uic_error(struct ufs_hba *hba)
3433 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
3435 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
3438 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
3440 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
3442 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
3444 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
3446 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
3448 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
3450 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
3451 __func__, hba->uic_error);
3456 * @hba: per-adapter instance
3458 static void ufshcd_check_errors(struct ufs_hba *hba)
3462 if (hba->errors & INT_FATAL_ERRORS)
3465 if (hba->errors & UIC_ERROR) {
3466 hba->uic_error = 0;
3467 ufshcd_update_uic_error(hba);
3468 if (hba->uic_error)
3474 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
3476 scsi_block_requests(hba->host);
3479 hba->saved_err |= hba->errors;
3480 hba->saved_uic_err |= hba->uic_error;
3482 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3483 schedule_work(&hba->eh_work);
3496 * @hba: per adapter instance
3498 static void ufshcd_tmc_handler(struct ufs_hba *hba)
3502 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
3503 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
3504 wake_up(&hba->tm_wq);
3509 * @hba: per adapter instance
3512 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
3514 hba->errors = UFSHCD_ERROR_MASK & intr_status;
3515 if (hba->errors)
3516 ufshcd_check_errors(hba);
3519 ufshcd_uic_cmd_compl(hba, intr_status);
3522 ufshcd_tmc_handler(hba);
3525 ufshcd_transfer_req_compl(hba);
3540 struct ufs_hba *hba = __hba;
3542 spin_lock(hba->host->host_lock);
3543 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
3546 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
3547 ufshcd_sl_intr(hba, intr_status);
3550 spin_unlock(hba->host->host_lock);
3554 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
3560 if (!test_bit(tag, &hba->outstanding_tasks))
3563 spin_lock_irqsave(hba->host->host_lock, flags);
3564 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
3565 spin_unlock_irqrestore(hba->host->host_lock, flags);
3568 err = ufshcd_wait_for_register(hba,
3577 * @hba: per adapter instance
3585 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
3596 host = hba->host;
3603 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
3604 ufshcd_hold(hba, false);
3607 task_req_descp = hba->utmrdl_base_addr;
3618 task_tag = hba->nutrs + free_slot;
3632 __set_bit(free_slot, &hba->outstanding_tasks);
3633 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
3638 err = wait_event_timeout(hba->tm_wq,
3639 test_bit(free_slot, &hba->tm_condition),
3642 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
3644 if (ufshcd_clear_tm_cmd(hba, free_slot))
3645 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n"
3649 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
3652 clear_bit(free_slot, &hba->tm_condition);
3653 ufshcd_put_tm_slot(hba, free_slot);
3654 wake_up(&hba->tm_tag_wq);
3656 ufshcd_release(hba);
3670 struct ufs_hba *hba;
3679 hba = shost_priv(host);
3682 lrbp = &hba->lrb[tag];
3683 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
3691 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
3692 if (hba->lrb[pos].lun == lrbp->lun) {
3693 err = ufshcd_clear_cmd(hba, pos);
3699 ufshcd_transfer_req_compl(hba);
3705 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3726 struct ufs_hba *hba;
3736 hba = shost_priv(host);
3739 ufshcd_hold(hba, false);
3741 if (!(test_bit(tag, &hba->outstanding_reqs)))
3744 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3746 dev_err(hba->dev,
3751 lrbp = &hba->lrb[tag];
3753 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3763 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3783 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3791 err = ufshcd_clear_cmd(hba, tag);
3798 __clear_bit(tag, &hba->outstanding_reqs);
3799 hba->lrb[tag].cmd = NULL;
3802 clear_bit_unlock(tag, &hba->lrb_in_use);
3803 wake_up(&hba->dev_cmd.tag_wq);
3809 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3817 ufshcd_release(hba);
3823 * @hba: per-adapter instance
3831 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
3837 spin_lock_irqsave(hba->host->host_lock, flags);
3838 ufshcd_hba_stop(hba);
3839 spin_unlock_irqrestore(hba->host->host_lock, flags);
3841 err = ufshcd_hba_enable(hba);
3846 err = ufshcd_probe_hba(hba);
3848 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3852 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
3859 * @hba: per-adapter instance
3866 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
3873 err = ufshcd_host_reset_and_restore(hba);
3880 spin_lock_irqsave(hba->host->host_lock, flags);
3881 ufshcd_transfer_req_compl(hba);
3882 ufshcd_tmc_handler(hba);
3883 spin_unlock_irqrestore(hba->host->host_lock, flags);
3898 struct ufs_hba *hba;
3900 hba = shost_priv(cmd->device->host);
3902 ufshcd_hold(hba, false);
3910 spin_lock_irqsave(hba->host->host_lock, flags);
3911 if (!(work_pending(&hba->eh_work) ||
3912 hba->ufshcd_state == UFSHCD_STATE_RESET))
3914 spin_unlock_irqrestore(hba->host->host_lock, flags);
3915 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
3916 flush_work(&hba->eh_work);
3919 hba->ufshcd_state = UFSHCD_STATE_RESET;
3920 ufshcd_set_eh_in_progress(hba);
3921 spin_unlock_irqrestore(hba->host->host_lock, flags);
3923 err = ufshcd_reset_and_restore(hba);
3925 spin_lock_irqsave(hba->host->host_lock, flags);
3928 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
3931 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3933 ufshcd_clear_eh_in_progress(hba);
3934 spin_unlock_irqrestore(hba->host->host_lock, flags);
3936 ufshcd_release(hba);
3988 * @hba: per-adapter instance
3994 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
3999 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4000 !hba->vreg_info.vccq2) {
4001 dev_err(hba->dev,
4007 if (hba->vreg_info.vcc)
4009 hba->vreg_info.vcc->max_uA,
4013 if (hba->vreg_info.vccq)
4015 hba->vreg_info.vccq->max_uA,
4019 if (hba->vreg_info.vccq2)
4021 hba->vreg_info.vccq2->max_uA,
4028 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4034 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4036 dev_err(hba->dev,
4042 hba->init_prefetch_data.icc_level =
4043 ufshcd_find_max_sup_active_icc_level(hba,
4045 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4046 __func__, hba->init_prefetch_data.icc_level);
4048 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4050 &hba->init_prefetch_data.icc_level);
4053 dev_err(hba->dev,
4055 __func__, hba->init_prefetch_data.icc_level, ret);
4061 * @hba: per-adapter instance
4085 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4091 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4093 if (IS_ERR(hba->sdev_ufs_device)) {
4094 ret = PTR_ERR(hba->sdev_ufs_device);
4095 hba->sdev_ufs_device = NULL;
4098 scsi_device_put(hba->sdev_ufs_device);
4100 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4108 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4120 scsi_remove_device(hba->sdev_ufs_device);
4126 * ufshcd_probe_hba - probe hba to detect device and initialize
4127 * @hba: per-adapter instance
4131 static int ufshcd_probe_hba(struct ufs_hba *hba)
4135 ret = ufshcd_link_startup(hba);
4139 ufshcd_init_pwr_info(hba);
4142 ufshcd_set_link_active(hba);
4144 ret = ufshcd_verify_dev_init(hba);
4148 ret = ufshcd_complete_dev_init(hba);
4153 ufshcd_set_ufs_dev_active(hba);
4154 ufshcd_force_reset_auto_bkops(hba);
4155 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4156 hba->wlun_dev_clr_ua = true;
4158 if (ufshcd_get_max_pwr_mode(hba)) {
4159 dev_err(hba->dev,
4163 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
4165 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
4173 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4177 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
4178 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4180 hba->dev_info.f_power_on_wp_en = flag;
4182 if (!hba->is_init_prefetch)
4183 ufshcd_init_icc_levels(hba);
4186 if (ufshcd_scsi_add_wlus(hba))
4189 scsi_scan_host(hba->host);
4190 pm_runtime_put_sync(hba->dev);
4193 if (!hba->is_init_prefetch)
4194 hba->is_init_prefetch = true;
4197 if (ufshcd_is_clkscaling_enabled(hba))
4198 devfreq_resume_device(hba->devfreq);
4205 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4206 pm_runtime_put_sync(hba->dev);
4207 ufshcd_hba_exit(hba);
4214 * ufshcd_async_scan - asynchronous execution for probing hba
4220 struct ufs_hba *hba = (struct ufs_hba *)data;
4222 ufshcd_probe_hba(hba);
4268 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4274 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4277 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4283 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4355 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
4358 struct device *dev = hba->dev;
4359 struct ufs_vreg_info *info = &hba->vreg_info;
4385 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
4387 struct ufs_vreg_info *info = &hba->vreg_info;
4390 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
4412 static int ufshcd_init_vreg(struct ufs_hba *hba)
4415 struct device *dev = hba->dev;
4416 struct ufs_vreg_info *info = &hba->vreg_info;
4434 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
4436 struct ufs_vreg_info *info = &hba->vreg_info;
4439 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
4444 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4449 struct list_head *head = &hba->clk_list_head;
4463 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
4471 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
4476 if (hba->vops && hba->vops->setup_clocks)
4477 ret = hba->vops->setup_clocks(hba, on);
4485 spin_lock_irqsave(hba->host->host_lock, flags);
4486 hba->clk_gating.state = CLKS_ON;
4487 spin_unlock_irqrestore(hba->host->host_lock, flags);
4492 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
4494 return __ufshcd_setup_clocks(hba, on, false);
4497 static int ufshcd_init_clocks(struct ufs_hba *hba)
4501 struct device *dev = hba->dev;
4502 struct list_head *head = &hba->clk_list_head;
4522 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
4536 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
4540 if (!hba->vops)
4543 if (hba->vops->init) {
4544 err = hba->vops->init(hba);
4549 if (hba->vops->setup_regulators) {
4550 err = hba->vops->setup_regulators(hba, true);
4558 if (hba->vops->exit)
4559 hba->vops->exit(hba);
4562 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
4563 __func__, hba->vops ? hba->vops->name : "", err);
4567 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
4569 if (!hba->vops)
4572 if (hba->vops->setup_clocks)
4573 hba->vops->setup_clocks(hba, false);
4575 if (hba->vops->setup_regulators)
4576 hba->vops->setup_regulators(hba, false);
4578 if (hba->vops->exit)
4579 hba->vops->exit(hba);
4582 static int ufshcd_hba_init(struct ufs_hba *hba)
4593 err = ufshcd_init_hba_vreg(hba);
4597 err = ufshcd_setup_hba_vreg(hba, true);
4601 err = ufshcd_init_clocks(hba);
4605 err = ufshcd_setup_clocks(hba, true);
4609 err = ufshcd_init_vreg(hba);
4613 err = ufshcd_setup_vreg(hba, true);
4617 err = ufshcd_variant_hba_init(hba);
4621 hba->is_powered = true;
4625 ufshcd_setup_vreg(hba, false);
4627 ufshcd_setup_clocks(hba, false);
4629 ufshcd_setup_hba_vreg(hba, false);
4634 static void ufshcd_hba_exit(struct ufs_hba *hba)
4636 if (hba->is_powered) {
4637 ufshcd_variant_hba_exit(hba);
4638 ufshcd_setup_vreg(hba, false);
4639 ufshcd_setup_clocks(hba, false);
4640 ufshcd_setup_hba_vreg(hba, false);
4641 hba->is_powered = false;
4646 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
4677 * @hba: per adapter instance
4683 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4692 spin_lock_irqsave(hba->host->host_lock, flags);
4693 sdp = hba->sdev_ufs_device;
4703 spin_unlock_irqrestore(hba->host->host_lock, flags);
4714 hba->host->eh_noresume = 1;
4715 if (hba->wlun_dev_clr_ua) {
4716 ret = ufshcd_send_request_sense(hba, sdp);
4720 hba->wlun_dev_clr_ua = false;
4743 hba->curr_dev_pwr_mode = pwr_mode;
4746 hba->host->eh_noresume = 0;
4750 static int ufshcd_link_state_transition(struct ufs_hba *hba,
4756 if (req_link_state == hba->uic_link_state)
4760 ret = ufshcd_uic_hibern8_enter(hba);
4762 ufshcd_set_link_hibern8(hba);
4772 !hba->auto_bkops_enabled))) {
4777 ufshcd_hba_stop(hba);
4782 ufshcd_set_link_off(hba);
4789 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
4803 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4804 !hba->dev_info.is_lu_power_on_wp) {
4805 ufshcd_setup_vreg(hba, false);
4806 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4807 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4808 if (!ufshcd_is_link_active(hba)) {
4809 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4810 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
4815 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
4819 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4820 !hba->dev_info.is_lu_power_on_wp) {
4821 ret = ufshcd_setup_vreg(hba, true);
4822 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4823 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
4824 if (!ret && !ufshcd_is_link_active(hba)) {
4825 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
4828 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
4836 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4838 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4843 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
4845 if (ufshcd_is_link_off(hba))
4846 ufshcd_setup_hba_vreg(hba, false);
4849 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
4851 if (ufshcd_is_link_off(hba))
4852 ufshcd_setup_hba_vreg(hba, true);
4857 * @hba: per adapter instance
4871 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
4878 hba->pm_op_in_progress = 1;
4881 hba->rpm_lvl : hba->spm_lvl;
4893 ufshcd_hold(hba, false);
4894 hba->clk_gating.is_suspended = true;
4901 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
4902 (req_link_state == hba->uic_link_state))
4906 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
4912 if (ufshcd_can_autobkops_during_suspend(hba)) {
4918 ret = ufshcd_urgent_bkops(hba);
4923 ufshcd_disable_auto_bkops(hba);
4927 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
4928 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
4931 ufshcd_disable_auto_bkops(hba);
4932 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
4937 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
4941 ufshcd_vreg_set_lpm(hba);
4949 if (ufshcd_is_clkscaling_enabled(hba)) {
4950 devfreq_suspend_device(hba->devfreq);
4951 hba->clk_scaling.window_start_t = 0;
4958 if (hba->vops && hba->vops->suspend) {
4959 ret = hba->vops->suspend(hba, pm_op);
4964 if (hba->vops && hba->vops->setup_clocks) {
4965 ret = hba->vops->setup_clocks(hba, false);
4970 if (!ufshcd_is_link_active(hba))
4971 ufshcd_setup_clocks(hba, false);
4974 __ufshcd_setup_clocks(hba, false, true);
4976 hba->clk_gating.state = CLKS_OFF;
4981 ufshcd_disable_irq(hba);
4983 ufshcd_hba_vreg_set_lpm(hba);
4987 if (hba->vops && hba->vops->resume)
4988 hba->vops->resume(hba, pm_op);
4990 ufshcd_vreg_set_hpm(hba);
4991 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
4992 ufshcd_set_link_active(hba);
4993 else if (ufshcd_is_link_off(hba))
4994 ufshcd_host_reset_and_restore(hba);
4996 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
4997 ufshcd_disable_auto_bkops(hba);
4999 hba->clk_gating.is_suspended = false;
5000 ufshcd_release(hba);
5002 hba->pm_op_in_progress = 0;
5008 * @hba: per adapter instance
5016 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5021 hba->pm_op_in_progress = 1;
5022 old_link_state = hba->uic_link_state;
5024 ufshcd_hba_vreg_set_hpm(hba);
5026 ret = ufshcd_setup_clocks(hba, true);
5031 ret = ufshcd_enable_irq(hba);
5035 ret = ufshcd_vreg_set_hpm(hba);
5044 if (hba->vops && hba->vops->resume) {
5045 ret = hba->vops->resume(hba, pm_op);
5050 if (ufshcd_is_link_hibern8(hba)) {
5051 ret = ufshcd_uic_hibern8_exit(hba);
5053 ufshcd_set_link_active(hba);
5056 } else if (ufshcd_is_link_off(hba)) {
5057 ret = ufshcd_host_reset_and_restore(hba);
5062 if (ret || !ufshcd_is_link_active(hba))
5066 if (!ufshcd_is_ufs_dev_active(hba)) {
5067 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
5076 ufshcd_urgent_bkops(hba);
5077 hba->clk_gating.is_suspended = false;
5079 if (ufshcd_is_clkscaling_enabled(hba))
5080 devfreq_resume_device(hba->devfreq);
5083 ufshcd_release(hba);
5087 ufshcd_link_state_transition(hba, old_link_state, 0);
5089 if (hba->vops && hba->vops->suspend)
5090 hba->vops->suspend(hba, pm_op);
5092 ufshcd_vreg_set_lpm(hba);
5094 ufshcd_disable_irq(hba);
5095 ufshcd_setup_clocks(hba, false);
5097 hba->pm_op_in_progress = 0;
5103 * @hba: per adapter instance
5110 int ufshcd_system_suspend(struct ufs_hba *hba)
5114 if (!hba || !hba->is_powered)
5117 if (pm_runtime_suspended(hba->dev)) {
5118 if (hba->rpm_lvl == hba->spm_lvl)
5123 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
5124 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
5135 ret = ufshcd_runtime_resume(hba);
5140 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
5143 hba->is_sys_suspended = true;
5150 * @hba: per adapter instance
5155 int ufshcd_system_resume(struct ufs_hba *hba)
5157 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
5164 return ufshcd_resume(hba, UFS_SYSTEM_PM);
5170 * @hba: per adapter instance
5176 int ufshcd_runtime_suspend(struct ufs_hba *hba)
5178 if (!hba || !hba->is_powered)
5181 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
5187 * @hba: per adapter instance
5206 int ufshcd_runtime_resume(struct ufs_hba *hba)
5208 if (!hba || !hba->is_powered)
5211 return ufshcd_resume(hba, UFS_RUNTIME_PM);
5215 int ufshcd_runtime_idle(struct ufs_hba *hba)
5223 * @hba: per adapter instance
5229 int ufshcd_shutdown(struct ufs_hba *hba)
5233 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
5236 if (pm_runtime_suspended(hba->dev)) {
5237 ret = ufshcd_runtime_resume(hba);
5242 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
5245 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
5254 * @hba: per adapter instance
5256 void ufshcd_remove(struct ufs_hba *hba)
5258 scsi_remove_host(hba->host);
5260 ufshcd_disable_intr(hba, hba->intr_mask);
5261 ufshcd_hba_stop(hba);
5263 scsi_host_put(hba->host);
5265 ufshcd_exit_clk_gating(hba);
5266 if (ufshcd_is_clkscaling_enabled(hba))
5267 devfreq_remove_device(hba->devfreq);
5268 ufshcd_hba_exit(hba);
5275 * @hba: per adapter instance
5279 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
5281 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
5282 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
5285 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
5297 struct ufs_hba *hba;
5314 hba = shost_priv(host);
5315 hba->host = host;
5316 hba->dev = dev;
5317 *hba_handle = hba;
5324 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
5328 struct list_head *head = &hba->clk_list_head;
5340 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5352 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5360 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
5363 if (hba->vops->clk_scale_notify)
5364 hba->vops->clk_scale_notify(hba);
5373 struct ufs_hba *hba = dev_get_drvdata(dev);
5375 if (!ufshcd_is_clkscaling_enabled(hba))
5379 err = ufshcd_scale_clks(hba, true);
5381 err = ufshcd_scale_clks(hba, false);
5389 struct ufs_hba *hba = dev_get_drvdata(dev);
5390 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
5393 if (!ufshcd_is_clkscaling_enabled(hba))
5398 spin_lock_irqsave(hba->host->host_lock, flags);
5413 if (hba->outstanding_reqs) {
5420 spin_unlock_irqrestore(hba->host->host_lock, flags);
5432 * @hba: per-adapter instance
5437 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5440 struct Scsi_Host *host = hba->host;
5441 struct device *dev = hba->dev;
5444 dev_err(hba->dev,
5450 hba->mmio_base = mmio_base;
5451 hba->irq = irq;
5453 err = ufshcd_hba_init(hba);
5458 ufshcd_hba_capabilities(hba);
5461 hba->ufs_version = ufshcd_get_ufs_version(hba);
5464 hba->intr_mask = ufshcd_get_intr_mask(hba);
5466 err = ufshcd_set_dma_mask(hba);
5468 dev_err(hba->dev, "set dma mask failed\n");
5473 err = ufshcd_memory_alloc(hba);
5475 dev_err(hba->dev, "Memory allocation failed\n");
5480 ufshcd_host_memory_configure(hba);
5482 host->can_queue = hba->nutrs;
5483 host->cmd_per_lun = hba->nutrs;
5490 hba->max_pwr_info.is_valid = false;
5493 init_waitqueue_head(&hba->tm_wq);
5494 init_waitqueue_head(&hba->tm_tag_wq);
5497 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
5498 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
5501 mutex_init(&hba->uic_cmd_mutex);
5504 mutex_init(&hba->dev_cmd.lock);
5507 init_waitqueue_head(&hba->dev_cmd.tag_wq);
5509 ufshcd_init_clk_gating(hba);
5511 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
5513 dev_err(hba->dev, "request irq failed\n");
5516 hba->is_irq_enabled = true;
5522 dev_err(hba->dev, "init shared queue failed\n");
5526 err = scsi_add_host(host, hba->dev);
5528 dev_err(hba->dev, "scsi_add_host failed\n");
5533 err = ufshcd_hba_enable(hba);
5535 dev_err(hba->dev, "Host controller enable failed\n");
5539 if (ufshcd_is_clkscaling_enabled(hba)) {
5540 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
5542 if (IS_ERR(hba->devfreq)) {
5543 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
5544 PTR_ERR(hba->devfreq));
5548 devfreq_suspend_device(hba->devfreq);
5549 hba->clk_scaling.window_start_t = 0;
5559 ufshcd_set_ufs_dev_poweroff(hba);
5561 async_schedule(ufshcd_async_scan, hba);
5566 scsi_remove_host(hba->host);
5568 ufshcd_exit_clk_gating(hba);
5570 hba->is_irq_enabled = false;
5572 ufshcd_hba_exit(hba);