Lines matching refs: ctrl (drivers/block/rsxx/dma.c)

215 q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
224 static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
227 if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
228 pci_unmap_page(ctrl->card->dev, dma->dma_addr,
239 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
244 ctrl->stats.dma_sw_err++;
246 ctrl->stats.dma_hw_fault++;
248 ctrl->stats.dma_cancelled++;
251 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
253 rsxx_free_dma(ctrl, dma);
256 int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
266 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
268 rsxx_free_dma(ctrl, dma);
275 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
282 spin_lock_bh(&ctrl->queue_lock);
283 ctrl->stats.sw_q_depth++;
284 list_add(&dma->list, &ctrl->queue);
285 spin_unlock_bh(&ctrl->queue_lock);
288 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
295 dev_dbg(CARD_TO_DEV(ctrl->card),
300 ctrl->stats.crc_errors++;
302 ctrl->stats.hard_errors++;
304 ctrl->stats.soft_errors++;
309 if (ctrl->card->scrub_hard) {
312 ctrl->stats.reads_retried++;
315 ctrl->stats.reads_failed++;
319 ctrl->stats.reads_failed++;
327 ctrl->stats.reads_failed++;
333 ctrl->stats.writes_failed++;
338 ctrl->stats.discards_failed++;
342 dev_err(CARD_TO_DEV(ctrl->card),
352 rsxx_requeue_dma(ctrl, dma);
354 rsxx_complete_dma(ctrl, dma, status);
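
The counters above sit inside rsxx_handle_dma_error(); the decision they feed is easier to see in one place. Below is a minimal, self-contained C sketch of that decision, assuming (consistently with the scrub_hard/reads_retried/reads_failed lines above) that only read errors are ever retried: a failing read is requeued when the card is set to hard-scrub, while writes, discards and unretryable reads complete with an error. handle_dma_error, CMD_* and ERR_* are illustrative stand-ins, not the driver's identifiers.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the driver's command and status bits. */
    enum cmd  { CMD_READ, CMD_WRITE, CMD_DISCARD };
    enum errs { ERR_CRC = 1 << 0, ERR_HARD = 1 << 1, ERR_SOFT = 1 << 2 };

    /*
     * Decide what to do with a DMA that completed with a hardware error:
     * true means requeue (retry) it, false means complete it as failed.
     */
    static bool handle_dma_error(enum cmd cmd, unsigned int err, bool scrub_hard)
    {
        if (cmd == CMD_READ && (err & (ERR_CRC | ERR_HARD)) && scrub_hard)
            return true;   /* requeue: the read will be retried */
        return false;      /* writes, discards and unretryable reads fail */
    }

    int main(void)
    {
        printf("read, CRC error, scrub_hard -> %s\n",
               handle_dma_error(CMD_READ, ERR_CRC, true) ? "requeue" : "fail");
        printf("write, hard error           -> %s\n",
               handle_dma_error(CMD_WRITE, ERR_HARD, true) ? "requeue" : "fail");
        return 0;
    }
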
359 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
362 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
363 unlikely(ctrl->card->eeh_state))
366 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
371 dev_warn(CARD_TO_DEV(ctrl->card),
373 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
374 mod_timer(&ctrl->activity_timer,
377 dev_warn(CARD_TO_DEV(ctrl->card),
379 ctrl->id);
380 ctrl->card->dma_fault = 1;
383 spin_lock(&ctrl->queue_lock);
384 cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
385 spin_unlock(&ctrl->queue_lock);
387 cnt += rsxx_dma_cancel(ctrl);
390 dev_info(CARD_TO_DEV(ctrl->card),
392 cnt, ctrl->id);
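
dma_engine_stalled(), whose matches appear above, is the activity-timer callback. When the hardware queue stops draining it distinguishes two cases: if the SW_CMD_IDX doorbell no longer holds the driver's command index (for instance after the register was reset), it rewrites the doorbell and re-arms the timer; otherwise it flags card->dma_fault and cancels both the software queue and the outstanding hardware DMAs. A minimal standalone model of just that decision follows; check_stall and its parameter names are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    enum stall_action { STALL_IGNORE, STALL_REKICK_DOORBELL, STALL_DECLARE_FAULT };

    /*
     * Model of the stall-timer decision: nothing outstanding means nothing
     * to do; a doorbell that lost our index just needs to be rewritten; a
     * genuine stall (hardware saw the index but never completed) is a fault.
     */
    static enum stall_action check_stall(unsigned int hw_q_depth,
                                         uint32_t sw_cmd_idx,
                                         uint32_t doorbell_reg)
    {
        if (hw_q_depth == 0)
            return STALL_IGNORE;
        if (doorbell_reg != sw_cmd_idx)
            return STALL_REKICK_DOORBELL;
        return STALL_DECLARE_FAULT;
    }

    int main(void)
    {
        printf("%d\n", check_stall(4, 12, 8));   /* stale doorbell -> rekick */
        printf("%d\n", check_stall(4, 12, 12));  /* hw has the index -> fault */
        return 0;
    }
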
396 static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
404 hw_cmd_buf = ctrl->cmd.buf;
406 if (unlikely(ctrl->card->halt) ||
407 unlikely(ctrl->card->eeh_state))
411 spin_lock_bh(&ctrl->queue_lock);
412 if (list_empty(&ctrl->queue)) {
413 spin_unlock_bh(&ctrl->queue_lock);
416 spin_unlock_bh(&ctrl->queue_lock);
418 tag = pop_tracker(ctrl->trackers);
422 spin_lock_bh(&ctrl->queue_lock);
423 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
425 ctrl->stats.sw_q_depth--;
426 spin_unlock_bh(&ctrl->queue_lock);
433 if (unlikely(ctrl->card->dma_fault)) {
434 push_tracker(ctrl->trackers, tag);
435 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
455 dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
457 if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
458 push_tracker(ctrl->trackers, tag);
459 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
464 set_tracker_dma(ctrl->trackers, tag, dma);
465 hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
466 hw_cmd_buf[ctrl->cmd.idx].tag = tag;
467 hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
468 hw_cmd_buf[ctrl->cmd.idx].sub_page =
472 hw_cmd_buf[ctrl->cmd.idx].device_addr =
475 hw_cmd_buf[ctrl->cmd.idx].host_addr =
478 dev_dbg(CARD_TO_DEV(ctrl->card),
480 ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
482 ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
486 ctrl->stats.writes_issued++;
488 ctrl->stats.discards_issued++;
490 ctrl->stats.reads_issued++;
495 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
496 mod_timer(&ctrl->activity_timer,
499 if (unlikely(ctrl->card->eeh_state)) {
500 del_timer_sync(&ctrl->activity_timer);
504 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
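
Taken together, the rsxx_issue_dmas() matches describe a producer loop: pop a free tracker tag, pull a DMA off the software queue, fill the hardware command slot at cmd.idx, advance cmd.idx modulo the ring size via RSXX_CS_IDX_MASK, and finally publish the whole batch with one write of cmd.idx to the SW_CMD_IDX doorbell. The self-contained model below sketches only that ring bookkeeping; the struct, the ring size and the opcode values are illustrative, and the doorbell is an ordinary variable standing in for the register write.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 256              /* illustrative stand-in for RSXX_MAX_OUTSTANDING_CMDS */
    #define RING_MASK (RING_SIZE - 1)  /* plays the role of RSXX_CS_IDX_MASK */

    struct cmd_ring {
        uint8_t  cmd[RING_SIZE];       /* one slot per hardware command */
        uint32_t idx;                  /* next slot to fill (ctrl->cmd.idx) */
        uint32_t doorbell;             /* stands in for the SW_CMD_IDX register */
    };

    /* Queue one command into the ring; the hardware is not told yet. */
    static void ring_push(struct cmd_ring *r, uint8_t cmd)
    {
        r->cmd[r->idx] = cmd;
        r->idx = (r->idx + 1) & RING_MASK;
    }

    /* One doorbell write publishes everything pushed since the last kick. */
    static void ring_kick(struct cmd_ring *r)
    {
        r->doorbell = r->idx;
    }

    int main(void)
    {
        struct cmd_ring r = { .idx = 0 };

        ring_push(&r, 0x01);           /* arbitrary opcode values for the model */
        ring_push(&r, 0x02);
        ring_kick(&r);
        printf("idx=%u doorbell=%u\n", r.idx, r.doorbell);
        return 0;
    }
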
508 static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
517 hw_st_buf = ctrl->status.buf;
519 if (unlikely(ctrl->card->halt) ||
520 unlikely(ctrl->card->dma_fault) ||
521 unlikely(ctrl->card->eeh_state))
524 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
526 while (count == ctrl->e_cnt) {
536 status = hw_st_buf[ctrl->status.idx].status;
537 tag = hw_st_buf[ctrl->status.idx].tag;
539 dma = get_tracker_dma(ctrl->trackers, tag);
541 spin_lock_irqsave(&ctrl->card->irq_lock, flags);
542 rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
543 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
545 dev_err(CARD_TO_DEV(ctrl->card),
548 tag, ctrl->status.idx, ctrl->id);
552 dev_dbg(CARD_TO_DEV(ctrl->card),
555 ctrl->id, dma->laddr, tag, status, count,
556 ctrl->status.idx);
558 atomic_dec(&ctrl->stats.hw_q_depth);
560 mod_timer(&ctrl->activity_timer,
564 rsxx_handle_dma_error(ctrl, dma, status);
566 rsxx_complete_dma(ctrl, dma, 0);
568 push_tracker(ctrl->trackers, tag);
570 ctrl->status.idx = (ctrl->status.idx + 1) &
572 ctrl->e_cnt++;
574 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
577 dma_intr_coal_auto_tune(ctrl->card);
579 if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
580 del_timer_sync(&ctrl->activity_timer);
582 spin_lock_irqsave(&ctrl->card->irq_lock, flags);
583 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
584 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
586 spin_lock_bh(&ctrl->queue_lock);
587 if (ctrl->stats.sw_q_depth)
588 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
589 spin_unlock_bh(&ctrl->queue_lock);
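
rsxx_dma_done() drains a hardware-written status ring: each entry carries an event count, and the driver keeps consuming entries while that count equals its own running counter e_cnt, so the loop stops at the first slot the hardware has not filled yet. A standalone sketch of that handshake follows; status_entry and consume_done are illustrative names, and the real loop additionally looks up the tag's DMA, completes it or routes it to the error handler, and returns the tag to the tracker free list.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8
    #define RING_MASK (RING_SIZE - 1)

    struct status_entry {
        uint16_t count;                /* event counter written by the hardware */
        uint8_t  tag;                  /* which outstanding command completed */
    };

    /* Consume completed entries; returns how many were processed. */
    static int consume_done(const struct status_entry *ring,
                            uint32_t *idx, uint16_t *e_cnt)
    {
        int done = 0;

        /* Stop at the first entry whose count does not match our counter:
         * the hardware has not written it yet. */
        while (ring[*idx].count == *e_cnt) {
            /* (real driver: complete the DMA tracked under ring[*idx].tag) */
            *idx = (*idx + 1) & RING_MASK;
            (*e_cnt)++;
            done++;
        }
        return done;
    }

    int main(void)
    {
        struct status_entry ring[RING_SIZE] = {
            { .count = 0, .tag = 3 },
            { .count = 1, .tag = 7 },
            /* remaining slots still hold a stale count */
        };
        uint32_t idx = 0;
        uint16_t e_cnt = 0;

        printf("completed %d entries\n", consume_done(ring, &idx, &e_cnt));
        return 0;
    }
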
594 struct rsxx_dma_ctrl *ctrl;
596 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
598 mutex_lock(&ctrl->work_lock);
599 rsxx_issue_dmas(ctrl);
600 mutex_unlock(&ctrl->work_lock);
605 struct rsxx_dma_ctrl *ctrl;
607 ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
609 mutex_lock(&ctrl->work_lock);
610 rsxx_dma_done(ctrl);
611 mutex_unlock(&ctrl->work_lock);
756 spin_lock_bh(&card->ctrl[i].queue_lock);
757 card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
758 list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
759 spin_unlock_bh(&card->ctrl[i].queue_lock);
761 queue_work(card->ctrl[i].issue_wq,
762 &card->ctrl[i].issue_dma_work);
770 rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
778 int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
780 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
781 &ctrl->status.dma_addr);
782 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
783 &ctrl->cmd.dma_addr);
784 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
787 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
788 iowrite32(lower_32_bits(ctrl->status.dma_addr),
789 ctrl->regmap + SB_ADD_LO);
790 iowrite32(upper_32_bits(ctrl->status.dma_addr),
791 ctrl->regmap + SB_ADD_HI);
793 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
794 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
795 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
797 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
798 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
800 ctrl->status.idx);
803 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
804 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
806 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
807 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
809 ctrl->status.idx);
812 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
813 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
819 struct rsxx_dma_ctrl *ctrl)
824 memset(&ctrl->stats, 0, sizeof(ctrl->stats));
826 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
827 if (!ctrl->trackers)
830 ctrl->trackers->head = 0;
832 ctrl->trackers->list[i].next_tag = i + 1;
833 ctrl->trackers->list[i].dma = NULL;
835 ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
836 spin_lock_init(&ctrl->trackers->lock);
838 spin_lock_init(&ctrl->queue_lock);
839 mutex_init(&ctrl->work_lock);
840 INIT_LIST_HEAD(&ctrl->queue);
842 setup_timer(&ctrl->activity_timer, dma_engine_stalled,
843 (unsigned long)ctrl);
845 ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
846 if (!ctrl->issue_wq)
849 ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
850 if (!ctrl->done_wq)
853 INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
854 INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
856 st = rsxx_hw_buffers_init(dev, ctrl);
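
The tracker setup above (head = 0, list[i].next_tag = i + 1, last entry terminated with -1) builds a free list of hardware tags threaded through an array; pop_tracker() and push_tracker(), referenced throughout the issue and completion paths, take and return tags at the head of that list. Below is a self-contained, single-threaded sketch of the structure; the real driver protects it with trackers->lock, and NR_TAGS is only an illustrative size.

    #include <stdio.h>

    #define NR_TAGS 16   /* illustrative stand-in for RSXX_MAX_OUTSTANDING_CMDS */

    struct tracker { int next_tag; void *dma; };

    struct tracker_list {
        int head;                      /* first free tag, -1 when exhausted */
        struct tracker list[NR_TAGS];
    };

    static void trackers_init(struct tracker_list *t)
    {
        int i;

        t->head = 0;
        for (i = 0; i < NR_TAGS; i++) {
            t->list[i].next_tag = i + 1;
            t->list[i].dma = NULL;
        }
        t->list[NR_TAGS - 1].next_tag = -1;   /* terminate the free list */
    }

    /* Take a free tag off the head of the list; -1 means none available. */
    static int pop_tracker(struct tracker_list *t)
    {
        int tag = t->head;

        if (tag != -1)
            t->head = t->list[tag].next_tag;
        return tag;
    }

    /* Return a tag to the head of the free list. */
    static void push_tracker(struct tracker_list *t, int tag)
    {
        t->list[tag].next_tag = t->head;
        t->list[tag].dma = NULL;
        t->head = tag;
    }

    int main(void)
    {
        static struct tracker_list t;
        int a, b;

        trackers_init(&t);
        a = pop_tracker(&t);
        b = pop_tracker(&t);
        printf("%d %d\n", a, b);           /* 0 1 */
        push_tracker(&t, 0);
        printf("%d\n", pop_tracker(&t));   /* 0 again */
        return 0;
    }
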
918 card->ctrl[i].regmap = card->regmap + (i * 4096);
927 st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
931 card->ctrl[i].card = card;
932 card->ctrl[i].id = i;
951 struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
953 if (ctrl->issue_wq) {
954 destroy_workqueue(ctrl->issue_wq);
955 ctrl->issue_wq = NULL;
958 if (ctrl->done_wq) {
959 destroy_workqueue(ctrl->done_wq);
960 ctrl->done_wq = NULL;
963 if (ctrl->trackers)
964 vfree(ctrl->trackers);
966 if (ctrl->status.buf)
968 ctrl->status.buf,
969 ctrl->status.dma_addr);
970 if (ctrl->cmd.buf)
972 ctrl->cmd.buf, ctrl->cmd.dma_addr);
978 int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
986 dma = get_tracker_dma(ctrl->trackers, i);
988 atomic_dec(&ctrl->stats.hw_q_depth);
989 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
990 push_tracker(ctrl->trackers, i);
1000 struct rsxx_dma_ctrl *ctrl;
1004 ctrl = &card->ctrl[i];
1006 if (ctrl->issue_wq) {
1007 destroy_workqueue(ctrl->issue_wq);
1008 ctrl->issue_wq = NULL;
1011 if (ctrl->done_wq) {
1012 destroy_workqueue(ctrl->done_wq);
1013 ctrl->done_wq = NULL;
1016 if (timer_pending(&ctrl->activity_timer))
1017 del_timer_sync(&ctrl->activity_timer);
1020 spin_lock_bh(&ctrl->queue_lock);
1021 rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
1022 spin_unlock_bh(&ctrl->queue_lock);
1024 rsxx_dma_cancel(ctrl);
1026 vfree(ctrl->trackers);
1029 ctrl->status.buf, ctrl->status.dma_addr);
1031 ctrl->cmd.buf, ctrl->cmd.dma_addr);
1052 dma = get_tracker_dma(card->ctrl[i].trackers, j);
1057 card->ctrl[i].stats.writes_issued--;
1059 card->ctrl[i].stats.discards_issued--;
1061 card->ctrl[i].stats.reads_issued--;
1072 push_tracker(card->ctrl[i].trackers, j);
1076 spin_lock_bh(&card->ctrl[i].queue_lock);
1077 list_splice(&issued_dmas[i], &card->ctrl[i].queue);
1079 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
1080 card->ctrl[i].stats.sw_q_depth += cnt;
1081 card->ctrl[i].e_cnt = 0;
1082 spin_unlock_bh(&card->ctrl[i].queue_lock);
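
The closing matches come from the EEH path (rsxx_eeh_save_issued_dmas()): before a reset, every DMA still held by a tracker is reclaimed, its *_issued counter rolled back, and the whole set spliced back onto the software queue so it can be reissued after recovery; hw_q_depth shrinks by the same count, sw_q_depth grows by it, and e_cnt restarts at 0. A minimal model of that bookkeeping follows, with a plain pointer array standing in for the tracker table and illustrative names throughout.

    #include <stdio.h>

    #define NR_TAGS 8

    struct dma { int cmd; };

    /*
     * Move every in-flight DMA back to the software side: the hardware
     * queue depth drops by the number reclaimed and the software queue
     * depth grows by the same amount, mirroring the driver's
     * atomic_sub() / sw_q_depth += cnt pair.
     */
    static int reclaim_issued(struct dma *trackers[NR_TAGS],
                              int *hw_q_depth, int *sw_q_depth)
    {
        int i, cnt = 0;

        for (i = 0; i < NR_TAGS; i++) {
            if (!trackers[i])
                continue;
            /* (real driver: collect onto a per-channel list, later
               list_splice()d back onto ctrl->queue) */
            trackers[i] = NULL;        /* tag goes back on the free list */
            cnt++;
        }
        *hw_q_depth -= cnt;
        *sw_q_depth += cnt;
        return cnt;
    }

    int main(void)
    {
        struct dma a = { 0 }, b = { 0 };
        struct dma *trackers[NR_TAGS] = { [2] = &a, [5] = &b };
        int hw = 2, sw = 0;
        int cnt;

        cnt = reclaim_issued(trackers, &hw, &sw);
        printf("reclaimed %d, hw_q_depth=%d sw_q_depth=%d\n", cnt, hw, sw);
        return 0;
    }
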