Lines Matching refs:dcb (drivers/scsi/dc395x.c)

230 	struct DeviceCtlBlk *dcb;
270 struct list_head list; /* next/prev ptrs for the dcb list */
302 struct list_head dcb_list; /* head of going dcb list */
378 static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
382 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
390 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
392 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
395 struct DeviceCtlBlk *dcb);
717 /* find supplied dcb and then select the next one */
736 static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
739 dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
778 static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
782 srb->cmd, dcb->target_id, dcb->target_lun, srb);
783 list_add(&srb->list, &dcb->srb_waiting_list);
787 static void srb_waiting_append(struct DeviceCtlBlk *dcb,
791 srb->cmd, dcb->target_id, dcb->target_lun, srb);
792 list_add_tail(&srb->list, &dcb->srb_waiting_list);
796 static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
799 srb->cmd, dcb->target_id, dcb->target_lun, srb);
800 list_add_tail(&srb->list, &dcb->srb_going_list);
804 static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
809 srb->cmd, dcb->target_id, dcb->target_lun, srb);
811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
819 static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
825 srb->cmd, dcb->target_id, dcb->target_lun, srb);
827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
835 static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
840 srb->cmd, dcb->target_id, dcb->target_lun, srb);
841 list_move(&srb->list, &dcb->srb_waiting_list);
845 static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
850 srb->cmd, dcb->target_id, dcb->target_lun, srb);
851 list_move(&srb->list, &dcb->srb_going_list);
877 struct DeviceCtlBlk *dcb;
892 * Find the starting dcb. Need to find it again in the list
895 list_for_each_entry(dcb, dcb_list_head, list)
896 if (dcb == acb->dcb_run_robin) {
897 start = dcb;
908 * Loop over the dcb, but we start somewhere (potentially) in
921 /* move to next dcb */
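
The lines above (877-921) are waiting_process(): the walk over the dcb list restarts at acb->dcb_run_robin, loops once over the devices, and moves on to the next dcb so no single target monopolises the bus. A minimal userspace sketch of that round-robin pattern, using an array and a cursor index in place of the driver's linked dcb_list and dcb_run_robin pointer (all names here are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdbool.h>

    #define NDEV 4

    struct device {
        int id;
        bool has_waiting_work;   /* stand-in for a non-empty srb_waiting_list */
    };

    /* Start scanning at the remembered cursor, take the first device with
     * queued work, and advance the cursor so the next call starts after it. */
    static struct device *pick_next(struct device *devs, int ndev, int *run_robin)
    {
        for (int i = 0; i < ndev; i++) {
            int idx = (*run_robin + i) % ndev;

            if (devs[idx].has_waiting_work) {
                *run_robin = (idx + 1) % ndev;
                return &devs[idx];
            }
        }
        return NULL;   /* nothing waiting anywhere */
    }

    int main(void)
    {
        struct device devs[NDEV] = {
            { 0, false }, { 1, true }, { 2, true }, { 3, false },
        };
        int run_robin = 0;

        for (int i = 0; i < 3; i++) {
            struct device *d = pick_next(devs, NDEV, &run_robin);
            printf("picked %d\n", d ? d->id : -1);
        }
        return 0;
    }
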
961 struct DeviceCtlBlk *dcb = srb->dcb;
963 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
966 srb_waiting_append(dcb, srb);
971 if (!start_scsi(acb, dcb, srb))
972 srb_going_append(dcb, srb);
974 srb_waiting_insert(dcb, srb);
980 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
986 cmd, dcb->target_id, dcb->target_lun);
988 srb->dcb = dcb;
1045 if (dcb->sync_period & WIDE_SYNC &&
1051 srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
1085 struct DeviceCtlBlk *dcb;
1110 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1111 if (!dcb) {
1133 build_srb(cmd, dcb, srb);
1135 if (!list_empty(&dcb->srb_waiting_list)) {
1137 srb_waiting_append(dcb, srb);
1192 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1197 if (!dcb)
1198 dcb = acb->active_dcb;
1199 if (!srb && dcb)
1200 srb = dcb->active_srb;
1273 struct DeviceCtlBlk *dcb;
1277 list_for_each_entry(dcb, &acb->dcb_list, list) {
1280 dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
1281 dcb->sync_period = 0;
1282 dcb->sync_offset = 0;
1284 dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
1285 period_index = eeprom->target[dcb->target_id].period & 0x07;
1286 dcb->min_nego_period = clock_period[period_index];
1287 if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
1289 dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
1369 struct DeviceCtlBlk *dcb;
1374 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1375 if (!dcb) {
1380 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1382 srb_waiting_remove(dcb, srb);
1385 free_tag(dcb, srb);
1391 srb = find_cmd(cmd, &dcb->srb_going_list);
1403 static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1414 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
1415 dcb->sync_offset = 0;
1416 dcb->min_nego_period = 200 >> 2;
1417 } else if (dcb->sync_offset == 0)
1418 dcb->sync_offset = SYNC_NEGO_OFFSET;
1423 *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
1424 *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
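
build_sdtr() (lines 1403-1424) falls back to asynchronous transfers when NTC_DO_SYNC_NEGO is clear (offset 0, 200 ns period) and otherwise appends the negotiated period factor and offset to the message-out buffer. A self-contained sketch of the 5-byte SDTR extended message this produces, assuming the standard SCSI layout (the period factor is in 4 ns units, the offset is the maximum REQ/ACK offset, 0 meaning asynchronous):

    #include <stdio.h>

    static size_t build_sdtr_msg(unsigned char *buf, unsigned char period,
                                 unsigned char offset)
    {
        buf[0] = 0x01;    /* EXTENDED_MESSAGE */
        buf[1] = 0x03;    /* length of the message body that follows */
        buf[2] = 0x01;    /* EXTENDED_SDTR */
        buf[3] = period;  /* e.g. 200 >> 2 = 50 for the 200 ns / 5 MHz fallback */
        buf[4] = offset;
        return 5;
    }

    int main(void)
    {
        unsigned char msg[5];
        size_t n = build_sdtr_msg(msg, 200 >> 2, 15);

        for (size_t i = 0; i < n; i++)
            printf("%02x ", msg[i]);
        printf("\n");
        return 0;
    }
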
1431 static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1434 u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
1492 static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1499 dcb->target_id, dcb->target_lun, srb);
1546 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1547 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1548 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1551 identify_message = dcb->identify_msg;
1560 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1561 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1562 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1563 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1564 && (dcb->target_lun == 0)) {
1570 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1571 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1572 build_wdtr(acb, dcb, srb);
1576 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1577 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1578 build_sdtr(acb, dcb, srb);
1581 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1582 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1583 build_wdtr(acb, dcb, srb);
1594 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1599 while (tag_mask & dcb->tag_mask
1600 && tag_number < dcb->max_command) {
1604 if (tag_number >= dcb->max_command) {
1617 dcb->tag_mask |= tag_mask;
1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1650 srb->cmd, dcb->target_id, dcb->target_lun);
1652 free_tag(dcb, srb);
1662 dcb->active_srb = srb;
1663 acb->active_dcb = dcb;
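
Lines 1594-1617, together with free_tag() at line 739, show the tagged-queueing bookkeeping: each DeviceCtlBlk keeps a tag_mask bitmap, start_scsi() scans for the first clear bit below max_command (requeueing the request if none is free), and free_tag() clears the bit on completion. A self-contained sketch of that bitmask scheme, with hypothetical names rather than the driver's structures:

    #include <stdio.h>

    struct dev_tags {
        unsigned int tag_mask;     /* bit n set => tag n in use */
        unsigned int max_command;  /* queue depth for this device */
    };

    /* Return an unused tag number, or -1 if the queue is full. */
    static int alloc_tag(struct dev_tags *d)
    {
        unsigned int tag_number = 0, tag_mask = 1;

        while ((tag_mask & d->tag_mask) && tag_number < d->max_command) {
            tag_number++;
            tag_mask <<= 1;
        }
        if (tag_number >= d->max_command)
            return -1;             /* no free tag: caller requeues the request */
        d->tag_mask |= tag_mask;
        return (int)tag_number;
    }

    static void free_tag(struct dev_tags *d, unsigned int tag_number)
    {
        d->tag_mask &= ~(1u << tag_number);   /* free tag mask */
    }

    int main(void)
    {
        struct dev_tags d = { .tag_mask = 0, .max_command = 4 };
        int a = alloc_tag(&d), b = alloc_tag(&d);

        printf("tags %d %d, mask 0x%x\n", a, b, d.tag_mask);
        free_tag(&d, (unsigned int)a);
        printf("after free: mask 0x%x\n", d.tag_mask);
        return 0;
    }
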
1702 struct DeviceCtlBlk *dcb;
1743 dcb = acb->active_dcb;
1744 if (!dcb) {
1750 srb = dcb->active_srb;
1751 if (dcb->flag & ABORT_DEV_) {
1898 struct DeviceCtlBlk *dcb;
1913 dcb = acb->active_dcb;
1915 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1978 pci_dma_sync_single_for_device(srb->dcb->
2040 struct DeviceCtlBlk *dcb = srb->dcb;
2086 if (dcb->sync_period & WIDE_SYNC)
2093 (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2116 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
2144 (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
2244 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2251 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2268 (srb->dcb->sync_period & WIDE_SYNC) ?
2271 if (srb->dcb->sync_period & WIDE_SYNC)
2312 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2349 if (srb->dcb->sync_period & WIDE_SYNC)
2405 struct DeviceCtlBlk *dcb = srb->dcb;
2428 dump_register_info(acb, dcb, srb);
2488 if (srb->dcb->sync_period & WIDE_SYNC)
2518 if (srb->dcb->sync_period & WIDE_SYNC) {
2545 if (dcb->sync_period & WIDE_SYNC) {
2628 srb->dcb->target_id, srb->dcb->target_lun);
2633 struct DeviceCtlBlk *dcb, u8 tag)
2640 if (!(dcb->tag_mask & (1 << tag)))
2643 dcb->tag_mask, tag);
2645 if (list_empty(&dcb->srb_going_list))
2647 list_for_each_entry(i, &dcb->srb_going_list, list) {
2657 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2658 if (dcb->flag & ABORT_DEV_) {
2666 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2667 srb->state |= dcb->active_srb->state;
2669 dcb->active_srb = srb;
2676 dcb->active_srb = srb;
2686 struct DeviceCtlBlk *dcb)
2688 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
2689 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
2690 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
2691 set_xfer_rate(acb, dcb);
2698 struct DeviceCtlBlk *dcb = srb->dcb;
2700 dcb->target_id, dcb->target_lun);
2702 dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
2703 dcb->sync_mode |= SYNC_NEGO_DONE;
2704 /*dcb->sync_period &= 0; */
2705 dcb->sync_offset = 0;
2706 dcb->min_nego_period = 200 >> 2; /* 200ns <=> 5 MHz */
2708 reprogram_regs(acb, dcb);
2709 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2710 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2711 build_wdtr(acb, dcb, srb);
2721 struct DeviceCtlBlk *dcb = srb->dcb;
2726 dcb->target_id, srb->msgin_buf[3] << 2,
2733 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
2734 dcb->sync_offset = 0;
2735 else if (dcb->sync_offset == 0)
2736 dcb->sync_offset = srb->msgin_buf[4];
2737 if (srb->msgin_buf[4] > dcb->sync_offset)
2738 srb->msgin_buf[4] = dcb->sync_offset;
2740 dcb->sync_offset = srb->msgin_buf[4];
2743 || dcb->min_nego_period >
2751 dcb->sync_period &= 0xf0;
2752 dcb->sync_period |= ALT_SYNC | bval;
2753 dcb->min_nego_period = srb->msgin_buf[3];
2755 if (dcb->sync_period & WIDE_SYNC)
2762 dcb->target_id, (fact == 500) ? "Wide16" : "",
2763 dcb->min_nego_period << 2, dcb->sync_offset,
2764 (fact / dcb->min_nego_period),
2765 ((fact % dcb->min_nego_period) * 10 +
2766 dcb->min_nego_period / 2) / dcb->min_nego_period);
2776 dcb->sync_mode |= SYNC_NEGO_DONE;
2778 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2779 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2780 build_wdtr(acb, dcb, srb);
2786 dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
2788 reprogram_regs(acb, dcb);
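
The printout arguments at lines 2755-2766 convert the accepted SDTR factor (min_nego_period, in 4 ns units) into a rate with one fixed-point decimal: 250/factor is the sync rate in MHz, doubled to 500/factor when WIDE_SYNC is set (fact == 500 selects the "Wide16" label above). A small sketch of that integer-only rounding, with illustrative names:

    #include <stdio.h>

    /* The last printf argument reproduces the rounded single decimal digit
     * computed at lines 2764-2766, without floating point. */
    static void print_rate(unsigned int min_nego_period, int wide)
    {
        unsigned int fact = wide ? 500 : 250;

        printf("%s period %u ns, rate %u.%u\n",
               wide ? "Wide16" : "narrow",
               min_nego_period << 2,
               fact / min_nego_period,
               ((fact % min_nego_period) * 10 +
                min_nego_period / 2) / min_nego_period);
    }

    int main(void)
    {
        print_rate(200 >> 2, 0);   /* async fallback: 200 ns, 5.0 */
        print_rate(12, 1);         /* 48 ns wide: 41.7 */
        return 0;
    }
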
2795 struct DeviceCtlBlk *dcb = srb->dcb;
2796 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2798 dcb->sync_period &= ~WIDE_SYNC;
2799 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2800 dcb->sync_mode |= WIDE_NEGO_DONE;
2802 reprogram_regs(acb, dcb);
2803 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2804 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2805 build_sdtr(acb, dcb, srb);
2813 struct DeviceCtlBlk *dcb = srb->dcb;
2814 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2816 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2824 dcb->target_id);
2831 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2833 dcb->sync_period |= WIDE_SYNC;
2835 dcb->sync_period &= ~WIDE_SYNC;
2837 /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
2840 (8 << srb->msgin_buf[3]), dcb->target_id);
2841 reprogram_regs(acb, dcb);
2842 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2843 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2844 build_sdtr(acb, dcb, srb);
2866 struct DeviceCtlBlk *dcb = acb->active_dcb;
2881 msgin_qtag(acb, dcb,
2945 srb->cmd, dcb->target_id,
2946 dcb->target_lun);
2947 dcb->flag |= ABORT_DEV_;
2956 srb->msgout_buf[0] = dcb->identify_msg;
3002 static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
3007 if (dcb->identify_msg & 0x07)
3011 current_sync_offset = dcb->sync_offset;
3016 if (i->target_id == dcb->target_id) {
3017 i->sync_period = dcb->sync_period;
3018 i->sync_offset = dcb->sync_offset;
3019 i->sync_mode = dcb->sync_mode;
3020 i->min_nego_period = dcb->min_nego_period;
3027 struct DeviceCtlBlk *dcb = acb->active_dcb;
3030 if (!dcb) {
3041 srb = dcb->active_srb;
3051 dcb->target_id, dcb->target_lun);
3055 dcb->flag &= ~ABORT_DEV_;
3081 dcb->target_id, dcb->target_lun);
3088 free_tag(dcb, srb);
3089 srb_going_to_waiting_move(dcb, srb);
3112 free_tag(dcb, srb);
3113 dcb->active_srb = NULL;
3115 srb_done(acb, dcb, srb);
3123 struct DeviceCtlBlk *dcb = acb->active_dcb;
3134 if (dcb) { /* Arbitration lost but Reselection win */
3135 srb = dcb->active_srb;
3146 srb->cmd, dcb->target_id,
3147 dcb->target_lun, rsel_tar_lun_id,
3153 free_tag(dcb, srb);
3154 srb_going_to_waiting_move(dcb, srb);
3166 dcb = find_dcb(acb, id, lun);
3167 if (!dcb) {
3173 acb->active_dcb = dcb;
3175 if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
3178 dcb->target_id, dcb->target_lun);
3180 if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
3182 dcb->active_srb = srb;
3185 srb = dcb->active_srb;
3192 dcb->target_id, dcb->target_lun);
3195 dcb->active_srb = srb;
3198 if (dcb->flag & ABORT_DEV_) {
3209 dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
3211 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */
3212 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */
3213 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); /* sync period, wide */
3236 static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
3241 && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
3242 /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
3243 /* ((dcb->dev_type == TYPE_DISK)
3244 || (dcb->dev_type == TYPE_MOD)) && */
3246 if (dcb->max_command == 1)
3247 dcb->max_command =
3248 dcb->acb->tag_max_num;
3249 dcb->sync_mode |= EN_TAG_QUEUEING;
3250 /*dcb->tag_mask = 0; */
3252 dcb->max_command = 1;
3257 static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3261 dcb->dev_type = bval1;
3263 disc_tagq_set(dcb, ptr);
3312 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3340 cmd->cmnd[0], dcb->target_id,
3341 dcb->target_lun, status, acb->scan_devices);
3346 cmd->cmnd[0], dcb->target_id,
3347 dcb->target_lun, status, acb->scan_devices);
3352 cmd->cmnd[0], dcb->target_id,
3353 dcb->target_lun, status, acb->scan_devices);
3358 cmd->cmnd[0], dcb->target_id,
3359 dcb->target_lun, status, acb->scan_devices);
3364 cmd->cmnd[0], dcb->target_id,
3365 dcb->target_lun, status, acb->scan_devices);
3407 request_sense(acb, dcb, srb);
3410 tempcnt = (u8)list_size(&dcb->srb_going_list);
3412 dcb->target_id, dcb->target_lun, tempcnt);
3415 dcb->max_command = tempcnt;
3416 free_tag(dcb, srb);
3417 srb_going_to_waiting_move(dcb, srb);
3475 dcb->inquiry7 = ptr->Flags;
3482 if (!dcb->init_tcq_flag) {
3483 add_dev(acb, dcb, ptr);
3484 dcb->init_tcq_flag = 1;
3505 srb_going_remove(dcb, srb);
3525 struct DeviceCtlBlk *dcb;
3528 list_for_each_entry(dcb, &acb->dcb_list, list) {
3533 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3542 srb_going_remove(dcb, srb);
3543 free_tag(dcb, srb);
3554 if (!list_empty(&dcb->srb_going_list))
3557 dcb->target_id, dcb->target_lun);
3558 if (dcb->tag_mask)
3561 dcb->target_id, dcb->target_lun,
3562 dcb->tag_mask);
3565 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3572 srb_waiting_remove(dcb, srb);
3583 if (!list_empty(&dcb->srb_waiting_list))
3585 list_size(&dcb->srb_waiting_list), dcb->target_id,
3586 dcb->target_lun);
3587 dcb->flag &= ~ABORT_DEV_;
3675 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3708 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */
3711 srb->cmd, dcb->target_id, dcb->target_lun);
3712 srb_going_to_waiting_move(dcb, srb);
3736 struct DeviceCtlBlk *dcb;
3738 dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
3740 if (!dcb)
3742 dcb->acb = NULL;
3743 INIT_LIST_HEAD(&dcb->srb_going_list);
3744 INIT_LIST_HEAD(&dcb->srb_waiting_list);
3745 dcb->active_srb = NULL;
3746 dcb->tag_mask = 0;
3747 dcb->max_command = 1;
3748 dcb->target_id = target;
3749 dcb->target_lun = lun;
3751 dcb->identify_msg =
3752 IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
3754 dcb->identify_msg = IDENTIFY(0, lun);
3756 dcb->dev_mode = eeprom->target[target].cfg0;
3757 dcb->inquiry7 = 0;
3758 dcb->sync_mode = 0;
3759 dcb->min_nego_period = clock_period[period_index];
3760 dcb->sync_period = 0;
3761 dcb->sync_offset = 0;
3762 dcb->flag = 0;
3765 if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
3767 dcb->sync_mode |= WIDE_NEGO_ENABLE;
3770 if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
3772 dcb->sync_mode |= SYNC_NEGO_ENABLE;
3774 if (dcb->target_lun != 0) {
3778 if (p->target_id == dcb->target_id)
3782 dcb->target_id, dcb->target_lun,
3784 dcb->sync_mode = p->sync_mode;
3785 dcb->sync_period = p->sync_period;
3786 dcb->min_nego_period = p->min_nego_period;
3787 dcb->sync_offset = p->sync_offset;
3788 dcb->inquiry7 = p->inquiry7;
3790 return dcb;
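
device_alloc() (lines 3736-3790) fills the per-LUN control block from EEPROM settings and builds the IDENTIFY message byte via the IDENTIFY() macro at lines 3751-3754, with the disconnect-privilege bit taken from NTC_DO_DISCONNECT. Assuming the standard SCSI-2 IDENTIFY layout (bit 7 always set, bit 6 = disconnect privilege, bits 2:0 = LUN), a sketch of what that byte amounts to:

    #include <stdio.h>

    static unsigned char identify_msg(int can_disconnect, unsigned int lun)
    {
        return 0x80 | (can_disconnect ? 0x40 : 0) | (lun & 0x07);
    }

    int main(void)
    {
        /* expected: 0xc0 (LUN 0, disconnect allowed) and 0x82 (LUN 2, no disconnect) */
        printf("0x%02x 0x%02x\n", identify_msg(1, 0), identify_msg(0, 2));
        return 0;
    }
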
3798 * @dcb: A newly created and initialised device instance to add.
3801 struct DeviceCtlBlk *dcb)
3804 dcb->acb = acb;
3808 acb->dcb_run_robin = dcb;
3811 list_add_tail(&dcb->list, &acb->dcb_list);
3814 acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
3815 acb->children[dcb->target_id][dcb->target_lun] = dcb;
3826 * @dcb: A device that has previously been added to the adapter.
3829 struct DeviceCtlBlk *dcb)
3834 dcb->target_id, dcb->target_lun);
3837 if (acb->active_dcb == dcb)
3839 if (acb->dcb_run_robin == dcb)
3840 acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
3844 if (dcb == i) {
3850 acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
3851 acb->children[dcb->target_id][dcb->target_lun] = NULL;
3852 dcb->acb = NULL;
3861 * @dcb: A device that has previously been added to the adapter.
3864 struct DeviceCtlBlk *dcb)
3866 if (list_size(&dcb->srb_going_list) > 1) {
3869 dcb->target_id, dcb->target_lun,
3870 list_size(&dcb->srb_going_list));
3873 adapter_remove_device(acb, dcb);
3874 kfree(dcb);
3886 struct DeviceCtlBlk *dcb;
3891 list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
3892 adapter_remove_and_free_device(acb, dcb);
3906 struct DeviceCtlBlk *dcb;
3908 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3909 if (!dcb)
3911 adapter_add_device(acb, dcb);
3926 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3927 if (dcb)
3928 adapter_remove_and_free_device(acb, dcb);
4548 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4632 struct DeviceCtlBlk *dcb;
4677 list_for_each_entry(dcb, &acb->dcb_list, list) {
4679 SPRINTF("%02i %02i %02i ", dev, dcb->target_id,
4680 dcb->target_lun);
4681 YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
4682 YESNO(dcb->sync_offset);
4683 YESNO(dcb->sync_period & WIDE_SYNC);
4684 YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
4685 YESNO(dcb->dev_mode & NTC_DO_SEND_START);
4686 YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
4687 nego_period = clock_period[dcb->sync_period & 0x07] << 2;
4688 if (dcb->sync_offset)
4691 SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));
4693 if (dcb->sync_offset & 0x0f) {
4698 (dcb->sync_offset & 0x0f));
4703 SPRINTF(" %02i\n", dcb->max_command);
4712 list_for_each_entry(dcb, &acb->dcb_list, list) {
4714 if (!list_empty(&dcb->srb_waiting_list))
4716 dcb->target_id, dcb->target_lun,
4717 list_size(&dcb->srb_waiting_list));
4718 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
4720 if (!list_empty(&dcb->srb_going_list))
4722 dcb->target_id, dcb->target_lun,
4723 list_size(&dcb->srb_going_list));
4724 list_for_each_entry(srb, &dcb->srb_going_list, list)
4726 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
4732 list_for_each_entry(dcb, &acb->dcb_list, list) {
4733 SPRINTF("%p -> ", dcb);