Lines matching refs: shost  (drivers/scsi/scsi_lib.c; the left-hand numbers are source line numbers)

304 struct Scsi_Host *shost = sdev->host;
308 spin_lock_irqsave(shost->host_lock, flags);
309 shost->host_busy--;
311 if (unlikely(scsi_host_in_recovery(shost) &&
312 (shost->host_failed || shost->host_eh_scheduled)))
313 scsi_eh_wakeup(shost);
314 spin_unlock(shost->host_lock);
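
The excerpt at lines 304-314 is the per-host "unbusy" accounting: the busy count is dropped under shost->host_lock and, if the host is in recovery with failed or explicitly scheduled error handling, scsi_eh_wakeup() is called. Below is a minimal userspace sketch of just that decision; the struct, fields and function names are stand-ins of my own, and the host_lock/IRQ handling visible at lines 308 and 314 is deliberately left out.

#include <stdbool.h>
#include <stdio.h>

struct model_host {
	int  host_busy;		/* commands currently owned by the driver */
	int  host_failed;	/* commands parked for error handling */
	bool eh_scheduled;	/* error handler explicitly requested */
	bool in_recovery;	/* host is in error recovery */
};

/* Drop the busy count; returns true when the error handler should be woken. */
static bool model_device_unbusy(struct model_host *h)
{
	h->host_busy--;
	return h->in_recovery && (h->host_failed || h->eh_scheduled);
}

int main(void)
{
	struct model_host h = {
		.host_busy = 1, .host_failed = 1, .in_recovery = true,
	};

	if (model_device_unbusy(&h))
		printf("wake error handler (host_busy now %d)\n", h.host_busy);
	return 0;
}
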
329 struct Scsi_Host *shost = current_sdev->host;
334 spin_lock_irqsave(shost->host_lock, flags);
336 spin_unlock_irqrestore(shost->host_lock, flags);
346 spin_lock_irqsave(shost->host_lock, flags);
356 spin_unlock_irqrestore(shost->host_lock, flags);
358 spin_lock_irqsave(shost->host_lock, flags);
363 spin_unlock_irqrestore(shost->host_lock, flags);
381 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
383 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
384 shost->host_blocked || shost->host_self_blocked)
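
scsi_host_is_busy() at lines 381-384 is a pure predicate over host state, so it can be lifted almost verbatim into a small compilable model. The struct below is a stand-in, not the kernel's Scsi_Host, but the check itself mirrors the excerpt:

#include <stdio.h>

struct model_host {
	int can_queue;		/* max outstanding commands, <= 0 means unlimited */
	int host_busy;		/* commands currently outstanding */
	int host_blocked;	/* transient block imposed by the midlayer */
	int host_self_blocked;	/* block requested via scsi_block_requests() */
};

/* Same predicate as lines 383-384, over a stand-in struct. */
static int model_host_is_busy(const struct model_host *h)
{
	if ((h->can_queue > 0 && h->host_busy >= h->can_queue) ||
	    h->host_blocked || h->host_self_blocked)
		return 1;
	return 0;
}

int main(void)
{
	struct model_host h = { .can_queue = 2, .host_busy = 2 };

	printf("full queue:  busy=%d\n", model_host_is_busy(&h));	/* 1 */
	h.host_busy = 1;
	printf("free slot:   busy=%d\n", model_host_is_busy(&h));	/* 0 */
	h.host_self_blocked = 1;
	printf("self-block:  busy=%d\n", model_host_is_busy(&h));	/* 1 */
	return 0;
}
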
405 struct Scsi_Host *shost;
413 shost = sdev->host;
417 spin_lock_irqsave(shost->host_lock, flags);
418 list_splice_init(&shost->starved_list, &starved_list);
422 * As long as shost is accepting commands and we have
431 if (scsi_host_is_busy(shost))
439 &shost->starved_list);
443 spin_unlock(shost->host_lock);
447 spin_lock(shost->host_lock);
450 list_splice(&starved_list, &shost->starved_list);
451 spin_unlock_irqrestore(shost->host_lock, flags);
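
Lines 405-451 show the starved-list pass of the run-queue path: the whole list is moved onto a private head under host_lock (the list_splice_init at line 418), devices are processed with the lock dropped, and whatever the host cannot take is spliced back (line 450). The toy below models that splice-out / process / splice-back pattern with a pthread mutex standing in for host_lock and a hand-rolled singly linked list standing in for list_head; every name here is illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct starved { int id; struct starved *next; };

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct starved *starved_list;	/* devices waiting for host resources */

static void starved_add(int id)
{
	struct starved *s = malloc(sizeof(*s));

	s->id = id;
	pthread_mutex_lock(&host_lock);
	s->next = starved_list;
	starved_list = s;
	pthread_mutex_unlock(&host_lock);
}

/* Splice the list out under the lock, process it unlocked, splice back leftovers. */
static void run_starved_queues(int host_is_busy)
{
	struct starved *local, *s;

	pthread_mutex_lock(&host_lock);
	local = starved_list;		/* like list_splice_init() at line 418 */
	starved_list = NULL;
	pthread_mutex_unlock(&host_lock);

	while ((s = local) != NULL) {
		if (host_is_busy) {
			/* Host can't take more work: put everything back
			 * (like the list_splice() at line 450) and stop. */
			pthread_mutex_lock(&host_lock);
			while ((s = local) != NULL) {
				local = s->next;
				s->next = starved_list;
				starved_list = s;
			}
			pthread_mutex_unlock(&host_lock);
			return;
		}
		local = s->next;
		printf("kicking request queue of starved device %d\n", s->id);
		free(s);
	}
}

int main(void)
{
	starved_add(1);
	starved_add(2);
	run_starved_queues(0);		/* host idle: both devices get kicked */
	return 0;
}
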
512 void scsi_run_host_queues(struct Scsi_Host *shost)
516 shost_for_each_device(sdev, shost)
1296 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1320 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1328 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1335 struct Scsi_Host *shost,
1338 if (scsi_host_in_recovery(shost))
1340 if (shost->host_busy == 0 && shost->host_blocked) {
1344 if (--shost->host_blocked == 0) {
1347 shost->host_no));
1352 if (scsi_host_is_busy(shost)) {
1354 list_add_tail(&sdev->starved_entry, &shost->starved_list);
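
scsi_host_queue_ready() (lines 1328-1354) gates dispatch on three things visible in the excerpt: the host must not be in recovery, a pending host_blocked count is decremented while the host sits idle and only releases commands once it reaches zero, and a host that still looks busy parks the device on the starved list. The sketch below models that flow with stand-in types; a plain counter replaces starved_list, and all names are mine rather than the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct model_host {
	bool in_recovery;
	int  can_queue;
	int  host_busy;
	int  host_blocked;	/* countdown armed when a command was requeued because the host was busy */
	int  host_self_blocked;
};

static bool model_host_is_busy(const struct model_host *h)
{
	return (h->can_queue > 0 && h->host_busy >= h->can_queue) ||
	       h->host_blocked || h->host_self_blocked;
}

/* Returns true if one more command may be sent to the host right now. */
static bool model_host_queue_ready(struct model_host *h, int *starved)
{
	if (h->in_recovery)
		return false;
	if (h->host_busy == 0 && h->host_blocked) {
		/* Host went idle while blocked: count the block down first. */
		if (--h->host_blocked != 0)
			return false;
	}
	if (model_host_is_busy(h)) {
		(*starved)++;	/* stands in for queueing on starved_list */
		return false;
	}
	return true;
}

int main(void)
{
	struct model_host h = { .can_queue = 2, .host_blocked = 2 };
	int starved = 0;

	for (int i = 1; i <= 2; i++) {
		bool ready = model_host_queue_ready(&h, &starved);

		printf("attempt %d: ready=%d host_blocked=%d starved=%d\n",
		       i, ready, h.host_blocked, starved);
	}
	return 0;
}
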
1369 * shost/starget/sdev, since the returned value is not guaranteed and
1380 struct Scsi_Host *shost;
1385 shost = sdev->host;
1393 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1407 struct Scsi_Host *shost;
1415 shost = sdev->host;
1427 spin_lock(shost->host_lock);
1428 shost->host_busy++;
1430 spin_unlock(shost->host_lock);
1489 struct Scsi_Host *shost;
1507 shost = sdev->host;
1544 spin_lock(shost->host_lock);
1557 &shost->starved_list);
1561 if (!scsi_target_queue_ready(shost, sdev))
1564 if (!scsi_host_queue_ready(q, shost, sdev))
1568 shost->host_busy++;
1574 spin_unlock_irq(shost->host_lock);
1594 spin_unlock_irq(shost->host_lock);
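
The excerpt at lines 1489-1594 is the dispatch path: target and host readiness are checked and host_busy is incremented while the host lock is held (lines 1561-1568), and only then is the lock dropped (line 1574) so the command can be handed to the driver; the not-ready exit unlocks at line 1594. The toy below models that ordering with a pthread mutex and a print statement standing in for the driver call; the readiness test is a simplified stand-in, not the kernel's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct model_host {
	pthread_mutex_t lock;
	int  host_busy;
	int  can_queue;
	bool target_ready;
};

static bool try_dispatch(struct model_host *h, int cmd_id)
{
	pthread_mutex_lock(&h->lock);

	/* Simplified stand-in for the target/host readiness checks. */
	if (!h->target_ready ||
	    (h->can_queue > 0 && h->host_busy >= h->can_queue)) {
		pthread_mutex_unlock(&h->lock);	/* "not ready" path */
		return false;
	}

	h->host_busy++;			/* account before dropping the lock */
	pthread_mutex_unlock(&h->lock);

	/* The kernel hands the command to the driver here, lock dropped. */
	printf("dispatching command %d (host_busy=%d)\n", cmd_id, h->host_busy);
	return true;
}

int main(void)
{
	struct model_host h = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.can_queue = 1,
		.target_ready = true,
	};

	try_dispatch(&h, 1);		/* dispatched */
	try_dispatch(&h, 2);		/* rejected: host already full */
	return 0;
}
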
1618 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1623 if (shost->unchecked_isa_dma)
1632 host_dev = scsi_get_device(shost);
1640 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1644 struct device *dev = shost->dma_dev;
1653 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1656 if (scsi_host_prot_dma(shost)) {
1657 shost->sg_prot_tablesize =
1658 min_not_zero(shost->sg_prot_tablesize,
1660 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1661 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1664 blk_queue_max_hw_sectors(q, shost->max_sectors);
1665 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1666 blk_queue_segment_boundary(q, shost->dma_boundary);
1667 dma_set_seg_boundary(dev, shost->dma_boundary);
1671 if (!shost->use_clustering)
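
scsi_calculate_bounce_limit() and __scsi_alloc_queue() (lines 1618-1671) turn host capabilities into block-queue limits: ISA-DMA hosts get a low bounce limit, otherwise the host device's DMA mask decides how high the queue may address, and sg_tablesize, max_sectors and dma_boundary are applied to the queue. The fragment below models only the bounce-limit decision; the constant and the 32-bit fallback are illustrative, not the kernel's definitions.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_BOUNCE_ISA ((uint64_t)0xFFFFFF)	/* 16MB-style ISA ceiling, illustrative */

/* Pick the highest address the host can DMA to without bouncing. */
static uint64_t model_bounce_limit(bool unchecked_isa_dma, uint64_t dma_mask)
{
	if (unchecked_isa_dma)
		return MODEL_BOUNCE_ISA;
	return dma_mask ? dma_mask : 0xFFFFFFFFu;	/* default to 32 bits */
}

int main(void)
{
	printf("isa host:    0x%" PRIx64 "\n", model_bounce_limit(true, 0));
	printf("64-bit host: 0x%" PRIx64 "\n",
	       model_bounce_limit(false, UINT64_MAX));
	return 0;
}
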
1720 * Arguments: shost - Host in question
1730 void scsi_block_requests(struct Scsi_Host *shost)
1732 shost->host_self_blocked = 1;
1742 * Arguments: shost - Host in question
1756 void scsi_unblock_requests(struct Scsi_Host *shost)
1758 shost->host_self_blocked = 0;
1759 scsi_run_host_queues(shost);
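
scsi_block_requests() and scsi_unblock_requests() (lines 1730-1759) are the simplest part of this machinery: they only flip host_self_blocked, which the busy check at lines 383-384 consults, and unblocking additionally re-runs the host's device queues via scsi_run_host_queues() so stalled work gets picked up. A small model tying the flag back to the busy predicate; names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct model_host {
	int  can_queue;
	int  host_busy;
	int  host_blocked;
	bool host_self_blocked;
};

static bool model_host_is_busy(const struct model_host *h)
{
	return (h->can_queue > 0 && h->host_busy >= h->can_queue) ||
	       h->host_blocked || h->host_self_blocked;
}

/* Drivers call this around operations during which no commands may arrive. */
static void model_block_requests(struct model_host *h)
{
	h->host_self_blocked = true;
}

/* Clearing the flag is not enough: queues must be re-run to drain stalled work. */
static void model_unblock_requests(struct model_host *h)
{
	h->host_self_blocked = false;
	printf("re-running host queues, busy=%d\n", model_host_is_busy(h));
}

int main(void)
{
	struct model_host h = { .can_queue = 4 };

	model_block_requests(&h);
	printf("blocked:   busy=%d\n", model_host_is_busy(&h));	/* 1 */
	model_unblock_requests(&h);				/* busy=0 */
	return 0;
}
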