scsi_sysfs.c revision db5bd1e0b505c54ff492172ce4abc245cf6cd639
1/*
2 * scsi_sysfs.c
3 *
4 * SCSI sysfs interface routines.
5 *
6 * Created to pull SCSI mid layer sysfs routines into one file.
7 */
8
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/device.h>
14
15#include <scsi/scsi.h>
16#include <scsi/scsi_device.h>
17#include <scsi/scsi_host.h>
18#include <scsi/scsi_tcq.h>
19#include <scsi/scsi_transport.h>
20#include <scsi/scsi_driver.h>
21
22#include "scsi_priv.h"
23#include "scsi_logging.h"
24
25static struct device_type scsi_dev_type;
26
27static const struct {
28	enum scsi_device_state	value;
29	char			*name;
30} sdev_states[] = {
31	{ SDEV_CREATED, "created" },
32	{ SDEV_RUNNING, "running" },
33	{ SDEV_CANCEL, "cancel" },
34	{ SDEV_DEL, "deleted" },
35	{ SDEV_QUIESCE, "quiesce" },
36	{ SDEV_OFFLINE,	"offline" },
37	{ SDEV_BLOCK,	"blocked" },
38	{ SDEV_CREATED_BLOCK, "created-blocked" },
39};
40
41const char *scsi_device_state_name(enum scsi_device_state state)
42{
43	int i;
44	char *name = NULL;
45
46	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
47		if (sdev_states[i].value == state) {
48			name = sdev_states[i].name;
49			break;
50		}
51	}
52	return name;
53}
54
55static const struct {
56	enum scsi_host_state	value;
57	char			*name;
58} shost_states[] = {
59	{ SHOST_CREATED, "created" },
60	{ SHOST_RUNNING, "running" },
61	{ SHOST_CANCEL, "cancel" },
62	{ SHOST_DEL, "deleted" },
63	{ SHOST_RECOVERY, "recovery" },
64	{ SHOST_CANCEL_RECOVERY, "cancel/recovery" },
65	{ SHOST_DEL_RECOVERY, "deleted/recovery", },
66};
67const char *scsi_host_state_name(enum scsi_host_state state)
68{
69	int i;
70	char *name = NULL;
71
72	for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
73		if (shost_states[i].value == state) {
74			name = shost_states[i].name;
75			break;
76		}
77	}
78	return name;
79}
80
81static int check_set(unsigned int *val, char *src)
82{
83	char *last;
84
85	if (strncmp(src, "-", 20) == 0) {
86		*val = SCAN_WILD_CARD;
87	} else {
88		/*
89		 * Doesn't check for int overflow
90		 */
91		*val = simple_strtoul(src, &last, 0);
92		if (*last != '\0')
93			return 1;
94	}
95	return 0;
96}
97
98static int scsi_scan(struct Scsi_Host *shost, const char *str)
99{
100	char s1[15], s2[15], s3[15], junk;
101	unsigned int channel, id, lun;
102	int res;
103
104	res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk);
105	if (res != 3)
106		return -EINVAL;
107	if (check_set(&channel, s1))
108		return -EINVAL;
109	if (check_set(&id, s2))
110		return -EINVAL;
111	if (check_set(&lun, s3))
112		return -EINVAL;
113	if (shost->transportt->user_scan)
114		res = shost->transportt->user_scan(shost, channel, id, lun);
115	else
116		res = scsi_scan_host_selected(shost, channel, id, lun, 1);
117	return res;
118}
119
120/*
121 * shost_show_function: macro to create an attr function that can be used to
122 * show a non-bit field.
123 */
124#define shost_show_function(name, field, format_string)			\
125static ssize_t								\
126show_##name (struct device *dev, struct device_attribute *attr, 	\
127	     char *buf)							\
128{									\
129	struct Scsi_Host *shost = class_to_shost(dev);			\
130	return snprintf (buf, 20, format_string, shost->field);		\
131}
132
133/*
134 * shost_rd_attr: macro to create a function and attribute variable for a
135 * read only field.
136 */
137#define shost_rd_attr2(name, field, format_string)			\
138	shost_show_function(name, field, format_string)			\
139static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
140
141#define shost_rd_attr(field, format_string) \
142shost_rd_attr2(field, field, format_string)
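
/*
 * For reference, shost_rd_attr(unique_id, "%u\n") expands to roughly:
 *
 *	static ssize_t
 *	show_unique_id(struct device *dev, struct device_attribute *attr,
 *		       char *buf)
 *	{
 *		struct Scsi_Host *shost = class_to_shost(dev);
 *		return snprintf(buf, 20, "%u\n", shost->unique_id);
 *	}
 *	static DEVICE_ATTR(unique_id, S_IRUGO, show_unique_id, NULL);
 */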
143
144/*
145 * Create the actual show/store functions and data structures.
146 */
147
148static ssize_t
149store_scan(struct device *dev, struct device_attribute *attr,
150	   const char *buf, size_t count)
151{
152	struct Scsi_Host *shost = class_to_shost(dev);
153	int res;
154
155	res = scsi_scan(shost, buf);
156	if (res == 0)
157		res = count;
158	return res;
159}
160static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
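
/*
 * Typical usage from user space (the host number is illustrative):
 *
 *	echo "0 1 2" > /sys/class/scsi_host/host0/scan	scan channel 0, id 1, lun 2
 *	echo "- - -" > /sys/class/scsi_host/host0/scan	scan everything
 *
 * Each "-" is turned into SCAN_WILD_CARD by check_set() above.
 */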
161
162static ssize_t
163store_shost_state(struct device *dev, struct device_attribute *attr,
164		  const char *buf, size_t count)
165{
166	int i;
167	struct Scsi_Host *shost = class_to_shost(dev);
168	enum scsi_host_state state = 0;
169
170	for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
171		const int len = strlen(shost_states[i].name);
172		if (strncmp(shost_states[i].name, buf, len) == 0 &&
173		   buf[len] == '\n') {
174			state = shost_states[i].value;
175			break;
176		}
177	}
178	if (!state)
179		return -EINVAL;
180
181	if (scsi_host_set_state(shost, state))
182		return -EINVAL;
183	return count;
184}
185
186static ssize_t
187show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
188{
189	struct Scsi_Host *shost = class_to_shost(dev);
190	const char *name = scsi_host_state_name(shost->shost_state);
191
192	if (!name)
193		return -EINVAL;
194
195	return snprintf(buf, 20, "%s\n", name);
196}
197
198/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
199struct device_attribute dev_attr_hstate =
200	__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
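
/*
 * Example (the host number is illustrative):
 *
 *	cat /sys/class/scsi_host/host0/state
 *	echo running > /sys/class/scsi_host/host0/state
 *
 * A write is accepted only for a name listed in shost_states[] and only if
 * scsi_host_set_state() permits the transition; the comparison above also
 * expects the trailing newline that echo supplies.
 */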
201
202static ssize_t
203show_shost_mode(unsigned int mode, char *buf)
204{
205	ssize_t len = 0;
206
207	if (mode & MODE_INITIATOR)
208		len = sprintf(buf, "%s", "Initiator");
209
210	if (mode & MODE_TARGET)
211		len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");
212
213	len += sprintf(buf + len, "\n");
214
215	return len;
216}
217
218static ssize_t
219show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
220			  char *buf)
221{
222	struct Scsi_Host *shost = class_to_shost(dev);
223	unsigned int supported_mode = shost->hostt->supported_mode;
224
225	if (supported_mode == MODE_UNKNOWN)
226		/* by default this should be initiator */
227		supported_mode = MODE_INITIATOR;
228
229	return show_shost_mode(supported_mode, buf);
230}
231
232static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
233
234static ssize_t
235show_shost_active_mode(struct device *dev,
236		       struct device_attribute *attr, char *buf)
237{
238	struct Scsi_Host *shost = class_to_shost(dev);
239
240	if (shost->active_mode == MODE_UNKNOWN)
241		return snprintf(buf, 20, "unknown\n");
242	else
243		return show_shost_mode(shost->active_mode, buf);
244}
245
246static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
247
248shost_rd_attr(unique_id, "%u\n");
249shost_rd_attr(host_busy, "%hu\n");
250shost_rd_attr(cmd_per_lun, "%hd\n");
251shost_rd_attr(can_queue, "%hd\n");
252shost_rd_attr(sg_tablesize, "%hu\n");
253shost_rd_attr(unchecked_isa_dma, "%d\n");
254shost_rd_attr(prot_capabilities, "%u\n");
255shost_rd_attr(prot_guard_type, "%hd\n");
256shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
257
258static struct attribute *scsi_sysfs_shost_attrs[] = {
259	&dev_attr_unique_id.attr,
260	&dev_attr_host_busy.attr,
261	&dev_attr_cmd_per_lun.attr,
262	&dev_attr_can_queue.attr,
263	&dev_attr_sg_tablesize.attr,
264	&dev_attr_unchecked_isa_dma.attr,
265	&dev_attr_proc_name.attr,
266	&dev_attr_scan.attr,
267	&dev_attr_hstate.attr,
268	&dev_attr_supported_mode.attr,
269	&dev_attr_active_mode.attr,
270	&dev_attr_prot_capabilities.attr,
271	&dev_attr_prot_guard_type.attr,
272	NULL
273};
274
275struct attribute_group scsi_shost_attr_group = {
276	.attrs =	scsi_sysfs_shost_attrs,
277};
278
279const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
280	&scsi_shost_attr_group,
281	NULL
282};
283
284static void scsi_device_cls_release(struct device *class_dev)
285{
286	struct scsi_device *sdev;
287
288	sdev = class_to_sdev(class_dev);
289	put_device(&sdev->sdev_gendev);
290}
291
292static void scsi_device_dev_release_usercontext(struct work_struct *work)
293{
294	struct scsi_device *sdev;
295	struct device *parent;
296	struct scsi_target *starget;
297	struct list_head *this, *tmp;
298	unsigned long flags;
299
300	sdev = container_of(work, struct scsi_device, ew.work);
301
302	parent = sdev->sdev_gendev.parent;
303	starget = to_scsi_target(parent);
304
305	spin_lock_irqsave(sdev->host->host_lock, flags);
306	starget->reap_ref++;
307	list_del(&sdev->siblings);
308	list_del(&sdev->same_target_siblings);
309	list_del(&sdev->starved_entry);
310	spin_unlock_irqrestore(sdev->host->host_lock, flags);
311
312	cancel_work_sync(&sdev->event_work);
313
314	list_for_each_safe(this, tmp, &sdev->event_list) {
315		struct scsi_event *evt;
316
317		evt = list_entry(this, struct scsi_event, node);
318		list_del(&evt->node);
319		kfree(evt);
320	}
321
322	if (sdev->request_queue) {
323		sdev->request_queue->queuedata = NULL;
324		/* user context needed to free queue */
325		scsi_free_queue(sdev->request_queue);
326		/* temporary expedient, try to catch use of queue lock
327		 * after free of sdev */
328		sdev->request_queue = NULL;
329	}
330
331	scsi_target_reap(scsi_target(sdev));
332
333	kfree(sdev->inquiry);
334	kfree(sdev);
335
336	if (parent)
337		put_device(parent);
338}
339
340static void scsi_device_dev_release(struct device *dev)
341{
342	struct scsi_device *sdp = to_scsi_device(dev);
343	execute_in_process_context(scsi_device_dev_release_usercontext,
344				   &sdp->ew);
345}
346
347static struct class sdev_class = {
348	.name		= "scsi_device",
349	.dev_release	= scsi_device_cls_release,
350};
351
352/* all probing is done in the individual ->probe routines */
353static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
354{
355	struct scsi_device *sdp;
356
357	if (dev->type != &scsi_dev_type)
358		return 0;
359
360	sdp = to_scsi_device(dev);
361	if (sdp->no_uld_attach)
362		return 0;
363	return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
364}
365
366static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
367{
368	struct scsi_device *sdev;
369
370	if (dev->type != &scsi_dev_type)
371		return 0;
372
373	sdev = to_scsi_device(dev);
374
375	add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
376	return 0;
377}
378
379struct bus_type scsi_bus_type = {
380        .name		= "scsi",
381        .match		= scsi_bus_match,
382	.uevent		= scsi_bus_uevent,
383	.pm		= &scsi_bus_pm_ops,
384};
385EXPORT_SYMBOL_GPL(scsi_bus_type);
386
387int scsi_sysfs_register(void)
388{
389	int error;
390
391	error = bus_register(&scsi_bus_type);
392	if (!error) {
393		error = class_register(&sdev_class);
394		if (error)
395			bus_unregister(&scsi_bus_type);
396	}
397
398	return error;
399}
400
401void scsi_sysfs_unregister(void)
402{
403	class_unregister(&sdev_class);
404	bus_unregister(&scsi_bus_type);
405}
406
407/*
408 * sdev_show_function: macro to create an attr function that can be used to
409 * show a non-bit field.
410 */
411#define sdev_show_function(field, format_string)				\
412static ssize_t								\
413sdev_show_##field (struct device *dev, struct device_attribute *attr,	\
414		   char *buf)						\
415{									\
416	struct scsi_device *sdev;					\
417	sdev = to_scsi_device(dev);					\
418	return snprintf (buf, 20, format_string, sdev->field);		\
419}									\
420
421/*
422 * sdev_rd_attr: macro to create a function and attribute variable for a
423 * read only field.
424 */
425#define sdev_rd_attr(field, format_string)				\
426	sdev_show_function(field, format_string)			\
427static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
428
429
430/*
431 * sdev_rw_attr: create a function and attribute variable for a
432 * read/write field.
433 */
434#define sdev_rw_attr(field, format_string)				\
435	sdev_show_function(field, format_string)				\
436									\
437static ssize_t								\
438sdev_store_##field (struct device *dev, struct device_attribute *attr,	\
439		    const char *buf, size_t count)			\
440{									\
441	struct scsi_device *sdev;					\
442	sdev = to_scsi_device(dev);					\
443	sscanf (buf, format_string, &sdev->field);			\
444	return count;							\
445}									\
446static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
447
448/* Currently we don't export bit fields, but we might in the future,
449 * so leave this code in */
450#if 0
451/*
452 * sdev_rw_attr_bit: create a function and attribute variable for a
453 * read/write bit field.
454 */
455#define sdev_rw_attr_bit(field)						\
456	sdev_show_function(field, "%d\n")					\
457									\
458static ssize_t								\
459sdev_store_##field (struct device *dev, struct device_attribute *attr,	\
460		    const char *buf, size_t count)			\
461{									\
462	int ret;							\
463	struct scsi_device *sdev;					\
464	ret = scsi_sdev_check_buf_bit(buf);				\
465	if (ret >= 0)	{						\
466		sdev = to_scsi_device(dev);				\
467		sdev->field = ret;					\
468		ret = count;						\
469	}								\
470	return ret;							\
471}									\
472static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
473
474/*
475 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
476 * else return -EINVAL.
477 */
478static int scsi_sdev_check_buf_bit(const char *buf)
479{
480	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
481		if (buf[0] == '1')
482			return 1;
483		else if (buf[0] == '0')
484			return 0;
485		else
486			return -EINVAL;
487	} else
488		return -EINVAL;
489}
490#endif
491/*
492 * Create the actual show/store functions and data structures.
493 */
494sdev_rd_attr (device_blocked, "%d\n");
495sdev_rd_attr (queue_depth, "%d\n");
496sdev_rd_attr (type, "%d\n");
497sdev_rd_attr (scsi_level, "%d\n");
498sdev_rd_attr (vendor, "%.8s\n");
499sdev_rd_attr (model, "%.16s\n");
500sdev_rd_attr (rev, "%.4s\n");
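
/*
 * The attributes above appear as read-only files in the device's sysfs
 * directory, e.g. (the h:c:t:l address is illustrative):
 *
 *	cat /sys/bus/scsi/devices/0:0:0:0/vendor
 *	cat /sys/bus/scsi/devices/0:0:0:0/model
 */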
501
502/*
503 * TODO: can we make these symlinks to the block layer ones?
504 */
505static ssize_t
506sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
507{
508	struct scsi_device *sdev;
509	sdev = to_scsi_device(dev);
510	return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
511}
512
513static ssize_t
514sdev_store_timeout (struct device *dev, struct device_attribute *attr,
515		    const char *buf, size_t count)
516{
517	struct scsi_device *sdev;
518	int timeout;
519	sdev = to_scsi_device(dev);
520	sscanf (buf, "%d\n", &timeout);
521	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
522	return count;
523}
524static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
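
/*
 * The timeout is shown and set in seconds, e.g. (the device name is
 * illustrative):
 *
 *	echo 60 > /sys/block/sda/device/timeout
 */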
525
526static ssize_t
527store_rescan_field (struct device *dev, struct device_attribute *attr,
528		    const char *buf, size_t count)
529{
530	scsi_rescan_device(dev);
531	return count;
532}
533static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
534
535static void sdev_store_delete_callback(struct device *dev)
536{
537	scsi_remove_device(to_scsi_device(dev));
538}
539
540static ssize_t
541sdev_store_delete(struct device *dev, struct device_attribute *attr,
542		  const char *buf, size_t count)
543{
544	int rc;
545
546	/* An attribute cannot be unregistered by one of its own methods,
547	 * so we have to use this roundabout approach.
548	 */
549	rc = device_schedule_callback(dev, sdev_store_delete_callback);
550	if (rc)
551		count = rc;
552	return count;
553}
554static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
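
/*
 * Writing anything to "delete" removes the device, e.g. (the device name is
 * illustrative):
 *
 *	echo 1 > /sys/block/sda/device/delete
 *
 * The value itself is ignored; the write only schedules scsi_remove_device()
 * via the callback above.
 */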
555
556static ssize_t
557store_state_field(struct device *dev, struct device_attribute *attr,
558		  const char *buf, size_t count)
559{
560	int i;
561	struct scsi_device *sdev = to_scsi_device(dev);
562	enum scsi_device_state state = 0;
563
564	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
565		const int len = strlen(sdev_states[i].name);
566		if (strncmp(sdev_states[i].name, buf, len) == 0 &&
567		   buf[len] == '\n') {
568			state = sdev_states[i].value;
569			break;
570		}
571	}
572	if (!state)
573		return -EINVAL;
574
575	if (scsi_device_set_state(sdev, state))
576		return -EINVAL;
577	return count;
578}
579
580static ssize_t
581show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
582{
583	struct scsi_device *sdev = to_scsi_device(dev);
584	const char *name = scsi_device_state_name(sdev->sdev_state);
585
586	if (!name)
587		return -EINVAL;
588
589	return snprintf(buf, 20, "%s\n", name);
590}
591
592static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
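
/*
 * Example (the device name is illustrative):
 *
 *	echo offline > /sys/block/sda/device/state
 *	echo running > /sys/block/sda/device/state
 *
 * Only names from sdev_states[] are accepted, and the transition must be
 * permitted by scsi_device_set_state().
 */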
593
594static ssize_t
595show_queue_type_field(struct device *dev, struct device_attribute *attr,
596		      char *buf)
597{
598	struct scsi_device *sdev = to_scsi_device(dev);
599	const char *name = "none";
600
601	if (sdev->ordered_tags)
602		name = "ordered";
603	else if (sdev->simple_tags)
604		name = "simple";
605
606	return snprintf(buf, 20, "%s\n", name);
607}
608
609static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
610
611static ssize_t
612show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
613{
614	return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
615}
616
617static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
618
619#define show_sdev_iostat(field)						\
620static ssize_t								\
621show_iostat_##field(struct device *dev, struct device_attribute *attr,	\
622		    char *buf)						\
623{									\
624	struct scsi_device *sdev = to_scsi_device(dev);			\
625	unsigned long long count = atomic_read(&sdev->field);		\
626	return snprintf(buf, 20, "0x%llx\n", count);			\
627}									\
628static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
629
630show_sdev_iostat(iorequest_cnt);
631show_sdev_iostat(iodone_cnt);
632show_sdev_iostat(ioerr_cnt);
633
634static ssize_t
635sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
636{
637	struct scsi_device *sdev;
638	sdev = to_scsi_device(dev);
639	return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
640}
641static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
642
643#define DECLARE_EVT_SHOW(name, Cap_name)				\
644static ssize_t								\
645sdev_show_evt_##name(struct device *dev, struct device_attribute *attr,	\
646		     char *buf)						\
647{									\
648	struct scsi_device *sdev = to_scsi_device(dev);			\
649	int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
650	return snprintf(buf, 20, "%d\n", val);				\
651}
652
653#define DECLARE_EVT_STORE(name, Cap_name)				\
654static ssize_t								\
655sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
656		      const char *buf, size_t count)			\
657{									\
658	struct scsi_device *sdev = to_scsi_device(dev);			\
659	int val = simple_strtoul(buf, NULL, 0);				\
660	if (val == 0)							\
661		clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
662	else if (val == 1)						\
663		set_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
664	else								\
665		return -EINVAL;						\
666	return count;							\
667}
668
669#define DECLARE_EVT(name, Cap_name)					\
670	DECLARE_EVT_SHOW(name, Cap_name)				\
671	DECLARE_EVT_STORE(name, Cap_name)				\
672	static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name,	\
673			   sdev_store_evt_##name);
674#define REF_EVT(name) &dev_attr_evt_##name.attr
675
676DECLARE_EVT(media_change, MEDIA_CHANGE)
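
/*
 * This creates the "evt_media_change" attribute, e.g. (the device name is
 * illustrative):
 *
 *	echo 1 > /sys/block/sr0/device/evt_media_change
 *
 * sets the corresponding bit in supported_events so media change events are
 * reported for the device; echoing 0 clears it again.
 */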
677
678/* Default template for device attributes.  May NOT be modified */
679static struct attribute *scsi_sdev_attrs[] = {
680	&dev_attr_device_blocked.attr,
681	&dev_attr_type.attr,
682	&dev_attr_scsi_level.attr,
683	&dev_attr_vendor.attr,
684	&dev_attr_model.attr,
685	&dev_attr_rev.attr,
686	&dev_attr_rescan.attr,
687	&dev_attr_delete.attr,
688	&dev_attr_state.attr,
689	&dev_attr_timeout.attr,
690	&dev_attr_iocounterbits.attr,
691	&dev_attr_iorequest_cnt.attr,
692	&dev_attr_iodone_cnt.attr,
693	&dev_attr_ioerr_cnt.attr,
694	&dev_attr_modalias.attr,
695	REF_EVT(media_change),
696	NULL
697};
698
699static struct attribute_group scsi_sdev_attr_group = {
700	.attrs =	scsi_sdev_attrs,
701};
702
703static const struct attribute_group *scsi_sdev_attr_groups[] = {
704	&scsi_sdev_attr_group,
705	NULL
706};
707
708static ssize_t
709sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
710			  const char *buf, size_t count)
711{
712	int depth, retval;
713	struct scsi_device *sdev = to_scsi_device(dev);
714	struct scsi_host_template *sht = sdev->host->hostt;
715
716	if (!sht->change_queue_depth)
717		return -EINVAL;
718
719	depth = simple_strtoul(buf, NULL, 0);
720
721	if (depth < 1)
722		return -EINVAL;
723
724	retval = sht->change_queue_depth(sdev, depth,
725					 SCSI_QDEPTH_DEFAULT);
726	if (retval < 0)
727		return retval;
728
729	sdev->max_queue_depth = sdev->queue_depth;
730
731	return count;
732}
733
734static struct device_attribute sdev_attr_queue_depth_rw =
735	__ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
736	       sdev_store_queue_depth_rw);
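
/*
 * When the LLD implements ->change_queue_depth, scsi_sysfs_add_sdev() below
 * installs this writable variant instead of the read-only dev_attr_queue_depth,
 * e.g. (device name and depth are illustrative):
 *
 *	echo 32 > /sys/block/sda/device/queue_depth
 */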
737
738static ssize_t
739sdev_show_queue_ramp_up_period(struct device *dev,
740			       struct device_attribute *attr,
741			       char *buf)
742{
743	struct scsi_device *sdev;
744	sdev = to_scsi_device(dev);
745	return snprintf(buf, 20, "%u\n",
746			jiffies_to_msecs(sdev->queue_ramp_up_period));
747}
748
749static ssize_t
750sdev_store_queue_ramp_up_period(struct device *dev,
751				struct device_attribute *attr,
752				const char *buf, size_t count)
753{
754	struct scsi_device *sdev = to_scsi_device(dev);
755	unsigned long period;
756
757	if (strict_strtoul(buf, 10, &period))
758		return -EINVAL;
759
760	sdev->queue_ramp_up_period = msecs_to_jiffies(period);
761	return count;
762}
763
764static struct device_attribute sdev_attr_queue_ramp_up_period =
765	__ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
766	       sdev_show_queue_ramp_up_period,
767	       sdev_store_queue_ramp_up_period);
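
/*
 * The ramp up period is shown and set in milliseconds, e.g. (device name and
 * value are illustrative):
 *
 *	echo 120000 > /sys/block/sda/device/queue_ramp_up_period
 */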
768
769static ssize_t
770sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
771			 const char *buf, size_t count)
772{
773	struct scsi_device *sdev = to_scsi_device(dev);
774	struct scsi_host_template *sht = sdev->host->hostt;
775	int tag_type = 0, retval;
776	int prev_tag_type = scsi_get_tag_type(sdev);
777
778	if (!sdev->tagged_supported || !sht->change_queue_type)
779		return -EINVAL;
780
781	if (strncmp(buf, "ordered", 7) == 0)
782		tag_type = MSG_ORDERED_TAG;
783	else if (strncmp(buf, "simple", 6) == 0)
784		tag_type = MSG_SIMPLE_TAG;
785	else if (strncmp(buf, "none", 4) != 0)
786		return -EINVAL;
787
788	if (tag_type == prev_tag_type)
789		return count;
790
791	retval = sht->change_queue_type(sdev, tag_type);
792	if (retval < 0)
793		return retval;
794
795	return count;
796}
797
798static int scsi_target_add(struct scsi_target *starget)
799{
800	int error;
801
802	if (starget->state != STARGET_CREATED)
803		return 0;
804
805	device_enable_async_suspend(&starget->dev);
806
807	error = device_add(&starget->dev);
808	if (error) {
809		dev_err(&starget->dev, "target device_add failed, error %d\n", error);
810		return error;
811	}
812	transport_add_device(&starget->dev);
813	starget->state = STARGET_RUNNING;
814
815	return 0;
816}
817
818static struct device_attribute sdev_attr_queue_type_rw =
819	__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
820	       sdev_store_queue_type_rw);
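
/*
 * Example (the device name is illustrative):
 *
 *	echo simple  > /sys/block/sda/device/queue_type
 *	echo ordered > /sys/block/sda/device/queue_type
 *	echo none    > /sys/block/sda/device/queue_type
 *
 * The writable variant is only installed when the device supports tagged
 * queueing and the LLD implements ->change_queue_type.
 */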
821
822/**
823 * scsi_sysfs_add_sdev - add scsi device to sysfs
824 * @sdev:	scsi_device to add
825 *
826 * Return value:
827 * 	0 on Success / non-zero on Failure
828 **/
829int scsi_sysfs_add_sdev(struct scsi_device *sdev)
830{
831	int error, i;
832	struct request_queue *rq = sdev->request_queue;
833	struct scsi_target *starget = sdev->sdev_target;
834
835	error = scsi_device_set_state(sdev, SDEV_RUNNING);
836	if (error)
837		return error;
838
839	error = scsi_target_add(starget);
840	if (error)
841		return error;
842
843	transport_configure_device(&starget->dev);
844	device_enable_async_suspend(&sdev->sdev_gendev);
845	error = device_add(&sdev->sdev_gendev);
846	if (error) {
847		sdev_printk(KERN_INFO, sdev, "failed to add device: %d\n", error);
848		return error;
849	}
850	device_enable_async_suspend(&sdev->sdev_dev);
851	error = device_add(&sdev->sdev_dev);
852	if (error) {
853		sdev_printk(KERN_INFO, sdev, "failed to add class device: %d\n", error);
854		device_del(&sdev->sdev_gendev);
855		return error;
856	}
857	transport_add_device(&sdev->sdev_gendev);
858	sdev->is_visible = 1;
859
860	/* create queue files, which may be writable, depending on the host */
861	if (sdev->host->hostt->change_queue_depth) {
862		error = device_create_file(&sdev->sdev_gendev,
863					   &sdev_attr_queue_depth_rw);
864		error = device_create_file(&sdev->sdev_gendev,
865					   &sdev_attr_queue_ramp_up_period);
866	}
867	else
868		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
869	if (error)
870		return error;
871
872	if (sdev->host->hostt->change_queue_type)
873		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
874	else
875		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
876	if (error)
877		return error;
878
879	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
880
881	if (error)
882		/* we're treating error on bsg register as non-fatal,
883		 * so pretend nothing went wrong */
884		sdev_printk(KERN_INFO, sdev,
885			    "Failed to register bsg queue, errno=%d\n", error);
886
887	/* add additional host specific attributes */
888	if (sdev->host->hostt->sdev_attrs) {
889		for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
890			error = device_create_file(&sdev->sdev_gendev,
891					sdev->host->hostt->sdev_attrs[i]);
892			if (error)
893				return error;
894		}
895	}
896
897	return error;
898}
899
900void __scsi_remove_device(struct scsi_device *sdev)
901{
902	struct device *dev = &sdev->sdev_gendev;
903
904	if (sdev->is_visible) {
905		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
906			return;
907
908		bsg_unregister_queue(sdev->request_queue);
909		device_unregister(&sdev->sdev_dev);
910		transport_remove_device(dev);
911		device_del(dev);
912	} else
913		put_device(&sdev->sdev_dev);
914	scsi_device_set_state(sdev, SDEV_DEL);
915	if (sdev->host->hostt->slave_destroy)
916		sdev->host->hostt->slave_destroy(sdev);
917	transport_destroy_device(dev);
918	put_device(dev);
919}
920
921/**
922 * scsi_remove_device - unregister a device from the scsi bus
923 * @sdev:	scsi_device to unregister
924 **/
925void scsi_remove_device(struct scsi_device *sdev)
926{
927	struct Scsi_Host *shost = sdev->host;
928
929	mutex_lock(&shost->scan_mutex);
930	__scsi_remove_device(sdev);
931	mutex_unlock(&shost->scan_mutex);
932}
933EXPORT_SYMBOL(scsi_remove_device);
934
935static void __scsi_remove_target(struct scsi_target *starget)
936{
937	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
938	unsigned long flags;
939	struct scsi_device *sdev;
940
941	spin_lock_irqsave(shost->host_lock, flags);
942	starget->reap_ref++;
943 restart:
944	list_for_each_entry(sdev, &shost->__devices, siblings) {
945		if (sdev->channel != starget->channel ||
946		    sdev->id != starget->id ||
947		    sdev->sdev_state == SDEV_DEL)
948			continue;
949		spin_unlock_irqrestore(shost->host_lock, flags);
950		scsi_remove_device(sdev);
951		spin_lock_irqsave(shost->host_lock, flags);
952		goto restart;
953	}
954	spin_unlock_irqrestore(shost->host_lock, flags);
955	scsi_target_reap(starget);
956}
957
958static int __remove_child (struct device * dev, void * data)
959{
960	if (scsi_is_target_device(dev))
961		__scsi_remove_target(to_scsi_target(dev));
962	return 0;
963}
964
965/**
966 * scsi_remove_target - try to remove a target and all its devices
967 * @dev: generic starget or parent of generic stargets to be removed
968 *
969 * Note: This is slightly racy.  If the user requests the addition of
970 * another device while the removal is in progress, the target may not
971 * be removed.
972 */
973void scsi_remove_target(struct device *dev)
974{
975	struct device *rdev;
976
977	if (scsi_is_target_device(dev)) {
978		__scsi_remove_target(to_scsi_target(dev));
979		return;
980	}
981
982	rdev = get_device(dev);
983	device_for_each_child(dev, NULL, __remove_child);
984	put_device(rdev);
985}
986EXPORT_SYMBOL(scsi_remove_target);
987
988int scsi_register_driver(struct device_driver *drv)
989{
990	drv->bus = &scsi_bus_type;
991
992	return driver_register(drv);
993}
994EXPORT_SYMBOL(scsi_register_driver);
995
996int scsi_register_interface(struct class_interface *intf)
997{
998	intf->class = &sdev_class;
999
1000	return class_interface_register(intf);
1001}
1002EXPORT_SYMBOL(scsi_register_interface);
1003
1004/**
1005 * scsi_sysfs_add_host - add scsi host to subsystem
1006 * @shost:     scsi host struct to add to subsystem
1007 **/
1008int scsi_sysfs_add_host(struct Scsi_Host *shost)
1009{
1010	int error, i;
1011
1012	/* add host specific attributes */
1013	if (shost->hostt->shost_attrs) {
1014		for (i = 0; shost->hostt->shost_attrs[i]; i++) {
1015			error = device_create_file(&shost->shost_dev,
1016					shost->hostt->shost_attrs[i]);
1017			if (error)
1018				return error;
1019		}
1020	}
1021
1022	transport_register_device(&shost->shost_gendev);
1023	transport_configure_device(&shost->shost_gendev);
1024	return 0;
1025}
1026
1027static struct device_type scsi_dev_type = {
1028	.name =		"scsi_device",
1029	.release =	scsi_device_dev_release,
1030	.groups =	scsi_sdev_attr_groups,
1031};
1032
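/**
 * scsi_sysfs_device_initialize - set up the sysfs view of a new scsi_device
 * @sdev:	scsi_device being initialized
 *
 * Initializes the embedded sdev_gendev and sdev_dev, names both after the
 * device's <host>:<channel>:<id>:<lun> address, inherits the SCSI level from
 * the target and links the device onto the target's and host's device lists.
 * The device does not become visible in sysfs until scsi_sysfs_add_sdev().
 */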
1033void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1034{
1035	unsigned long flags;
1036	struct Scsi_Host *shost = sdev->host;
1037	struct scsi_target  *starget = sdev->sdev_target;
1038
1039	device_initialize(&sdev->sdev_gendev);
1040	sdev->sdev_gendev.bus = &scsi_bus_type;
1041	sdev->sdev_gendev.type = &scsi_dev_type;
1042	dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
1043		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1044
1045	device_initialize(&sdev->sdev_dev);
1046	sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
1047	sdev->sdev_dev.class = &sdev_class;
1048	dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
1049		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1050	sdev->scsi_level = starget->scsi_level;
1051	transport_setup_device(&sdev->sdev_gendev);
1052	spin_lock_irqsave(shost->host_lock, flags);
1053	list_add_tail(&sdev->same_target_siblings, &starget->devices);
1054	list_add_tail(&sdev->siblings, &shost->__devices);
1055	spin_unlock_irqrestore(shost->host_lock, flags);
1056}
1057
1058int scsi_is_sdev_device(const struct device *dev)
1059{
1060	return dev->type == &scsi_dev_type;
1061}
1062EXPORT_SYMBOL(scsi_is_sdev_device);
1063
1064/* A blank transport template that is used in drivers that don't
1065 * yet implement Transport Attributes */
1066struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
1067