scsi_transport_fc.c revision 5415907af1f5ef80c95147bacbd321b0d4236dd5
1/*
2 *  Fibre Channel transport specific attributes exported to sysfs.
3 *
4 *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
5 *
6 *  This program is free software; you can redistribute it and/or modify
7 *  it under the terms of the GNU General Public License as published by
8 *  the Free Software Foundation; either version 2 of the License, or
9 *  (at your option) any later version.
10 *
11 *  This program is distributed in the hope that it will be useful,
12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 *  GNU General Public License for more details.
15 *
16 *  You should have received a copy of the GNU General Public License
17 *  along with this program; if not, write to the Free Software
18 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19 *
20 *  ========
21 *
22 *  Copyright (C) 2004-2007   James Smart, Emulex Corporation
23 *    Rewrite for host, target, device, and remote port attributes,
24 *    statistics, and service functions...
25 *    Add vports, etc
26 *
27 */
28#include <linux/module.h>
29#include <linux/init.h>
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_cmnd.h>
35#include <linux/netlink.h>
36#include <net/netlink.h>
37#include <scsi/scsi_netlink_fc.h>
38#include <scsi/scsi_bsg_fc.h>
39#include "scsi_priv.h"
40#include "scsi_transport_fc_internal.h"
41
42static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
43static void fc_vport_sched_delete(struct work_struct *work);
44static int fc_vport_setup(struct Scsi_Host *shost, int channel,
45	struct device *pdev, struct fc_vport_identifiers  *ids,
46	struct fc_vport **vport);
47static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
48static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
49static void fc_bsg_remove(struct request_queue *);
50static void fc_bsg_goose_queue(struct fc_rport *);
51
52/*
53 * Redefine so that we can have same named attributes in the
54 * sdev/starget/host objects.
55 */
56#define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store)		\
57struct device_attribute device_attr_##_prefix##_##_name = 	\
58	__ATTR(_name,_mode,_show,_store)
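/*
 * Example (illustrative, not part of the original source): the macro above
 * prefixes the variable name, so an invocation such as
 *
 *	static FC_DEVICE_ATTR(rport, port_id, S_IRUGO,
 *				show_fc_rport_port_id, NULL);
 *
 * declares "struct device_attribute device_attr_rport_port_id", letting the
 * rport, starget, vport and host objects each expose an attribute named
 * "port_id" without their C identifiers colliding.
 */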
59
60#define fc_enum_name_search(title, table_type, table)			\
61static const char *get_fc_##title##_name(enum table_type table_key)	\
62{									\
63	int i;								\
64	char *name = NULL;						\
65									\
66	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
67		if (table[i].value == table_key) {			\
68			name = table[i].name;				\
69			break;						\
70		}							\
71	}								\
72	return name;							\
73}
74
75#define fc_enum_name_match(title, table_type, table)			\
76static int get_fc_##title##_match(const char *table_key,		\
77		enum table_type *value)					\
78{									\
79	int i;								\
80									\
81	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
82		if (strncmp(table_key, table[i].name,			\
83				table[i].matchlen) == 0) {		\
84			*value = table[i].value;			\
85			return 0; /* success */				\
86		}							\
87	}								\
88	return 1; /* failure */						\
89}
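/*
 * Example (illustrative only): instantiating the macros above as
 *
 *	fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
 *
 * generates "static const char *get_fc_port_type_name(enum fc_port_type)",
 * which returns e.g. "NPort (fabric via point-to-point)" for
 * FC_PORTTYPE_NPORT, or NULL if the value is not in the table. The _match
 * variant does the reverse lookup for sysfs store functions, comparing only
 * the first "matchlen" characters of the user-supplied string.
 */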
90
91
92/* Convert fc_port_type values to ascii string name */
93static struct {
94	enum fc_port_type	value;
95	char			*name;
96} fc_port_type_names[] = {
97	{ FC_PORTTYPE_UNKNOWN,		"Unknown" },
98	{ FC_PORTTYPE_OTHER,		"Other" },
99	{ FC_PORTTYPE_NOTPRESENT,	"Not Present" },
100	{ FC_PORTTYPE_NPORT,	"NPort (fabric via point-to-point)" },
101	{ FC_PORTTYPE_NLPORT,	"NLPort (fabric via loop)" },
102	{ FC_PORTTYPE_LPORT,	"LPort (private loop)" },
103	{ FC_PORTTYPE_PTP,	"Point-To-Point (direct nport connection)" },
104	{ FC_PORTTYPE_NPIV,		"NPIV VPORT" },
105};
106fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
107#define FC_PORTTYPE_MAX_NAMELEN		50
108
109/* Reuse fc_port_type enum function for vport_type */
110#define get_fc_vport_type_name get_fc_port_type_name
111
112
113/* Convert fc_host_event_code values to ascii string name */
114static const struct {
115	enum fc_host_event_code		value;
116	char				*name;
117} fc_host_event_code_names[] = {
118	{ FCH_EVT_LIP,			"lip" },
119	{ FCH_EVT_LINKUP,		"link_up" },
120	{ FCH_EVT_LINKDOWN,		"link_down" },
121	{ FCH_EVT_LIPRESET,		"lip_reset" },
122	{ FCH_EVT_RSCN,			"rscn" },
123	{ FCH_EVT_ADAPTER_CHANGE,	"adapter_chg" },
124	{ FCH_EVT_PORT_UNKNOWN,		"port_unknown" },
125	{ FCH_EVT_PORT_ONLINE,		"port_online" },
126	{ FCH_EVT_PORT_OFFLINE,		"port_offline" },
127	{ FCH_EVT_PORT_FABRIC,		"port_fabric" },
128	{ FCH_EVT_LINK_UNKNOWN,		"link_unknown" },
129	{ FCH_EVT_VENDOR_UNIQUE,	"vendor_unique" },
130};
131fc_enum_name_search(host_event_code, fc_host_event_code,
132		fc_host_event_code_names)
133#define FC_HOST_EVENT_CODE_MAX_NAMELEN	30
134
135
136/* Convert fc_port_state values to ascii string name */
137static struct {
138	enum fc_port_state	value;
139	char			*name;
140} fc_port_state_names[] = {
141	{ FC_PORTSTATE_UNKNOWN,		"Unknown" },
142	{ FC_PORTSTATE_NOTPRESENT,	"Not Present" },
143	{ FC_PORTSTATE_ONLINE,		"Online" },
144	{ FC_PORTSTATE_OFFLINE,		"Offline" },
145	{ FC_PORTSTATE_BLOCKED,		"Blocked" },
146	{ FC_PORTSTATE_BYPASSED,	"Bypassed" },
147	{ FC_PORTSTATE_DIAGNOSTICS,	"Diagnostics" },
148	{ FC_PORTSTATE_LINKDOWN,	"Linkdown" },
149	{ FC_PORTSTATE_ERROR,		"Error" },
150	{ FC_PORTSTATE_LOOPBACK,	"Loopback" },
151	{ FC_PORTSTATE_DELETED,		"Deleted" },
152};
153fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
154#define FC_PORTSTATE_MAX_NAMELEN	20
155
156
157/* Convert fc_vport_state values to ascii string name */
158static struct {
159	enum fc_vport_state	value;
160	char			*name;
161} fc_vport_state_names[] = {
162	{ FC_VPORT_UNKNOWN,		"Unknown" },
163	{ FC_VPORT_ACTIVE,		"Active" },
164	{ FC_VPORT_DISABLED,		"Disabled" },
165	{ FC_VPORT_LINKDOWN,		"Linkdown" },
166	{ FC_VPORT_INITIALIZING,	"Initializing" },
167	{ FC_VPORT_NO_FABRIC_SUPP,	"No Fabric Support" },
168	{ FC_VPORT_NO_FABRIC_RSCS,	"No Fabric Resources" },
169	{ FC_VPORT_FABRIC_LOGOUT,	"Fabric Logout" },
170	{ FC_VPORT_FABRIC_REJ_WWN,	"Fabric Rejected WWN" },
171	{ FC_VPORT_FAILED,		"VPort Failed" },
172};
173fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
174#define FC_VPORTSTATE_MAX_NAMELEN	24
175
176/* Reuse fc_vport_state enum function for vport_last_state */
177#define get_fc_vport_last_state_name get_fc_vport_state_name
178
179
180/* Convert fc_tgtid_binding_type values to ascii string name */
181static const struct {
182	enum fc_tgtid_binding_type	value;
183	char				*name;
184	int				matchlen;
185} fc_tgtid_binding_type_names[] = {
186	{ FC_TGTID_BIND_NONE, "none", 4 },
187	{ FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
188	{ FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
189	{ FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
190};
191fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
192		fc_tgtid_binding_type_names)
193fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
194		fc_tgtid_binding_type_names)
195#define FC_BINDTYPE_MAX_NAMELEN	30
196
197
198#define fc_bitfield_name_search(title, table)			\
199static ssize_t							\
200get_fc_##title##_names(u32 table_key, char *buf)		\
201{								\
202	char *prefix = "";					\
203	ssize_t len = 0;					\
204	int i;							\
205								\
206	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
207		if (table[i].value & table_key) {		\
208			len += sprintf(buf + len, "%s%s",	\
209				prefix, table[i].name);		\
210			prefix = ", ";				\
211		}						\
212	}							\
213	len += sprintf(buf + len, "\n");			\
214	return len;						\
215}
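/*
 * Example (illustrative only): fc_bitfield_name_search(cos, fc_cos_names)
 * below generates get_fc_cos_names(u32, char *), which expands a bitmask
 * into a comma-separated list, e.g.
 *
 *	get_fc_cos_names(FC_COS_CLASS2 | FC_COS_CLASS3, buf);
 *	// buf now contains "Class 2, Class 3\n"
 */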
216
217
218/* Convert FC_COS bit values to ascii string name */
219static const struct {
220	u32 			value;
221	char			*name;
222} fc_cos_names[] = {
223	{ FC_COS_CLASS1,	"Class 1" },
224	{ FC_COS_CLASS2,	"Class 2" },
225	{ FC_COS_CLASS3,	"Class 3" },
226	{ FC_COS_CLASS4,	"Class 4" },
227	{ FC_COS_CLASS6,	"Class 6" },
228};
229fc_bitfield_name_search(cos, fc_cos_names)
230
231
232/* Convert FC_PORTSPEED bit values to ascii string name */
233static const struct {
234	u32 			value;
235	char			*name;
236} fc_port_speed_names[] = {
237	{ FC_PORTSPEED_1GBIT,		"1 Gbit" },
238	{ FC_PORTSPEED_2GBIT,		"2 Gbit" },
239	{ FC_PORTSPEED_4GBIT,		"4 Gbit" },
240	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
241	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
242	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
243	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
244};
245fc_bitfield_name_search(port_speed, fc_port_speed_names)
246
247
248static int
249show_fc_fc4s (char *buf, u8 *fc4_list)
250{
251	int i, len=0;
252
253	for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
254		len += sprintf(buf + len , "0x%02x ", *fc4_list);
255	len += sprintf(buf + len, "\n");
256	return len;
257}
258
259
260/* Convert FC_PORT_ROLE bit values to ascii string name */
261static const struct {
262	u32 			value;
263	char			*name;
264} fc_port_role_names[] = {
265	{ FC_PORT_ROLE_FCP_TARGET,	"FCP Target" },
266	{ FC_PORT_ROLE_FCP_INITIATOR,	"FCP Initiator" },
267	{ FC_PORT_ROLE_IP_PORT,		"IP Port" },
268};
269fc_bitfield_name_search(port_roles, fc_port_role_names)
270
271/*
272 * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
273 */
274#define FC_WELLKNOWN_PORTID_MASK	0xfffff0
275#define FC_WELLKNOWN_ROLE_MASK  	0x00000f
276#define FC_FPORT_PORTID			0x00000e
277#define FC_FABCTLR_PORTID		0x00000d
278#define FC_DIRSRVR_PORTID		0x00000c
279#define FC_TIMESRVR_PORTID		0x00000b
280#define FC_MGMTSRVR_PORTID		0x00000a
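/*
 * Example (illustrative only): show_fc_rport_roles() further below uses
 * these masks to decode the well-known fabric addresses. A remote port
 * whose port_id is 0xFFFFFE matches FC_WELLKNOWN_PORTID_MASK and, with its
 * low nibble equal to FC_FPORT_PORTID (0xE), is reported as "Fabric Port"
 * rather than by its FCP roles.
 */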
281
282
283static void fc_timeout_deleted_rport(struct work_struct *work);
284static void fc_timeout_fail_rport_io(struct work_struct *work);
285static void fc_scsi_scan_rport(struct work_struct *work);
286
287/*
288 * Attribute counts per object type...
289 * Increase these values if you add attributes
290 */
291#define FC_STARGET_NUM_ATTRS 	3
292#define FC_RPORT_NUM_ATTRS	10
293#define FC_VPORT_NUM_ATTRS	9
294#define FC_HOST_NUM_ATTRS	22
295
296struct fc_internal {
297	struct scsi_transport_template t;
298	struct fc_function_template *f;
299
300	/*
301	 * For attributes : each object has :
302	 *   An array of the actual attributes structures
303	 *   An array of null-terminated pointers to the attribute
304	 *     structures - used for mid-layer interaction.
305	 *
306	 * The attribute containers for the starget and host are
307	 * part of the midlayer. As the remote port is specific to the
308	 * fc transport, we must provide the attribute container.
309	 */
310	struct device_attribute private_starget_attrs[
311							FC_STARGET_NUM_ATTRS];
312	struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
313
314	struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
315	struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
316
317	struct transport_container rport_attr_cont;
318	struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
319	struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
320
321	struct transport_container vport_attr_cont;
322	struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
323	struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
324};
325
326#define to_fc_internal(tmpl)	container_of(tmpl, struct fc_internal, t)
327
328static int fc_target_setup(struct transport_container *tc, struct device *dev,
329			   struct device *cdev)
330{
331	struct scsi_target *starget = to_scsi_target(dev);
332	struct fc_rport *rport = starget_to_rport(starget);
333
334	/*
335	 * if parent is remote port, use values from remote port.
336	 * Otherwise, this host uses the fc_transport, but not the
337	 * remote port interface. As such, initialize to known non-values.
338	 */
339	if (rport) {
340		fc_starget_node_name(starget) = rport->node_name;
341		fc_starget_port_name(starget) = rport->port_name;
342		fc_starget_port_id(starget) = rport->port_id;
343	} else {
344		fc_starget_node_name(starget) = -1;
345		fc_starget_port_name(starget) = -1;
346		fc_starget_port_id(starget) = -1;
347	}
348
349	return 0;
350}
351
352static DECLARE_TRANSPORT_CLASS(fc_transport_class,
353			       "fc_transport",
354			       fc_target_setup,
355			       NULL,
356			       NULL);
357
358static int fc_host_setup(struct transport_container *tc, struct device *dev,
359			 struct device *cdev)
360{
361	struct Scsi_Host *shost = dev_to_shost(dev);
362	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
363
364	/*
365	 * Set default values easily detected by the midlayer as
366	 * failure cases.  The scsi lldd is responsible for initializing
367	 * all transport attributes to valid values per host.
368	 */
369	fc_host->node_name = -1;
370	fc_host->port_name = -1;
371	fc_host->permanent_port_name = -1;
372	fc_host->supported_classes = FC_COS_UNSPECIFIED;
373	memset(fc_host->supported_fc4s, 0,
374		sizeof(fc_host->supported_fc4s));
375	fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
376	fc_host->maxframe_size = -1;
377	fc_host->max_npiv_vports = 0;
378	memset(fc_host->serial_number, 0,
379		sizeof(fc_host->serial_number));
380
381	fc_host->port_id = -1;
382	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
383	fc_host->port_state = FC_PORTSTATE_UNKNOWN;
384	memset(fc_host->active_fc4s, 0,
385		sizeof(fc_host->active_fc4s));
386	fc_host->speed = FC_PORTSPEED_UNKNOWN;
387	fc_host->fabric_name = -1;
388	memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
389	memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
390
391	fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
392
393	INIT_LIST_HEAD(&fc_host->rports);
394	INIT_LIST_HEAD(&fc_host->rport_bindings);
395	INIT_LIST_HEAD(&fc_host->vports);
396	fc_host->next_rport_number = 0;
397	fc_host->next_target_id = 0;
398	fc_host->next_vport_number = 0;
399	fc_host->npiv_vports_inuse = 0;
400
401	snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
402		 "fc_wq_%d", shost->host_no);
403	fc_host->work_q = create_singlethread_workqueue(
404					fc_host->work_q_name);
405	if (!fc_host->work_q)
406		return -ENOMEM;
407
408	snprintf(fc_host->devloss_work_q_name,
409		 sizeof(fc_host->devloss_work_q_name),
410		 "fc_dl_%d", shost->host_no);
411	fc_host->devloss_work_q = create_singlethread_workqueue(
412					fc_host->devloss_work_q_name);
413	if (!fc_host->devloss_work_q) {
414		destroy_workqueue(fc_host->work_q);
415		fc_host->work_q = NULL;
416		return -ENOMEM;
417	}
418
419	fc_bsg_hostadd(shost, fc_host);
420	/* ignore any bsg add error - we just can't do sgio */
421
422	return 0;
423}
424
425static int fc_host_remove(struct transport_container *tc, struct device *dev,
426			 struct device *cdev)
427{
428	struct Scsi_Host *shost = dev_to_shost(dev);
429	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
430
431	fc_bsg_remove(fc_host->rqst_q);
432	return 0;
433}
434
435static DECLARE_TRANSPORT_CLASS(fc_host_class,
436			       "fc_host",
437			       fc_host_setup,
438			       fc_host_remove,
439			       NULL);
440
441/*
442 * Setup and Remove actions for remote ports are handled
443 * in the service functions below.
444 */
445static DECLARE_TRANSPORT_CLASS(fc_rport_class,
446			       "fc_remote_ports",
447			       NULL,
448			       NULL,
449			       NULL);
450
451/*
452 * Setup and Remove actions for virtual ports are handled
453 * in the service functions below.
454 */
455static DECLARE_TRANSPORT_CLASS(fc_vport_class,
456			       "fc_vports",
457			       NULL,
458			       NULL,
459			       NULL);
460
461/*
462 * Module Parameters
463 */
464
465/*
466 * dev_loss_tmo: the default number of seconds that the FC transport
467 *   should insulate against the loss of a remote port.
468 *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
469 */
470static unsigned int fc_dev_loss_tmo = 60;		/* seconds */
471
472module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
473MODULE_PARM_DESC(dev_loss_tmo,
474		 "Maximum number of seconds that the FC transport should"
475		 " insulate against the loss of a remote port. Once this value is"
476		 " exceeded, the scsi target is removed. Value should be"
477		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
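/*
 * Usage note (paths assumed, for illustration only): when built as a module
 * the default can be set at load time, e.g.
 *	modprobe scsi_transport_fc dev_loss_tmo=30
 * and, since the parameter is S_IRUGO|S_IWUSR, changed later through
 *	/sys/module/scsi_transport_fc/parameters/dev_loss_tmo
 * Per-rport values remain adjustable via each remote port's own
 * dev_loss_tmo attribute defined further below.
 */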
478
479/*
480 * Netlink Infrastructure
481 */
482
483static atomic_t fc_event_seq;
484
485/**
486 * fc_get_event_number - Obtain the next sequential FC event number
487 *
488 * Notes:
489 *   We could have inlined this, but it would have required fc_event_seq to
490 *   be exposed. For now, live with the subroutine call.
491 *   Atomic used to avoid lock/unlock...
492 */
493u32
494fc_get_event_number(void)
495{
496	return atomic_add_return(1, &fc_event_seq);
497}
498EXPORT_SYMBOL(fc_get_event_number);
499
500
501/**
502 * fc_host_post_event - called to post an event on an fc_host.
503 * @shost:		host the event occurred on
504 * @event_number:	fc event number obtained from get_fc_event_number()
505 * @event_code:		fc_host event being posted
506 * @event_data:		32bits of data for the event being posted
507 *
508 * Notes:
509 *	This routine assumes no locks are held on entry.
510 */
511void
512fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
513		enum fc_host_event_code event_code, u32 event_data)
514{
515	struct sk_buff *skb;
516	struct nlmsghdr	*nlh;
517	struct fc_nl_event *event;
518	const char *name;
519	u32 len, skblen;
520	int err;
521
522	if (!scsi_nl_sock) {
523		err = -ENOENT;
524		goto send_fail;
525	}
526
527	len = FC_NL_MSGALIGN(sizeof(*event));
528	skblen = NLMSG_SPACE(len);
529
530	skb = alloc_skb(skblen, GFP_KERNEL);
531	if (!skb) {
532		err = -ENOBUFS;
533		goto send_fail;
534	}
535
536	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
537				skblen - sizeof(*nlh), 0);
538	if (!nlh) {
539		err = -ENOBUFS;
540		goto send_fail_skb;
541	}
542	event = NLMSG_DATA(nlh);
543
544	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
545				FC_NL_ASYNC_EVENT, len);
546	event->seconds = get_seconds();
547	event->vendor_id = 0;
548	event->host_no = shost->host_no;
549	event->event_datalen = sizeof(u32);	/* bytes */
550	event->event_num = event_number;
551	event->event_code = event_code;
552	event->event_data = event_data;
553
554	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
555			GFP_KERNEL);
556	return;
557
558send_fail_skb:
559	kfree_skb(skb);
560send_fail:
561	name = get_fc_host_event_code_name(event_code);
562	printk(KERN_WARNING
563		"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
564		__func__, shost->host_no,
565		(name) ? name : "<unknown>", event_data, err);
566	return;
567}
568EXPORT_SYMBOL(fc_host_post_event);
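/*
 * Example LLDD usage (illustrative sketch only, not part of this file):
 * a driver that detects a link-up condition would typically post
 *
 *	fc_host_post_event(shost, fc_get_event_number(),
 *			   FCH_EVT_LINKUP, 0);
 *
 * from process context, since the routine allocates the netlink skb with
 * GFP_KERNEL and assumes no locks are held on entry.
 */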
569
570
571/**
572 * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
573 * @shost:		host the event occurred on
574 * @event_number:	fc event number obtained from get_fc_event_number()
575 * @data_len:		amount, in bytes, of vendor unique data
576 * @data_buf:		pointer to vendor unique data
577 * @vendor_id:          Vendor id
578 *
579 * Notes:
580 *	This routine assumes no locks are held on entry.
581 */
582void
583fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
584		u32 data_len, char * data_buf, u64 vendor_id)
585{
586	struct sk_buff *skb;
587	struct nlmsghdr	*nlh;
588	struct fc_nl_event *event;
589	u32 len, skblen;
590	int err;
591
592	if (!scsi_nl_sock) {
593		err = -ENOENT;
594		goto send_vendor_fail;
595	}
596
597	len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
598	skblen = NLMSG_SPACE(len);
599
600	skb = alloc_skb(skblen, GFP_KERNEL);
601	if (!skb) {
602		err = -ENOBUFS;
603		goto send_vendor_fail;
604	}
605
606	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
607				skblen - sizeof(*nlh), 0);
608	if (!nlh) {
609		err = -ENOBUFS;
610		goto send_vendor_fail_skb;
611	}
612	event = NLMSG_DATA(nlh);
613
614	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
615				FC_NL_ASYNC_EVENT, len);
616	event->seconds = get_seconds();
617	event->vendor_id = vendor_id;
618	event->host_no = shost->host_no;
619	event->event_datalen = data_len;	/* bytes */
620	event->event_num = event_number;
621	event->event_code = FCH_EVT_VENDOR_UNIQUE;
622	memcpy(&event->event_data, data_buf, data_len);
623
624	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
625			GFP_KERNEL);
626	return;
627
628send_vendor_fail_skb:
629	kfree_skb(skb);
630send_vendor_fail:
631	printk(KERN_WARNING
632		"%s: Dropped Event : host %d vendor_unique - err %d\n",
633		__func__, shost->host_no, err);
634	return;
635}
636EXPORT_SYMBOL(fc_host_post_vendor_event);
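/*
 * Example LLDD usage (illustrative sketch only): vendor-specific data is
 * posted the same way, with the payload copied into the netlink message:
 *
 *	u32 diag[2] = { reg0, reg1 };	// hypothetical driver-owned values
 *	fc_host_post_vendor_event(shost, fc_get_event_number(),
 *				  sizeof(diag), (char *)diag, vendor_oui);
 *
 * "reg0", "reg1" and "vendor_oui" are placeholders, not symbols defined here.
 */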
637
638
639
640static __init int fc_transport_init(void)
641{
642	int error;
643
644	atomic_set(&fc_event_seq, 0);
645
646	error = transport_class_register(&fc_host_class);
647	if (error)
648		return error;
649	error = transport_class_register(&fc_vport_class);
650	if (error)
651		return error;
652	error = transport_class_register(&fc_rport_class);
653	if (error)
654		return error;
655	return transport_class_register(&fc_transport_class);
656}
657
658static void __exit fc_transport_exit(void)
659{
660	transport_class_unregister(&fc_transport_class);
661	transport_class_unregister(&fc_rport_class);
662	transport_class_unregister(&fc_host_class);
663	transport_class_unregister(&fc_vport_class);
664}
665
666/*
667 * FC Remote Port Attribute Management
668 */
669
670#define fc_rport_show_function(field, format_string, sz, cast)		\
671static ssize_t								\
672show_fc_rport_##field (struct device *dev, 				\
673		       struct device_attribute *attr, char *buf)	\
674{									\
675	struct fc_rport *rport = transport_class_to_rport(dev);		\
676	struct Scsi_Host *shost = rport_to_shost(rport);		\
677	struct fc_internal *i = to_fc_internal(shost->transportt);	\
678	if ((i->f->get_rport_##field) &&				\
679	    !((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
680	      (rport->port_state == FC_PORTSTATE_DELETED) ||		\
681	      (rport->port_state == FC_PORTSTATE_NOTPRESENT)))		\
682		i->f->get_rport_##field(rport);				\
683	return snprintf(buf, sz, format_string, cast rport->field); 	\
684}
685
686#define fc_rport_store_function(field)					\
687static ssize_t								\
688store_fc_rport_##field(struct device *dev,				\
689		       struct device_attribute *attr,			\
690		       const char *buf,	size_t count)			\
691{									\
692	int val;							\
693	struct fc_rport *rport = transport_class_to_rport(dev);		\
694	struct Scsi_Host *shost = rport_to_shost(rport);		\
695	struct fc_internal *i = to_fc_internal(shost->transportt);	\
696	char *cp;							\
697	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
698	    (rport->port_state == FC_PORTSTATE_DELETED) ||		\
699	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))		\
700		return -EBUSY;						\
701	val = simple_strtoul(buf, &cp, 0);				\
702	if (*cp && (*cp != '\n'))					\
703		return -EINVAL;						\
704	i->f->set_rport_##field(rport, val);				\
705	return count;							\
706}
707
708#define fc_rport_rd_attr(field, format_string, sz)			\
709	fc_rport_show_function(field, format_string, sz, )		\
710static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
711			 show_fc_rport_##field, NULL)
712
713#define fc_rport_rd_attr_cast(field, format_string, sz, cast)		\
714	fc_rport_show_function(field, format_string, sz, (cast))	\
715static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
716			  show_fc_rport_##field, NULL)
717
718#define fc_rport_rw_attr(field, format_string, sz)			\
719	fc_rport_show_function(field, format_string, sz, )		\
720	fc_rport_store_function(field)					\
721static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR,		\
722			show_fc_rport_##field,				\
723			store_fc_rport_##field)
724
725
726#define fc_private_rport_show_function(field, format_string, sz, cast)	\
727static ssize_t								\
728show_fc_rport_##field (struct device *dev, 				\
729		       struct device_attribute *attr, char *buf)	\
730{									\
731	struct fc_rport *rport = transport_class_to_rport(dev);		\
732	return snprintf(buf, sz, format_string, cast rport->field); 	\
733}
734
735#define fc_private_rport_rd_attr(field, format_string, sz)		\
736	fc_private_rport_show_function(field, format_string, sz, )	\
737static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
738			 show_fc_rport_##field, NULL)
739
740#define fc_private_rport_rd_attr_cast(field, format_string, sz, cast)	\
741	fc_private_rport_show_function(field, format_string, sz, (cast)) \
742static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
743			  show_fc_rport_##field, NULL)
744
745
746#define fc_private_rport_rd_enum_attr(title, maxlen)			\
747static ssize_t								\
748show_fc_rport_##title (struct device *dev,				\
749		       struct device_attribute *attr, char *buf)	\
750{									\
751	struct fc_rport *rport = transport_class_to_rport(dev);		\
752	const char *name;						\
753	name = get_fc_##title##_name(rport->title);			\
754	if (!name)							\
755		return -EINVAL;						\
756	return snprintf(buf, maxlen, "%s\n", name);			\
757}									\
758static FC_DEVICE_ATTR(rport, title, S_IRUGO,			\
759			show_fc_rport_##title, NULL)
760
761
762#define SETUP_RPORT_ATTRIBUTE_RD(field)					\
763	i->private_rport_attrs[count] = device_attr_rport_##field; \
764	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
765	i->private_rport_attrs[count].store = NULL;			\
766	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
767	if (i->f->show_rport_##field)					\
768		count++
769
770#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field)				\
771	i->private_rport_attrs[count] = device_attr_rport_##field; \
772	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
773	i->private_rport_attrs[count].store = NULL;			\
774	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
775	count++
776
777#define SETUP_RPORT_ATTRIBUTE_RW(field)					\
778	i->private_rport_attrs[count] = device_attr_rport_##field; \
779	if (!i->f->set_rport_##field) {					\
780		i->private_rport_attrs[count].attr.mode = S_IRUGO;	\
781		i->private_rport_attrs[count].store = NULL;		\
782	}								\
783	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
784	if (i->f->show_rport_##field)					\
785		count++
786
787#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field)				\
788{									\
789	i->private_rport_attrs[count] = device_attr_rport_##field; \
790	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
791	count++;							\
792}
793
794
795/* The FC Transport Remote Port Attributes: */
796
797/* Fixed Remote Port Attributes */
798
799fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
800
801static ssize_t
802show_fc_rport_supported_classes (struct device *dev,
803				 struct device_attribute *attr, char *buf)
804{
805	struct fc_rport *rport = transport_class_to_rport(dev);
806	if (rport->supported_classes == FC_COS_UNSPECIFIED)
807		return snprintf(buf, 20, "unspecified\n");
808	return get_fc_cos_names(rport->supported_classes, buf);
809}
810static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
811		show_fc_rport_supported_classes, NULL);
812
813/* Dynamic Remote Port Attributes */
814
815/*
816 * dev_loss_tmo attribute
817 */
818fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
819static ssize_t
820store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
821			    const char *buf, size_t count)
822{
823	int val;
824	struct fc_rport *rport = transport_class_to_rport(dev);
825	struct Scsi_Host *shost = rport_to_shost(rport);
826	struct fc_internal *i = to_fc_internal(shost->transportt);
827	char *cp;
828	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
829	    (rport->port_state == FC_PORTSTATE_DELETED) ||
830	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
831		return -EBUSY;
832	val = simple_strtoul(buf, &cp, 0);
833	if ((*cp && (*cp != '\n')) ||
834	    (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
835		return -EINVAL;
836	i->f->set_rport_dev_loss_tmo(rport, val);
837	return count;
838}
839static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
840		show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
841
842
843/* Private Remote Port Attributes */
844
845fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
846fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
847fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
848
849static ssize_t
850show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
851		     char *buf)
852{
853	struct fc_rport *rport = transport_class_to_rport(dev);
854
855	/* identify any roles that are port_id specific */
856	if ((rport->port_id != -1) &&
857	    (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
858					FC_WELLKNOWN_PORTID_MASK) {
859		switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
860		case FC_FPORT_PORTID:
861			return snprintf(buf, 30, "Fabric Port\n");
862		case FC_FABCTLR_PORTID:
863			return snprintf(buf, 30, "Fabric Controller\n");
864		case FC_DIRSRVR_PORTID:
865			return snprintf(buf, 30, "Directory Server\n");
866		case FC_TIMESRVR_PORTID:
867			return snprintf(buf, 30, "Time Server\n");
868		case FC_MGMTSRVR_PORTID:
869			return snprintf(buf, 30, "Management Server\n");
870		default:
871			return snprintf(buf, 30, "Unknown Fabric Entity\n");
872		}
873	} else {
874		if (rport->roles == FC_PORT_ROLE_UNKNOWN)
875			return snprintf(buf, 20, "unknown\n");
876		return get_fc_port_roles_names(rport->roles, buf);
877	}
878}
879static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
880		show_fc_rport_roles, NULL);
881
882fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
883fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
884
885/*
886 * fast_io_fail_tmo attribute
887 */
888static ssize_t
889show_fc_rport_fast_io_fail_tmo (struct device *dev,
890				struct device_attribute *attr, char *buf)
891{
892	struct fc_rport *rport = transport_class_to_rport(dev);
893
894	if (rport->fast_io_fail_tmo == -1)
895		return snprintf(buf, 5, "off\n");
896	return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
897}
898
899static ssize_t
900store_fc_rport_fast_io_fail_tmo(struct device *dev,
901				struct device_attribute *attr, const char *buf,
902				size_t count)
903{
904	int val;
905	char *cp;
906	struct fc_rport *rport = transport_class_to_rport(dev);
907
908	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
909	    (rport->port_state == FC_PORTSTATE_DELETED) ||
910	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
911		return -EBUSY;
912	if (strncmp(buf, "off", 3) == 0)
913		rport->fast_io_fail_tmo = -1;
914	else {
915		val = simple_strtoul(buf, &cp, 0);
916		if ((*cp && (*cp != '\n')) ||
917		    (val < 0) || (val >= rport->dev_loss_tmo))
918			return -EINVAL;
919		rport->fast_io_fail_tmo = val;
920	}
921	return count;
922}
923static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
924	show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
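/*
 * Usage note (sysfs paths assumed for illustration): both timers are exposed
 * per remote port, typically under
 *	/sys/class/fc_remote_ports/rport-<host>:<channel>-<number>/
 * Writing "off" to fast_io_fail_tmo disables it; otherwise the value must be
 * smaller than the port's current dev_loss_tmo, e.g.
 *	echo 5  > .../fast_io_fail_tmo
 *	echo 60 > .../dev_loss_tmo
 * Writes are rejected with -EBUSY while the port is blocked, deleted or
 * not present.
 */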
925
926
927/*
928 * FC SCSI Target Attribute Management
929 */
930
931/*
932 * Note: in the target show function we recognize when the remote
933 *  port is in the hierarchy and do not allow the driver to get
934 *  involved in sysfs functions. The driver only gets involved if
935 *  it's the "old" style that doesn't use rports.
936 */
937#define fc_starget_show_function(field, format_string, sz, cast)	\
938static ssize_t								\
939show_fc_starget_##field (struct device *dev, 				\
940			 struct device_attribute *attr, char *buf)	\
941{									\
942	struct scsi_target *starget = transport_class_to_starget(dev);	\
943	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
944	struct fc_internal *i = to_fc_internal(shost->transportt);	\
945	struct fc_rport *rport = starget_to_rport(starget);		\
946	if (rport)							\
947		fc_starget_##field(starget) = rport->field;		\
948	else if (i->f->get_starget_##field)				\
949		i->f->get_starget_##field(starget);			\
950	return snprintf(buf, sz, format_string, 			\
951		cast fc_starget_##field(starget)); 			\
952}
953
954#define fc_starget_rd_attr(field, format_string, sz)			\
955	fc_starget_show_function(field, format_string, sz, )		\
956static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
957			 show_fc_starget_##field, NULL)
958
959#define fc_starget_rd_attr_cast(field, format_string, sz, cast)		\
960	fc_starget_show_function(field, format_string, sz, (cast))	\
961static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
962			  show_fc_starget_##field, NULL)
963
964#define SETUP_STARGET_ATTRIBUTE_RD(field)				\
965	i->private_starget_attrs[count] = device_attr_starget_##field; \
966	i->private_starget_attrs[count].attr.mode = S_IRUGO;		\
967	i->private_starget_attrs[count].store = NULL;			\
968	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
969	if (i->f->show_starget_##field)					\
970		count++
971
972#define SETUP_STARGET_ATTRIBUTE_RW(field)				\
973	i->private_starget_attrs[count] = device_attr_starget_##field; \
974	if (!i->f->set_starget_##field) {				\
975		i->private_starget_attrs[count].attr.mode = S_IRUGO;	\
976		i->private_starget_attrs[count].store = NULL;		\
977	}								\
978	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
979	if (i->f->show_starget_##field)					\
980		count++
981
982/* The FC Transport SCSI Target Attributes: */
983fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
984fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
985fc_starget_rd_attr(port_id, "0x%06x\n", 20);
986
987
988/*
989 * FC Virtual Port Attribute Management
990 */
991
992#define fc_vport_show_function(field, format_string, sz, cast)		\
993static ssize_t								\
994show_fc_vport_##field (struct device *dev, 				\
995		       struct device_attribute *attr, char *buf)	\
996{									\
997	struct fc_vport *vport = transport_class_to_vport(dev);		\
998	struct Scsi_Host *shost = vport_to_shost(vport);		\
999	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1000	if ((i->f->get_vport_##field) &&				\
1001	    !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)))	\
1002		i->f->get_vport_##field(vport);				\
1003	return snprintf(buf, sz, format_string, cast vport->field); 	\
1004}
1005
1006#define fc_vport_store_function(field)					\
1007static ssize_t								\
1008store_fc_vport_##field(struct device *dev,				\
1009		       struct device_attribute *attr,			\
1010		       const char *buf,	size_t count)			\
1011{									\
1012	int val;							\
1013	struct fc_vport *vport = transport_class_to_vport(dev);		\
1014	struct Scsi_Host *shost = vport_to_shost(vport);		\
1015	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1016	char *cp;							\
1017	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))	\
1018		return -EBUSY;						\
1019	val = simple_strtoul(buf, &cp, 0);				\
1020	if (*cp && (*cp != '\n'))					\
1021		return -EINVAL;						\
1022	i->f->set_vport_##field(vport, val);				\
1023	return count;							\
1024}
1025
1026#define fc_vport_store_str_function(field, slen)			\
1027static ssize_t								\
1028store_fc_vport_##field(struct device *dev,				\
1029		       struct device_attribute *attr, 			\
1030		       const char *buf,	size_t count)			\
1031{									\
1032	struct fc_vport *vport = transport_class_to_vport(dev);		\
1033	struct Scsi_Host *shost = vport_to_shost(vport);		\
1034	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1035	unsigned int cnt=count;						\
1036									\
1037	/* count may include a LF at end of string */			\
1038	if (buf[cnt-1] == '\n')						\
1039		cnt--;							\
1040	if (cnt > ((slen) - 1))						\
1041		return -EINVAL;						\
1042	memcpy(vport->field, buf, cnt);					\
1043	i->f->set_vport_##field(vport);					\
1044	return count;							\
1045}
1046
1047#define fc_vport_rd_attr(field, format_string, sz)			\
1048	fc_vport_show_function(field, format_string, sz, )		\
1049static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1050			 show_fc_vport_##field, NULL)
1051
1052#define fc_vport_rd_attr_cast(field, format_string, sz, cast)		\
1053	fc_vport_show_function(field, format_string, sz, (cast))	\
1054static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1055			  show_fc_vport_##field, NULL)
1056
1057#define fc_vport_rw_attr(field, format_string, sz)			\
1058	fc_vport_show_function(field, format_string, sz, )		\
1059	fc_vport_store_function(field)					\
1060static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1061			show_fc_vport_##field,				\
1062			store_fc_vport_##field)
1063
1064#define fc_private_vport_show_function(field, format_string, sz, cast)	\
1065static ssize_t								\
1066show_fc_vport_##field (struct device *dev,				\
1067		       struct device_attribute *attr, char *buf)	\
1068{									\
1069	struct fc_vport *vport = transport_class_to_vport(dev);		\
1070	return snprintf(buf, sz, format_string, cast vport->field); 	\
1071}
1072
1073#define fc_private_vport_store_u32_function(field)			\
1074static ssize_t								\
1075store_fc_vport_##field(struct device *dev,				\
1076		       struct device_attribute *attr,			\
1077		       const char *buf,	size_t count)			\
1078{									\
1079	u32 val;							\
1080	struct fc_vport *vport = transport_class_to_vport(dev);		\
1081	char *cp;							\
1082	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))		\
1083		return -EBUSY;						\
1084	val = simple_strtoul(buf, &cp, 0);				\
1085	if (*cp && (*cp != '\n'))					\
1086		return -EINVAL;						\
1087	vport->field = val;						\
1088	return count;							\
1089}
1090
1091
1092#define fc_private_vport_rd_attr(field, format_string, sz)		\
1093	fc_private_vport_show_function(field, format_string, sz, )	\
1094static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1095			 show_fc_vport_##field, NULL)
1096
1097#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast)	\
1098	fc_private_vport_show_function(field, format_string, sz, (cast)) \
1099static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1100			  show_fc_vport_##field, NULL)
1101
1102#define fc_private_vport_rw_u32_attr(field, format_string, sz)		\
1103	fc_private_vport_show_function(field, format_string, sz, )	\
1104	fc_private_vport_store_u32_function(field)			\
1105static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1106			show_fc_vport_##field,				\
1107			store_fc_vport_##field)
1108
1109
1110#define fc_private_vport_rd_enum_attr(title, maxlen)			\
1111static ssize_t								\
1112show_fc_vport_##title (struct device *dev,				\
1113		       struct device_attribute *attr,			\
1114		       char *buf)					\
1115{									\
1116	struct fc_vport *vport = transport_class_to_vport(dev);		\
1117	const char *name;						\
1118	name = get_fc_##title##_name(vport->title);			\
1119	if (!name)							\
1120		return -EINVAL;						\
1121	return snprintf(buf, maxlen, "%s\n", name);			\
1122}									\
1123static FC_DEVICE_ATTR(vport, title, S_IRUGO,			\
1124			show_fc_vport_##title, NULL)
1125
1126
1127#define SETUP_VPORT_ATTRIBUTE_RD(field)					\
1128	i->private_vport_attrs[count] = device_attr_vport_##field; \
1129	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1130	i->private_vport_attrs[count].store = NULL;			\
1131	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1132	if (i->f->get_##field)						\
1133		count++
1134	/* NOTE: Above MACRO differs: checks function not show bit */
1135
1136#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field)				\
1137	i->private_vport_attrs[count] = device_attr_vport_##field; \
1138	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1139	i->private_vport_attrs[count].store = NULL;			\
1140	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1141	count++
1142
1143#define SETUP_VPORT_ATTRIBUTE_WR(field)					\
1144	i->private_vport_attrs[count] = device_attr_vport_##field; \
1145	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1146	if (i->f->field)						\
1147		count++
1148	/* NOTE: Above MACRO differs: checks function */
1149
1150#define SETUP_VPORT_ATTRIBUTE_RW(field)					\
1151	i->private_vport_attrs[count] = device_attr_vport_##field; \
1152	if (!i->f->set_vport_##field) {					\
1153		i->private_vport_attrs[count].attr.mode = S_IRUGO;	\
1154		i->private_vport_attrs[count].store = NULL;		\
1155	}								\
1156	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1157	count++
1158	/* NOTE: Above MACRO differs: does not check show bit */
1159
1160#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field)				\
1161{									\
1162	i->private_vport_attrs[count] = device_attr_vport_##field; \
1163	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1164	count++;							\
1165}
1166
1167
1168/* The FC Transport Virtual Port Attributes: */
1169
1170/* Fixed Virtual Port Attributes */
1171
1172/* Dynamic Virtual Port Attributes */
1173
1174/* Private Virtual Port Attributes */
1175
1176fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1177fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1178fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1179fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1180
1181static ssize_t
1182show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
1183		     char *buf)
1184{
1185	struct fc_vport *vport = transport_class_to_vport(dev);
1186
1187	if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1188		return snprintf(buf, 20, "unknown\n");
1189	return get_fc_port_roles_names(vport->roles, buf);
1190}
1191static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1192
1193fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1194
1195fc_private_vport_show_function(symbolic_name, "%s\n",
1196		FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1197fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1198static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1199		show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1200
1201static ssize_t
1202store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1203		      const char *buf, size_t count)
1204{
1205	struct fc_vport *vport = transport_class_to_vport(dev);
1206	struct Scsi_Host *shost = vport_to_shost(vport);
1207
1208	fc_queue_work(shost, &vport->vport_delete_work);
1209	return count;
1210}
1211static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1212			NULL, store_fc_vport_delete);
1213
1214
1215/*
1216 * Enable/Disable vport
1217 *  Write "1" to disable, write "0" to enable
1218 */
1219static ssize_t
1220store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
1221		       const char *buf,
1222			   size_t count)
1223{
1224	struct fc_vport *vport = transport_class_to_vport(dev);
1225	struct Scsi_Host *shost = vport_to_shost(vport);
1226	struct fc_internal *i = to_fc_internal(shost->transportt);
1227	int stat;
1228
1229	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1230		return -EBUSY;
1231
1232	if (*buf == '0') {
1233		if (vport->vport_state != FC_VPORT_DISABLED)
1234			return -EALREADY;
1235	} else if (*buf == '1') {
1236		if (vport->vport_state == FC_VPORT_DISABLED)
1237			return -EALREADY;
1238	} else
1239		return -EINVAL;
1240
1241	stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1242	return stat ? stat : count;
1243}
1244static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1245			NULL, store_fc_vport_disable);
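/*
 * Usage note (device names assumed for illustration): with the vport
 * instantiated under the fc_vports class, e.g.
 *	/sys/class/fc_vports/vport-<host>:<channel>-<number>/
 * an administrator can disable, re-enable or delete it:
 *	echo 1 > .../vport_disable	(disable)
 *	echo 0 > .../vport_disable	(enable)
 *	echo 1 > .../vport_delete	(any write schedules deletion)
 * -EALREADY is returned if the vport is already in the requested state,
 * and -EBUSY while a create or delete is in progress.
 */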
1246
1247
1248/*
1249 * Host Attribute Management
1250 */
1251
1252#define fc_host_show_function(field, format_string, sz, cast)		\
1253static ssize_t								\
1254show_fc_host_##field (struct device *dev,				\
1255		      struct device_attribute *attr, char *buf)		\
1256{									\
1257	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1258	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1259	if (i->f->get_host_##field)					\
1260		i->f->get_host_##field(shost);				\
1261	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1262}
1263
1264#define fc_host_store_function(field)					\
1265static ssize_t								\
1266store_fc_host_##field(struct device *dev, 				\
1267		      struct device_attribute *attr,			\
1268		      const char *buf,	size_t count)			\
1269{									\
1270	int val;							\
1271	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1272	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1273	char *cp;							\
1274									\
1275	val = simple_strtoul(buf, &cp, 0);				\
1276	if (*cp && (*cp != '\n'))					\
1277		return -EINVAL;						\
1278	i->f->set_host_##field(shost, val);				\
1279	return count;							\
1280}
1281
1282#define fc_host_store_str_function(field, slen)				\
1283static ssize_t								\
1284store_fc_host_##field(struct device *dev,				\
1285		      struct device_attribute *attr,			\
1286		      const char *buf, size_t count)			\
1287{									\
1288	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1289	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1290	unsigned int cnt=count;						\
1291									\
1292	/* count may include a LF at end of string */			\
1293	if (buf[cnt-1] == '\n')						\
1294		cnt--;							\
1295	if (cnt > ((slen) - 1))						\
1296		return -EINVAL;						\
1297	memcpy(fc_host_##field(shost), buf, cnt);			\
1298	i->f->set_host_##field(shost);					\
1299	return count;							\
1300}
1301
1302#define fc_host_rd_attr(field, format_string, sz)			\
1303	fc_host_show_function(field, format_string, sz, )		\
1304static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1305			 show_fc_host_##field, NULL)
1306
1307#define fc_host_rd_attr_cast(field, format_string, sz, cast)		\
1308	fc_host_show_function(field, format_string, sz, (cast))		\
1309static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1310			  show_fc_host_##field, NULL)
1311
1312#define fc_host_rw_attr(field, format_string, sz)			\
1313	fc_host_show_function(field, format_string, sz, )		\
1314	fc_host_store_function(field)					\
1315static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR,		\
1316			show_fc_host_##field,				\
1317			store_fc_host_##field)
1318
1319#define fc_host_rd_enum_attr(title, maxlen)				\
1320static ssize_t								\
1321show_fc_host_##title (struct device *dev,				\
1322		      struct device_attribute *attr, char *buf)		\
1323{									\
1324	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1325	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1326	const char *name;						\
1327	if (i->f->get_host_##title)					\
1328		i->f->get_host_##title(shost);				\
1329	name = get_fc_##title##_name(fc_host_##title(shost));		\
1330	if (!name)							\
1331		return -EINVAL;						\
1332	return snprintf(buf, maxlen, "%s\n", name);			\
1333}									\
1334static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1335
1336#define SETUP_HOST_ATTRIBUTE_RD(field)					\
1337	i->private_host_attrs[count] = device_attr_host_##field;	\
1338	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1339	i->private_host_attrs[count].store = NULL;			\
1340	i->host_attrs[count] = &i->private_host_attrs[count];		\
1341	if (i->f->show_host_##field)					\
1342		count++
1343
1344#define SETUP_HOST_ATTRIBUTE_RD_NS(field)				\
1345	i->private_host_attrs[count] = device_attr_host_##field;	\
1346	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1347	i->private_host_attrs[count].store = NULL;			\
1348	i->host_attrs[count] = &i->private_host_attrs[count];		\
1349	count++
1350
1351#define SETUP_HOST_ATTRIBUTE_RW(field)					\
1352	i->private_host_attrs[count] = device_attr_host_##field;	\
1353	if (!i->f->set_host_##field) {					\
1354		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
1355		i->private_host_attrs[count].store = NULL;		\
1356	}								\
1357	i->host_attrs[count] = &i->private_host_attrs[count];		\
1358	if (i->f->show_host_##field)					\
1359		count++
1360
1361
1362#define fc_private_host_show_function(field, format_string, sz, cast)	\
1363static ssize_t								\
1364show_fc_host_##field (struct device *dev,				\
1365		      struct device_attribute *attr, char *buf)		\
1366{									\
1367	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1368	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1369}
1370
1371#define fc_private_host_rd_attr(field, format_string, sz)		\
1372	fc_private_host_show_function(field, format_string, sz, )	\
1373static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1374			 show_fc_host_##field, NULL)
1375
1376#define fc_private_host_rd_attr_cast(field, format_string, sz, cast)	\
1377	fc_private_host_show_function(field, format_string, sz, (cast)) \
1378static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1379			  show_fc_host_##field, NULL)
1380
1381#define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field)			\
1382	i->private_host_attrs[count] = device_attr_host_##field;	\
1383	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1384	i->private_host_attrs[count].store = NULL;			\
1385	i->host_attrs[count] = &i->private_host_attrs[count];		\
1386	count++
1387
1388#define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field)			\
1389{									\
1390	i->private_host_attrs[count] = device_attr_host_##field;	\
1391	i->host_attrs[count] = &i->private_host_attrs[count];		\
1392	count++;							\
1393}
1394
1395
1396/* Fixed Host Attributes */
1397
1398static ssize_t
1399show_fc_host_supported_classes (struct device *dev,
1400			        struct device_attribute *attr, char *buf)
1401{
1402	struct Scsi_Host *shost = transport_class_to_shost(dev);
1403
1404	if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
1405		return snprintf(buf, 20, "unspecified\n");
1406
1407	return get_fc_cos_names(fc_host_supported_classes(shost), buf);
1408}
1409static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
1410		show_fc_host_supported_classes, NULL);
1411
1412static ssize_t
1413show_fc_host_supported_fc4s (struct device *dev,
1414			     struct device_attribute *attr, char *buf)
1415{
1416	struct Scsi_Host *shost = transport_class_to_shost(dev);
1417	return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
1418}
1419static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
1420		show_fc_host_supported_fc4s, NULL);
1421
1422static ssize_t
1423show_fc_host_supported_speeds (struct device *dev,
1424			       struct device_attribute *attr, char *buf)
1425{
1426	struct Scsi_Host *shost = transport_class_to_shost(dev);
1427
1428	if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
1429		return snprintf(buf, 20, "unknown\n");
1430
1431	return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
1432}
1433static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
1434		show_fc_host_supported_speeds, NULL);
1435
1436
1437fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1438fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1439fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1440			     unsigned long long);
1441fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1442fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1443fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1444
1445
1446/* Dynamic Host Attributes */
1447
1448static ssize_t
1449show_fc_host_active_fc4s (struct device *dev,
1450			  struct device_attribute *attr, char *buf)
1451{
1452	struct Scsi_Host *shost = transport_class_to_shost(dev);
1453	struct fc_internal *i = to_fc_internal(shost->transportt);
1454
1455	if (i->f->get_host_active_fc4s)
1456		i->f->get_host_active_fc4s(shost);
1457
1458	return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
1459}
1460static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
1461		show_fc_host_active_fc4s, NULL);
1462
1463static ssize_t
1464show_fc_host_speed (struct device *dev,
1465		    struct device_attribute *attr, char *buf)
1466{
1467	struct Scsi_Host *shost = transport_class_to_shost(dev);
1468	struct fc_internal *i = to_fc_internal(shost->transportt);
1469
1470	if (i->f->get_host_speed)
1471		i->f->get_host_speed(shost);
1472
1473	if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
1474		return snprintf(buf, 20, "unknown\n");
1475
1476	return get_fc_port_speed_names(fc_host_speed(shost), buf);
1477}
1478static FC_DEVICE_ATTR(host, speed, S_IRUGO,
1479		show_fc_host_speed, NULL);
1480
1481
1482fc_host_rd_attr(port_id, "0x%06x\n", 20);
1483fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
1484fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
1485fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1486fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1487
1488fc_private_host_show_function(system_hostname, "%s\n",
1489		FC_SYMBOLIC_NAME_SIZE + 1, )
1490fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1491static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1492		show_fc_host_system_hostname, store_fc_host_system_hostname);
1493
1494
1495/* Private Host Attributes */
1496
1497static ssize_t
1498show_fc_private_host_tgtid_bind_type(struct device *dev,
1499				     struct device_attribute *attr, char *buf)
1500{
1501	struct Scsi_Host *shost = transport_class_to_shost(dev);
1502	const char *name;
1503
1504	name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
1505	if (!name)
1506		return -EINVAL;
1507	return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
1508}
1509
1510#define get_list_head_entry(pos, head, member) 		\
1511	pos = list_entry((head)->next, typeof(*pos), member)
1512
1513static ssize_t
1514store_fc_private_host_tgtid_bind_type(struct device *dev,
1515	struct device_attribute *attr, const char *buf, size_t count)
1516{
1517	struct Scsi_Host *shost = transport_class_to_shost(dev);
1518	struct fc_rport *rport;
1519 	enum fc_tgtid_binding_type val;
1520	unsigned long flags;
1521
1522	if (get_fc_tgtid_bind_type_match(buf, &val))
1523		return -EINVAL;
1524
1525	/* if changing bind type, purge all unused consistent bindings */
1526	if (val != fc_host_tgtid_bind_type(shost)) {
1527		spin_lock_irqsave(shost->host_lock, flags);
1528		while (!list_empty(&fc_host_rport_bindings(shost))) {
1529			get_list_head_entry(rport,
1530				&fc_host_rport_bindings(shost), peers);
1531			list_del(&rport->peers);
1532			rport->port_state = FC_PORTSTATE_DELETED;
1533			fc_queue_work(shost, &rport->rport_delete_work);
1534		}
1535		spin_unlock_irqrestore(shost->host_lock, flags);
1536	}
1537
1538	fc_host_tgtid_bind_type(shost) = val;
1539	return count;
1540}
1541
1542static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
1543			show_fc_private_host_tgtid_bind_type,
1544			store_fc_private_host_tgtid_bind_type);
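/*
 * Usage note (path assumed for illustration): the binding type accepts the
 * keywords from fc_tgtid_binding_type_names above, matched on their leading
 * characters, e.g.
 *	echo wwpn > /sys/class/fc_host/host3/tgtid_bind_type
 * Changing the type purges all currently unused consistent bindings, as done
 * in the store function above.
 */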
1545
1546static ssize_t
1547store_fc_private_host_issue_lip(struct device *dev,
1548	struct device_attribute *attr, const char *buf, size_t count)
1549{
1550	struct Scsi_Host *shost = transport_class_to_shost(dev);
1551	struct fc_internal *i = to_fc_internal(shost->transportt);
1552	int ret;
1553
1554	/* ignore any data value written to the attribute */
1555	if (i->f->issue_fc_host_lip) {
1556		ret = i->f->issue_fc_host_lip(shost);
1557		return ret ? ret: count;
1558	}
1559
1560	return -ENOENT;
1561}
1562
1563static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1564			store_fc_private_host_issue_lip);
1565
1566fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1567
1568
1569/*
1570 * Host Statistics Management
1571 */
1572
1573/* Show a given attribute in the statistics group */
1574static ssize_t
1575fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
1576{
1577	struct Scsi_Host *shost = transport_class_to_shost(dev);
1578	struct fc_internal *i = to_fc_internal(shost->transportt);
1579	struct fc_host_statistics *stats;
1580	ssize_t ret = -ENOENT;
1581
1582	if (offset > sizeof(struct fc_host_statistics) ||
1583	    offset % sizeof(u64) != 0)
1584		WARN_ON(1);
1585
1586	if (i->f->get_fc_host_stats) {
1587		stats = (i->f->get_fc_host_stats)(shost);
1588		if (stats)
1589			ret = snprintf(buf, 20, "0x%llx\n",
1590			      (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
1591	}
1592	return ret;
1593}
1594
1595
1596/* generate a read-only statistics attribute */
1597#define fc_host_statistic(name)						\
1598static ssize_t show_fcstat_##name(struct device *cd,			\
1599				  struct device_attribute *attr,	\
1600				  char *buf)				\
1601{									\
1602	return fc_stat_show(cd, buf, 					\
1603			    offsetof(struct fc_host_statistics, name));	\
1604}									\
1605static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
1606
1607fc_host_statistic(seconds_since_last_reset);
1608fc_host_statistic(tx_frames);
1609fc_host_statistic(tx_words);
1610fc_host_statistic(rx_frames);
1611fc_host_statistic(rx_words);
1612fc_host_statistic(lip_count);
1613fc_host_statistic(nos_count);
1614fc_host_statistic(error_frames);
1615fc_host_statistic(dumped_frames);
1616fc_host_statistic(link_failure_count);
1617fc_host_statistic(loss_of_sync_count);
1618fc_host_statistic(loss_of_signal_count);
1619fc_host_statistic(prim_seq_protocol_err_count);
1620fc_host_statistic(invalid_tx_word_count);
1621fc_host_statistic(invalid_crc_count);
1622fc_host_statistic(fcp_input_requests);
1623fc_host_statistic(fcp_output_requests);
1624fc_host_statistic(fcp_control_requests);
1625fc_host_statistic(fcp_input_megabytes);
1626fc_host_statistic(fcp_output_megabytes);
1627
1628static ssize_t
1629fc_reset_statistics(struct device *dev, struct device_attribute *attr,
1630		    const char *buf, size_t count)
1631{
1632	struct Scsi_Host *shost = transport_class_to_shost(dev);
1633	struct fc_internal *i = to_fc_internal(shost->transportt);
1634
1635	/* ignore any data value written to the attribute */
1636	if (i->f->reset_fc_host_stats) {
1637		i->f->reset_fc_host_stats(shost);
1638		return count;
1639	}
1640
1641	return -ENOENT;
1642}
1643static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
1644				fc_reset_statistics);
1645
1646static struct attribute *fc_statistics_attrs[] = {
1647	&device_attr_host_seconds_since_last_reset.attr,
1648	&device_attr_host_tx_frames.attr,
1649	&device_attr_host_tx_words.attr,
1650	&device_attr_host_rx_frames.attr,
1651	&device_attr_host_rx_words.attr,
1652	&device_attr_host_lip_count.attr,
1653	&device_attr_host_nos_count.attr,
1654	&device_attr_host_error_frames.attr,
1655	&device_attr_host_dumped_frames.attr,
1656	&device_attr_host_link_failure_count.attr,
1657	&device_attr_host_loss_of_sync_count.attr,
1658	&device_attr_host_loss_of_signal_count.attr,
1659	&device_attr_host_prim_seq_protocol_err_count.attr,
1660	&device_attr_host_invalid_tx_word_count.attr,
1661	&device_attr_host_invalid_crc_count.attr,
1662	&device_attr_host_fcp_input_requests.attr,
1663	&device_attr_host_fcp_output_requests.attr,
1664	&device_attr_host_fcp_control_requests.attr,
1665	&device_attr_host_fcp_input_megabytes.attr,
1666	&device_attr_host_fcp_output_megabytes.attr,
1667	&device_attr_host_reset_statistics.attr,
1668	NULL
1669};
1670
1671static struct attribute_group fc_statistics_group = {
1672	.name = "statistics",
1673	.attrs = fc_statistics_attrs,
1674};
1675
1676
1677/* Host Vport Attributes */
1678
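/*
 * Helper to parse a bare 16-hex-digit WWN string (no "0x" prefix, no
 * separators) into a u64; e.g. (hypothetical value) "2000001b329b01a1"
 * yields 0x2000001b329b01a1.
 */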
1679static int
1680fc_parse_wwn(const char *ns, u64 *nm)
1681{
1682	unsigned int i, j;
1683	u8 wwn[8];
1684
1685	memset(wwn, 0, sizeof(wwn));
1686
1687	/* Validate and store the new name */
1688	for (i=0, j=0; i < 16; i++) {
1689		if ((*ns >= 'a') && (*ns <= 'f'))
1690			j = ((j << 4) | ((*ns++ -'a') + 10));
1691		else if ((*ns >= 'A') && (*ns <= 'F'))
1692			j = ((j << 4) | ((*ns++ -'A') + 10));
1693		else if ((*ns >= '0') && (*ns <= '9'))
1694			j = ((j << 4) | (*ns++ -'0'));
1695		else
1696			return -EINVAL;
1697		if (i % 2) {
1698			wwn[i/2] = j & 0xff;
1699			j = 0;
1700		}
1701	}
1702
1703	*nm = wwn_to_u64(wwn);
1704
1705	return 0;
1706}
1707
1708
1709/*
1710 * "Short-cut" sysfs variable to create a new vport on a FC Host.
1711 * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
1712 * will default to an NPIV-based FCP_Initiator; the WWNs are specified
1713 * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
1714 */
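/*
 * Example (hypothetical WWNs and host number):
 *   echo '2000001b329b01a1:2001001b329b01a1' > \
 *       /sys/class/fc_host/host5/vport_create
 */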
1715static ssize_t
1716store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1717			   const char *buf, size_t count)
1718{
1719	struct Scsi_Host *shost = transport_class_to_shost(dev);
1720	struct fc_vport_identifiers vid;
1721	struct fc_vport *vport;
1722	unsigned int cnt = count;
1723	int stat;
1724
1725	memset(&vid, 0, sizeof(vid));
1726
1727	/* count may include a LF at end of string */
1728	if (buf[cnt-1] == '\n')
1729		cnt--;
1730
1731	/* validate the full "<WWPN>:<WWNN>" format: 16 + 1 + 16 characters */
1732	if ((cnt != (16+1+16)) || (buf[16] != ':'))
1733		return -EINVAL;
1734
1735	stat = fc_parse_wwn(&buf[0], &vid.port_name);
1736	if (stat)
1737		return stat;
1738
1739	stat = fc_parse_wwn(&buf[17], &vid.node_name);
1740	if (stat)
1741		return stat;
1742
1743	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1744	vid.vport_type = FC_PORTTYPE_NPIV;
1745	/* vid.symbolic_name is already zero/NULL's */
1746	vid.disable = false;		/* always enabled */
1747
1748	/* we only allow support on Channel 0 !!! */
1749	stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1750	return stat ? stat : count;
1751}
1752static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
1753			store_fc_host_vport_create);
1754
1755
1756/*
1757 * "Short-cut" sysfs variable to delete a vport on a FC Host.
1758 * Vport is identified by a string containing "<WWPN>:<WWNN>".
1759 * The WWNs are specified as hex characters, and may *not* contain
1760 * any prefixes (e.g. 0x, x, etc)
1761 */
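/*
 * Example (hypothetical WWNs and host number), using the same format as
 * vport_create above:
 *   echo '2000001b329b01a1:2001001b329b01a1' > \
 *       /sys/class/fc_host/host5/vport_delete
 */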
1762static ssize_t
1763store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
1764			   const char *buf, size_t count)
1765{
1766	struct Scsi_Host *shost = transport_class_to_shost(dev);
1767	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1768	struct fc_vport *vport;
1769	u64 wwpn, wwnn;
1770	unsigned long flags;
1771	unsigned int cnt = count;
1772	int stat, match;
1773
1774	/* count may include a LF at end of string */
1775	if (buf[cnt-1] == '\n')
1776		cnt--;
1777
1778	/* validate the full "<WWPN>:<WWNN>" format: 16 + 1 + 16 characters */
1779	if ((cnt != (16+1+16)) || (buf[16] != ':'))
1780		return -EINVAL;
1781
1782	stat = fc_parse_wwn(&buf[0], &wwpn);
1783	if (stat)
1784		return stat;
1785
1786	stat = fc_parse_wwn(&buf[17], &wwnn);
1787	if (stat)
1788		return stat;
1789
1790	spin_lock_irqsave(shost->host_lock, flags);
1791	match = 0;
1792	/* we only allow support on Channel 0 !!! */
1793	list_for_each_entry(vport, &fc_host->vports, peers) {
1794		if ((vport->channel == 0) &&
1795		    (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1796			match = 1;
1797			break;
1798		}
1799	}
1800	spin_unlock_irqrestore(shost->host_lock, flags);
1801
1802	if (!match)
1803		return -ENODEV;
1804
1805	stat = fc_vport_terminate(vport);
1806	return stat ? stat : count;
1807}
1808static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
1809			store_fc_host_vport_delete);
1810
1811
1812static int fc_host_match(struct attribute_container *cont,
1813			  struct device *dev)
1814{
1815	struct Scsi_Host *shost;
1816	struct fc_internal *i;
1817
1818	if (!scsi_is_host_device(dev))
1819		return 0;
1820
1821	shost = dev_to_shost(dev);
1822	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1823	    != &fc_host_class.class)
1824		return 0;
1825
1826	i = to_fc_internal(shost->transportt);
1827
1828	return &i->t.host_attrs.ac == cont;
1829}
1830
1831static int fc_target_match(struct attribute_container *cont,
1832			    struct device *dev)
1833{
1834	struct Scsi_Host *shost;
1835	struct fc_internal *i;
1836
1837	if (!scsi_is_target_device(dev))
1838		return 0;
1839
1840	shost = dev_to_shost(dev->parent);
1841	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1842	    != &fc_host_class.class)
1843		return 0;
1844
1845	i = to_fc_internal(shost->transportt);
1846
1847	return &i->t.target_attrs.ac == cont;
1848}
1849
1850static void fc_rport_dev_release(struct device *dev)
1851{
1852	struct fc_rport *rport = dev_to_rport(dev);
1853	put_device(dev->parent);
1854	kfree(rport);
1855}
1856
1857int scsi_is_fc_rport(const struct device *dev)
1858{
1859	return dev->release == fc_rport_dev_release;
1860}
1861EXPORT_SYMBOL(scsi_is_fc_rport);
1862
1863static int fc_rport_match(struct attribute_container *cont,
1864			    struct device *dev)
1865{
1866	struct Scsi_Host *shost;
1867	struct fc_internal *i;
1868
1869	if (!scsi_is_fc_rport(dev))
1870		return 0;
1871
1872	shost = dev_to_shost(dev->parent);
1873	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1874	    != &fc_host_class.class)
1875		return 0;
1876
1877	i = to_fc_internal(shost->transportt);
1878
1879	return &i->rport_attr_cont.ac == cont;
1880}
1881
1882
1883static void fc_vport_dev_release(struct device *dev)
1884{
1885	struct fc_vport *vport = dev_to_vport(dev);
1886	put_device(dev->parent);		/* release kobj parent */
1887	kfree(vport);
1888}
1889
1890int scsi_is_fc_vport(const struct device *dev)
1891{
1892	return dev->release == fc_vport_dev_release;
1893}
1894EXPORT_SYMBOL(scsi_is_fc_vport);
1895
1896static int fc_vport_match(struct attribute_container *cont,
1897			    struct device *dev)
1898{
1899	struct fc_vport *vport;
1900	struct Scsi_Host *shost;
1901	struct fc_internal *i;
1902
1903	if (!scsi_is_fc_vport(dev))
1904		return 0;
1905	vport = dev_to_vport(dev);
1906
1907	shost = vport_to_shost(vport);
1908	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1909	    != &fc_host_class.class)
1910		return 0;
1911
1912	i = to_fc_internal(shost->transportt);
1913	return &i->vport_attr_cont.ac == cont;
1914}
1915
1916
1917/**
1918 * fc_timed_out - FC Transport I/O timeout intercept handler
1919 * @scmd:	The SCSI command which timed out
1920 *
1921 * This routine protects against error handlers getting invoked while a
1922 * rport is in a blocked state, typically due to a temporary loss of
1923 * connectivity. If the error handlers are allowed to proceed, requests
1924 * to abort i/o, reset the target, etc will likely fail as there is no way
1925 * to communicate with the device to perform the requested function. These
1926 * failures may result in the midlayer taking the device offline, requiring
1927 * manual intervention to restore operation.
1928 *
1929 * This routine, called whenever an i/o times out, validates the state of
1930 * the underlying rport. If the rport is blocked, it returns
1931 * EH_RESET_TIMER, which will continue to reschedule the timeout.
1932 * Eventually, either the device will return, or devloss_tmo will fire,
1933 * and when the timeout then fires, it will be handled normally.
1934 * If the rport is not blocked, normal error handling continues.
1935 *
1936 * Notes:
1937 *	This routine assumes no locks are held on entry.
1938 */
1939static enum blk_eh_timer_return
1940fc_timed_out(struct scsi_cmnd *scmd)
1941{
1942	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1943
1944	if (rport->port_state == FC_PORTSTATE_BLOCKED)
1945		return BLK_EH_RESET_TIMER;
1946
1947	return BLK_EH_NOT_HANDLED;
1948}
1949
1950/*
1951 * Called by fc_user_scan to locate an rport on the shost that
1952 * matches the channel and target id, and invoke scsi_scan_target()
1953 * on the rport.
1954 */
1955static void
1956fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
1957{
1958	struct fc_rport *rport;
1959	unsigned long flags;
1960
1961	spin_lock_irqsave(shost->host_lock, flags);
1962
1963	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
1964		if (rport->scsi_target_id == -1)
1965			continue;
1966
1967		if (rport->port_state != FC_PORTSTATE_ONLINE)
1968			continue;
1969
1970		if ((channel == rport->channel) &&
1971		    (id == rport->scsi_target_id)) {
1972			spin_unlock_irqrestore(shost->host_lock, flags);
1973			scsi_scan_target(&rport->dev, channel, id, lun, 1);
1974			return;
1975		}
1976	}
1977
1978	spin_unlock_irqrestore(shost->host_lock, flags);
1979}
1980
1981/*
1982 * Called via sysfs scan routines. Necessary, as the FC transport
1983 * wants to place all target objects below the rport object. So this
1984 * routine must invoke the scsi_scan_target() routine with the rport
1985 * object as the parent.
1986 */
1987static int
1988fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
1989{
1990	uint chlo, chhi;
1991	uint tgtlo, tgthi;
1992
1993	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1994	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1995	    ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
1996		return -EINVAL;
1997
1998	if (channel == SCAN_WILD_CARD) {
1999		chlo = 0;
2000		chhi = shost->max_channel + 1;
2001	} else {
2002		chlo = channel;
2003		chhi = channel + 1;
2004	}
2005
2006	if (id == SCAN_WILD_CARD) {
2007		tgtlo = 0;
2008		tgthi = shost->max_id;
2009	} else {
2010		tgtlo = id;
2011		tgthi = id + 1;
2012	}
2013
2014	for ( ; chlo < chhi; chlo++)
2015		for (id = tgtlo; id < tgthi; id++)
2016			fc_user_scan_tgt(shost, chlo, id, lun);
2017
2018	return 0;
2019}
2020
2021static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
2022				int result)
2023{
2024	struct fc_internal *i = to_fc_internal(shost->transportt);
2025	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
2026}
2027
2028static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
2029{
2030	struct fc_internal *i = to_fc_internal(shost->transportt);
2031	return i->f->it_nexus_response(shost, nexus, result);
2032}
2033
2034struct scsi_transport_template *
2035fc_attach_transport(struct fc_function_template *ft)
2036{
2037	int count;
2038	struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
2039					GFP_KERNEL);
2040
2041	if (unlikely(!i))
2042		return NULL;
2043
2044	i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
2045	i->t.target_attrs.ac.class = &fc_transport_class.class;
2046	i->t.target_attrs.ac.match = fc_target_match;
2047	i->t.target_size = sizeof(struct fc_starget_attrs);
2048	transport_container_register(&i->t.target_attrs);
2049
2050	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
2051	i->t.host_attrs.ac.class = &fc_host_class.class;
2052	i->t.host_attrs.ac.match = fc_host_match;
2053	i->t.host_size = sizeof(struct fc_host_attrs);
2054	if (ft->get_fc_host_stats)
2055		i->t.host_attrs.statistics = &fc_statistics_group;
2056	transport_container_register(&i->t.host_attrs);
2057
2058	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
2059	i->rport_attr_cont.ac.class = &fc_rport_class.class;
2060	i->rport_attr_cont.ac.match = fc_rport_match;
2061	transport_container_register(&i->rport_attr_cont);
2062
2063	i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
2064	i->vport_attr_cont.ac.class = &fc_vport_class.class;
2065	i->vport_attr_cont.ac.match = fc_vport_match;
2066	transport_container_register(&i->vport_attr_cont);
2067
2068	i->f = ft;
2069
2070	/* Transport uses the shost workq for scsi scanning */
2071	i->t.create_work_queue = 1;
2072
2073	i->t.eh_timed_out = fc_timed_out;
2074
2075	i->t.user_scan = fc_user_scan;
2076
2077	/* target-mode drivers' functions */
2078	i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
2079	i->t.it_nexus_response = fc_it_nexus_response;
2080
2081	/*
2082	 * Setup SCSI Target Attributes.
2083	 */
2084	count = 0;
2085	SETUP_STARGET_ATTRIBUTE_RD(node_name);
2086	SETUP_STARGET_ATTRIBUTE_RD(port_name);
2087	SETUP_STARGET_ATTRIBUTE_RD(port_id);
2088
2089	BUG_ON(count > FC_STARGET_NUM_ATTRS);
2090
2091	i->starget_attrs[count] = NULL;
2092
2093
2094	/*
2095	 * Setup SCSI Host Attributes.
2096	 */
2097	count=0;
2098	SETUP_HOST_ATTRIBUTE_RD(node_name);
2099	SETUP_HOST_ATTRIBUTE_RD(port_name);
2100	SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
2101	SETUP_HOST_ATTRIBUTE_RD(supported_classes);
2102	SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
2103	SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
2104	SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2105	if (ft->vport_create) {
2106		SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2107		SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2108	}
2109	SETUP_HOST_ATTRIBUTE_RD(serial_number);
2110
2111	SETUP_HOST_ATTRIBUTE_RD(port_id);
2112	SETUP_HOST_ATTRIBUTE_RD(port_type);
2113	SETUP_HOST_ATTRIBUTE_RD(port_state);
2114	SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
2115	SETUP_HOST_ATTRIBUTE_RD(speed);
2116	SETUP_HOST_ATTRIBUTE_RD(fabric_name);
2117	SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
2118	SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2119
2120	/* Transport-managed attributes */
2121	SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2122	if (ft->issue_fc_host_lip)
2123		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2124	if (ft->vport_create)
2125		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2126	if (ft->vport_delete)
2127		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
2128
2129	BUG_ON(count > FC_HOST_NUM_ATTRS);
2130
2131	i->host_attrs[count] = NULL;
2132
2133	/*
2134	 * Setup Remote Port Attributes.
2135	 */
2136	count=0;
2137	SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
2138	SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
2139	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
2140	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
2141	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
2142	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
2143	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
2144	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
2145	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
2146	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2147
2148	BUG_ON(count > FC_RPORT_NUM_ATTRS);
2149
2150	i->rport_attrs[count] = NULL;
2151
2152	/*
2153	 * Setup Virtual Port Attributes.
2154	 */
2155	count=0;
2156	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2157	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2158	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2159	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2160	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2161	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2162	SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2163	SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2164	SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2165
2166	BUG_ON(count > FC_VPORT_NUM_ATTRS);
2167
2168	i->vport_attrs[count] = NULL;
2169
2170	return &i->t;
2171}
2172EXPORT_SYMBOL(fc_attach_transport);
2173
2174void fc_release_transport(struct scsi_transport_template *t)
2175{
2176	struct fc_internal *i = to_fc_internal(t);
2177
2178	transport_container_unregister(&i->t.target_attrs);
2179	transport_container_unregister(&i->t.host_attrs);
2180	transport_container_unregister(&i->rport_attr_cont);
2181	transport_container_unregister(&i->vport_attr_cont);
2182
2183	kfree(i);
2184}
2185EXPORT_SYMBOL(fc_release_transport);
2186
2187/**
2188 * fc_queue_work - Queue work to the fc_host workqueue.
2189 * @shost:	Pointer to Scsi_Host bound to fc_host.
2190 * @work:	Work to queue for execution.
2191 *
2192 * Return value:
2193 * 	1 - work queued for execution
2194 *	0 - work is already queued
2195 *	-EINVAL - work queue doesn't exist
2196 */
2197static int
2198fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2199{
2200	if (unlikely(!fc_host_work_q(shost))) {
2201		printk(KERN_ERR
2202			"ERROR: FC host '%s' attempted to queue work, "
2203			"when no workqueue created.\n", shost->hostt->name);
2204		dump_stack();
2205
2206		return -EINVAL;
2207	}
2208
2209	return queue_work(fc_host_work_q(shost), work);
2210}
2211
2212/**
2213 * fc_flush_work - Flush a fc_host's workqueue.
2214 * @shost:	Pointer to Scsi_Host bound to fc_host.
2215 */
2216static void
2217fc_flush_work(struct Scsi_Host *shost)
2218{
2219	if (!fc_host_work_q(shost)) {
2220		printk(KERN_ERR
2221			"ERROR: FC host '%s' attempted to flush work, "
2222			"when no workqueue created.\n", shost->hostt->name);
2223		dump_stack();
2224		return;
2225	}
2226
2227	flush_workqueue(fc_host_work_q(shost));
2228}
2229
2230/**
2231 * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
2232 * @shost:	Pointer to Scsi_Host bound to fc_host.
2233 * @work:	Work to queue for execution.
2234 * @delay:	jiffies to delay the work queuing
2235 *
2236 * Return value:
2237 * 	1 on success / 0 already queued / < 0 for error
2238 */
2239static int
2240fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2241				unsigned long delay)
2242{
2243	if (unlikely(!fc_host_devloss_work_q(shost))) {
2244		printk(KERN_ERR
2245			"ERROR: FC host '%s' attempted to queue work, "
2246			"when no workqueue created.\n", shost->hostt->name);
2247		dump_stack();
2248
2249		return -EINVAL;
2250	}
2251
2252	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
2253}
2254
2255/**
2256 * fc_flush_devloss - Flush a fc_host's devloss workqueue.
2257 * @shost:	Pointer to Scsi_Host bound to fc_host.
2258 */
2259static void
2260fc_flush_devloss(struct Scsi_Host *shost)
2261{
2262	if (!fc_host_devloss_work_q(shost)) {
2263		printk(KERN_ERR
2264			"ERROR: FC host '%s' attempted to flush work, "
2265			"when no workqueue created.\n", shost->hostt->name);
2266		dump_stack();
2267		return;
2268	}
2269
2270	flush_workqueue(fc_host_devloss_work_q(shost));
2271}
2272
2273
2274/**
2275 * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
2276 * @shost:	Which &Scsi_Host
2277 *
2278 * This routine is expected to be called immediately preceding a
2279 * driver's call to scsi_remove_host().
2280 *
2281 * WARNING: A driver utilizing the fc_transport, which fails to call
2282 *   this routine prior to scsi_remove_host(), will leave dangling
2283 *   objects in /sys/class/fc_remote_ports. Access to any of these
2284 *   objects can result in a system crash !!!
2285 *
2286 * Notes:
2287 *	This routine assumes no locks are held on entry.
2288 */
2289void
2290fc_remove_host(struct Scsi_Host *shost)
2291{
2292	struct fc_vport *vport = NULL, *next_vport = NULL;
2293	struct fc_rport *rport = NULL, *next_rport = NULL;
2294	struct workqueue_struct *work_q;
2295	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2296	unsigned long flags;
2297
2298	spin_lock_irqsave(shost->host_lock, flags);
2299
2300	/* Remove any vports */
2301	list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
2302		fc_queue_work(shost, &vport->vport_delete_work);
2303
2304	/* Remove any remote ports */
2305	list_for_each_entry_safe(rport, next_rport,
2306			&fc_host->rports, peers) {
2307		list_del(&rport->peers);
2308		rport->port_state = FC_PORTSTATE_DELETED;
2309		fc_queue_work(shost, &rport->rport_delete_work);
2310	}
2311
2312	list_for_each_entry_safe(rport, next_rport,
2313			&fc_host->rport_bindings, peers) {
2314		list_del(&rport->peers);
2315		rport->port_state = FC_PORTSTATE_DELETED;
2316		fc_queue_work(shost, &rport->rport_delete_work);
2317	}
2318
2319	spin_unlock_irqrestore(shost->host_lock, flags);
2320
2321	/* flush all scan work items */
2322	scsi_flush_work(shost);
2323
2324	/* flush all stgt delete, and rport delete work items, then kill it  */
2325	if (fc_host->work_q) {
2326		work_q = fc_host->work_q;
2327		fc_host->work_q = NULL;
2328		destroy_workqueue(work_q);
2329	}
2330
2331	/* flush all devloss work items, then kill it  */
2332	if (fc_host->devloss_work_q) {
2333		work_q = fc_host->devloss_work_q;
2334		fc_host->devloss_work_q = NULL;
2335		destroy_workqueue(work_q);
2336	}
2337}
2338EXPORT_SYMBOL(fc_remove_host);
2339
2340static void fc_terminate_rport_io(struct fc_rport *rport)
2341{
2342	struct Scsi_Host *shost = rport_to_shost(rport);
2343	struct fc_internal *i = to_fc_internal(shost->transportt);
2344
2345	/* Involve the LLDD if possible to terminate all io on the rport. */
2346	if (i->f->terminate_rport_io)
2347		i->f->terminate_rport_io(rport);
2348
2349	/*
2350	 * must unblock to flush queued IO. The caller will have set
2351	 * the port_state or flags, so that fc_remote_port_chkready will
2352	 * fail IO.
2353	 */
2354	scsi_target_unblock(&rport->dev);
2355}
2356
2357/**
2358 * fc_starget_delete - called to delete the scsi descendants of an rport
2359 * @work:	remote port to be operated on.
2360 *
2361 * Deletes target and all sdevs.
2362 */
2363static void
2364fc_starget_delete(struct work_struct *work)
2365{
2366	struct fc_rport *rport =
2367		container_of(work, struct fc_rport, stgt_delete_work);
2368
2369	fc_terminate_rport_io(rport);
2370	scsi_remove_target(&rport->dev);
2371}
2372
2373
2374/**
2375 * fc_rport_final_delete - finish rport termination and delete it.
2376 * @work:	remote port to be deleted.
2377 */
2378static void
2379fc_rport_final_delete(struct work_struct *work)
2380{
2381	struct fc_rport *rport =
2382		container_of(work, struct fc_rport, rport_delete_work);
2383	struct device *dev = &rport->dev;
2384	struct Scsi_Host *shost = rport_to_shost(rport);
2385	struct fc_internal *i = to_fc_internal(shost->transportt);
2386	unsigned long flags;
2387
2388	/*
2389	 * if a scan is pending, flush the SCSI Host work_q so that
2390	 * we can reclaim the rport scan work element.
2391	 */
2392	if (rport->flags & FC_RPORT_SCAN_PENDING)
2393		scsi_flush_work(shost);
2394
2395	fc_terminate_rport_io(rport);
2396
2397	/*
2398	 * Cancel any outstanding timers. These should really exist
2399	 * only when rmmod'ing the LLDD and we're asking for
2400	 * immediate termination of the rports
2401	 */
2402	spin_lock_irqsave(shost->host_lock, flags);
2403	if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2404		spin_unlock_irqrestore(shost->host_lock, flags);
2405		if (!cancel_delayed_work(&rport->fail_io_work))
2406			fc_flush_devloss(shost);
2407		if (!cancel_delayed_work(&rport->dev_loss_work))
2408			fc_flush_devloss(shost);
2409		spin_lock_irqsave(shost->host_lock, flags);
2410		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2411	}
2412	spin_unlock_irqrestore(shost->host_lock, flags);
2413
2414	/* Delete SCSI target and sdevs */
2415	if (rport->scsi_target_id != -1)
2416		fc_starget_delete(&rport->stgt_delete_work);
2417
2418	/*
2419	 * Notify the driver that the rport is now dead. The LLDD will
2420	 * also guarantee that any communication to the rport is terminated
2421	 *
2422	 * Avoid this call if we already called it when we preserved the
2423	 * rport for the binding.
2424	 */
2425	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2426	    (i->f->dev_loss_tmo_callbk))
2427		i->f->dev_loss_tmo_callbk(rport);
2428
2429	fc_bsg_remove(rport->rqst_q);
2430
2431	transport_remove_device(dev);
2432	device_del(dev);
2433	transport_destroy_device(dev);
2434	put_device(&shost->shost_gendev);	/* for fc_host->rport list */
2435	put_device(dev);			/* for self-reference */
2436}
2437
2438
2439/**
2440 * fc_rport_create - allocates and creates a remote FC port.
2441 * @shost:	scsi host the remote port is connected to.
2442 * @channel:	Channel on shost port connected to.
2443 * @ids:	The world wide names, fc address, and FC4 port
2444 *		roles for the remote port.
2445 *
2446 * Allocates and creates the remote port structure, including the
2447 * class and sysfs creation.
2448 *
2449 * Notes:
2450 *	This routine assumes no locks are held on entry.
2451 */
2452static struct fc_rport *
2453fc_rport_create(struct Scsi_Host *shost, int channel,
2454	struct fc_rport_identifiers  *ids)
2455{
2456	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2457	struct fc_internal *fci = to_fc_internal(shost->transportt);
2458	struct fc_rport *rport;
2459	struct device *dev;
2460	unsigned long flags;
2461	int error;
2462	size_t size;
2463
2464	size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
2465	rport = kzalloc(size, GFP_KERNEL);
2466	if (unlikely(!rport)) {
2467		printk(KERN_ERR "%s: allocation failure\n", __func__);
2468		return NULL;
2469	}
2470
2471	rport->maxframe_size = -1;
2472	rport->supported_classes = FC_COS_UNSPECIFIED;
2473	rport->dev_loss_tmo = fc_dev_loss_tmo;
2474	memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2475	memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2476	rport->port_id = ids->port_id;
2477	rport->roles = ids->roles;
2478	rport->port_state = FC_PORTSTATE_ONLINE;
2479	if (fci->f->dd_fcrport_size)
2480		rport->dd_data = &rport[1];
2481	rport->channel = channel;
2482	rport->fast_io_fail_tmo = -1;
2483
2484	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
2485	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
2486	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
2487	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
2488	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
2489
2490	spin_lock_irqsave(shost->host_lock, flags);
2491
2492	rport->number = fc_host->next_rport_number++;
2493	if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
2494		rport->scsi_target_id = fc_host->next_target_id++;
2495	else
2496		rport->scsi_target_id = -1;
2497	list_add_tail(&rport->peers, &fc_host->rports);
2498	get_device(&shost->shost_gendev);	/* for fc_host->rport list */
2499
2500	spin_unlock_irqrestore(shost->host_lock, flags);
2501
2502	dev = &rport->dev;
2503	device_initialize(dev);			/* takes self reference */
2504	dev->parent = get_device(&shost->shost_gendev); /* parent reference */
2505	dev->release = fc_rport_dev_release;
2506	dev_set_name(dev, "rport-%d:%d-%d",
2507		     shost->host_no, channel, rport->number);
2508	transport_setup_device(dev);
2509
2510	error = device_add(dev);
2511	if (error) {
2512		printk(KERN_ERR "FC Remote Port device_add failed\n");
2513		goto delete_rport;
2514	}
2515	transport_add_device(dev);
2516	transport_configure_device(dev);
2517
2518	fc_bsg_rportadd(shost, rport);
2519	/* ignore any bsg add error - we just can't do sgio */
2520
2521	if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2522		/* initiate a scan of the target */
2523		rport->flags |= FC_RPORT_SCAN_PENDING;
2524		scsi_queue_work(shost, &rport->scan_work);
2525	}
2526
2527	return rport;
2528
2529delete_rport:
2530	transport_destroy_device(dev);
2531	spin_lock_irqsave(shost->host_lock, flags);
2532	list_del(&rport->peers);
2533	put_device(&shost->shost_gendev);	/* for fc_host->rport list */
2534	spin_unlock_irqrestore(shost->host_lock, flags);
2535	put_device(dev->parent);
2536	kfree(rport);
2537	return NULL;
2538}
2539
2540/**
2541 * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
2542 * @shost:	scsi host the remote port is connected to.
2543 * @channel:	Channel on shost port connected to.
2544 * @ids:	The world wide names, fc address, and FC4 port
2545 *		roles for the remote port.
2546 *
2547 * The LLDD calls this routine to notify the transport of the existence
2548 * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn)
2549 * of the port, its FC address (port_id), and the FC4 roles that are
2550 * active for the port.
2551 *
2552 * For ports that are FCP targets (aka scsi targets), the FC transport
2553 * maintains consistent target id bindings on behalf of the LLDD.
2554 * A consistent target id binding is an assignment of a target id to
2555 * a remote port identifier, which persists while the scsi host is
2556 * attached. The remote port can disappear, then later reappear, and
2557 * its target id assignment remains the same. This allows for shifts
2558 * in FC addressing (if binding by wwpn or wwnn) with no apparent
2559 * changes to the scsi subsystem which is based on scsi host number and
2560 * target id values.  Bindings are only valid during the attachment of
2561 * the scsi host. If the host detaches, then later re-attaches, target
2562 * id bindings may change.
2563 *
2564 * This routine is responsible for returning a remote port structure.
2565 * The routine will search the list of remote ports it maintains
2566 * internally on behalf of consistent target id mappings. If found, the
2567 * remote port structure will be reused. Otherwise, a new remote port
2568 * structure will be allocated.
2569 *
2570 * Whenever a remote port is allocated, a new fc_remote_port class
2571 * device is created.
2572 *
2573 * Should not be called from interrupt context.
2574 *
2575 * Notes:
2576 *	This routine assumes no locks are held on entry.
2577 */
2578struct fc_rport *
2579fc_remote_port_add(struct Scsi_Host *shost, int channel,
2580	struct fc_rport_identifiers  *ids)
2581{
2582	struct fc_internal *fci = to_fc_internal(shost->transportt);
2583	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2584	struct fc_rport *rport;
2585	unsigned long flags;
2586	int match = 0;
2587
2588	/* ensure any stgt delete functions are done */
2589	fc_flush_work(shost);
2590
2591	/*
2592	 * Search the list of "active" rports, for an rport that has been
2593	 * deleted, but we've held off the real delete while the target
2594	 * is in a "blocked" state.
2595	 */
2596	spin_lock_irqsave(shost->host_lock, flags);
2597
2598	list_for_each_entry(rport, &fc_host->rports, peers) {
2599
2600		if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
2601			(rport->channel == channel)) {
2602
2603			switch (fc_host->tgtid_bind_type) {
2604			case FC_TGTID_BIND_BY_WWPN:
2605			case FC_TGTID_BIND_NONE:
2606				if (rport->port_name == ids->port_name)
2607					match = 1;
2608				break;
2609			case FC_TGTID_BIND_BY_WWNN:
2610				if (rport->node_name == ids->node_name)
2611					match = 1;
2612				break;
2613			case FC_TGTID_BIND_BY_ID:
2614				if (rport->port_id == ids->port_id)
2615					match = 1;
2616				break;
2617			}
2618
2619			if (match) {
2620
2621				memcpy(&rport->node_name, &ids->node_name,
2622					sizeof(rport->node_name));
2623				memcpy(&rport->port_name, &ids->port_name,
2624					sizeof(rport->port_name));
2625				rport->port_id = ids->port_id;
2626
2627				rport->port_state = FC_PORTSTATE_ONLINE;
2628				rport->roles = ids->roles;
2629
2630				spin_unlock_irqrestore(shost->host_lock, flags);
2631
2632				if (fci->f->dd_fcrport_size)
2633					memset(rport->dd_data, 0,
2634						fci->f->dd_fcrport_size);
2635
2636				/*
2637				 * If we were not a target, cancel the
2638				 * io terminate and rport timers, and
2639				 * we're done.
2640				 *
2641				 * If we were a target, but our new role
2642				 * doesn't indicate a target, leave the
2643				 * timers running expecting the role to
2644				 * change as the target fully logs in. If
2645				 * it doesn't, the target will be torn down.
2646				 *
2647				 * If we were a target, and our role shows
2648				 * we're still a target, cancel the timers
2649				 * and kick off a scan.
2650				 */
2651
2652				/* was a target, not in roles */
2653				if ((rport->scsi_target_id != -1) &&
2654				    (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
2655					return rport;
2656
2657				/*
2658				 * Stop the fail io and dev_loss timers.
2659				 * If they flush, the port_state will
2660				 * be checked and will NOOP the function.
2661				 */
2662				if (!cancel_delayed_work(&rport->fail_io_work))
2663					fc_flush_devloss(shost);
2664				if (!cancel_delayed_work(&rport->dev_loss_work))
2665					fc_flush_devloss(shost);
2666
2667				spin_lock_irqsave(shost->host_lock, flags);
2668
2669				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2670						  FC_RPORT_DEVLOSS_PENDING |
2671						  FC_RPORT_DEVLOSS_CALLBK_DONE);
2672
2673				/* if target, initiate a scan */
2674				if (rport->scsi_target_id != -1) {
2675					rport->flags |= FC_RPORT_SCAN_PENDING;
2676					scsi_queue_work(shost,
2677							&rport->scan_work);
2678					spin_unlock_irqrestore(shost->host_lock,
2679							flags);
2680					scsi_target_unblock(&rport->dev);
2681				} else
2682					spin_unlock_irqrestore(shost->host_lock,
2683							flags);
2684
2685				fc_bsg_goose_queue(rport);
2686
2687				return rport;
2688			}
2689		}
2690	}
2691
2692	/*
2693	 * Search the bindings list
2694	 * Note: if never an FCP target, you won't be on this list
2695	 */
2696	if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
2697
2698		/* search for a matching consistent binding */
2699
2700		list_for_each_entry(rport, &fc_host->rport_bindings,
2701					peers) {
2702			if (rport->channel != channel)
2703				continue;
2704
2705			switch (fc_host->tgtid_bind_type) {
2706			case FC_TGTID_BIND_BY_WWPN:
2707				if (rport->port_name == ids->port_name)
2708					match = 1;
2709				break;
2710			case FC_TGTID_BIND_BY_WWNN:
2711				if (rport->node_name == ids->node_name)
2712					match = 1;
2713				break;
2714			case FC_TGTID_BIND_BY_ID:
2715				if (rport->port_id == ids->port_id)
2716					match = 1;
2717				break;
2718			case FC_TGTID_BIND_NONE: /* to keep compiler happy */
2719				break;
2720			}
2721
2722			if (match) {
2723				list_move_tail(&rport->peers, &fc_host->rports);
2724				break;
2725			}
2726		}
2727
2728		if (match) {
2729			memcpy(&rport->node_name, &ids->node_name,
2730				sizeof(rport->node_name));
2731			memcpy(&rport->port_name, &ids->port_name,
2732				sizeof(rport->port_name));
2733			rport->port_id = ids->port_id;
2734			rport->roles = ids->roles;
2735			rport->port_state = FC_PORTSTATE_ONLINE;
2736			rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2737
2738			if (fci->f->dd_fcrport_size)
2739				memset(rport->dd_data, 0,
2740						fci->f->dd_fcrport_size);
2741
2742			if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2743				/* initiate a scan of the target */
2744				rport->flags |= FC_RPORT_SCAN_PENDING;
2745				scsi_queue_work(shost, &rport->scan_work);
2746				spin_unlock_irqrestore(shost->host_lock, flags);
2747				scsi_target_unblock(&rport->dev);
2748			} else
2749				spin_unlock_irqrestore(shost->host_lock, flags);
2750
2751			return rport;
2752		}
2753	}
2754
2755	spin_unlock_irqrestore(shost->host_lock, flags);
2756
2757	/* No consistent binding found - create new remote port entry */
2758	rport = fc_rport_create(shost, channel, ids);
2759
2760	return rport;
2761}
2762EXPORT_SYMBOL(fc_remote_port_add);
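
/*
 * Sketch of a typical LLDD call (identifier values are hypothetical):
 *
 *	struct fc_rport_identifiers ids = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_id   = 0x010200,
 *		.roles     = FC_PORT_ROLE_FCP_TARGET,
 *	};
 *	rport = fc_remote_port_add(shost, 0, &ids);
 */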
2763
2764
2765/**
2766 * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
2767 * @rport:	The remote port that no longer exists
2768 *
2769 * The LLDD calls this routine to notify the transport that a remote
2770 * port is no longer part of the topology. Note: Although a port
2771 * may no longer be part of the topology, it may persist in the remote
2772 * ports displayed by the fc_host. We do this under 2 conditions:
2773 * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
2774 *   This allows the port to temporarily disappear, then reappear without
2775 *   disrupting the SCSI device tree attached to it. During the "blocked"
2776 *   period the port will still exist.
2777 * 2) If the port was a scsi target and disappears for longer than we
2778 *   expect, we'll delete the port and tear down the SCSI device tree
2779 *   attached to it. However, we want to semi-persist the target id assigned
2780 *   to that port in case it eventually reappears. The port structure will
2781 *   remain (although with minimal information) so that the target id
2782 *   binding remains.
2783 *
2784 * If the remote port is not an FCP Target, it will be fully torn down
2785 * and deallocated, including the fc_remote_port class device.
2786 *
2787 * If the remote port is an FCP Target, the port will be placed in a
2788 * temporary blocked state. From the LLDD's perspective, the rport no
2789 * longer exists. From the SCSI midlayer's perspective, the SCSI target
2790 * exists, but all sdevs on it are blocked from further I/O. The following
2791 * is then expected.
2792 *
2793 *   If the remote port does not return (signaled by a LLDD call to
2794 *   fc_remote_port_add()) within the dev_loss_tmo timeout, then the
2795 *   scsi target is removed - killing all outstanding i/o and removing the
2796 *   scsi devices attached to it. The port structure will be marked Not
2797 *   Present and be partially cleared, leaving only enough information to
2798 *   recognize the remote port relative to the scsi target id binding if
2799 *   it later appears.  The port will remain as long as there is a valid
2800 *   binding (e.g. until the user changes the binding type or unloads the
2801 *   scsi host with the binding).
2802 *
2803 *   If the remote port returns within the dev_loss_tmo value (and matches
2804 *   according to the target id binding type), the port structure will be
2805 *   reused. If it is no longer a SCSI target, the target will be torn
2806 *   down. If it continues to be a SCSI target, then the target will be
2807 *   unblocked (allowing i/o to be resumed), and a scan will be activated
2808 *   to ensure that all luns are detected.
2809 *
2810 * Called from normal process context only - cannot be called from interrupt.
2811 *
2812 * Notes:
2813 *	This routine assumes no locks are held on entry.
2814 */
2815void
2816fc_remote_port_delete(struct fc_rport  *rport)
2817{
2818	struct Scsi_Host *shost = rport_to_shost(rport);
2819	int timeout = rport->dev_loss_tmo;
2820	unsigned long flags;
2821
2822	/*
2823	 * No need to flush the fc_host work_q's, as all adds are synchronous.
2824	 *
2825	 * We do need to reclaim the rport scan work element, so eventually
2826	 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
2827	 * there's still a scan pending.
2828	 */
2829
2830	spin_lock_irqsave(shost->host_lock, flags);
2831
2832	if (rport->port_state != FC_PORTSTATE_ONLINE) {
2833		spin_unlock_irqrestore(shost->host_lock, flags);
2834		return;
2835	}
2836
2837	/*
2838	 * In the past, if this was not an FCP-Target, we would
2839	 * unconditionally just jump to deleting the rport.
2840	 * However, rports can be used as node containers by the LLDD,
2841	 * and it's not appropriate to just terminate the rport at the
2842	 * first sign of a loss in connectivity. The LLDD may want to
2843	 * send ELS traffic to re-validate the login. If the rport is
2844	 * immediately deleted, it makes it inappropriate for a node
2845	 * container.
2846	 * So... we now unconditionally wait dev_loss_tmo before
2847	 * destroying an rport.
2848	 */
2849
2850	rport->port_state = FC_PORTSTATE_BLOCKED;
2851
2852	rport->flags |= FC_RPORT_DEVLOSS_PENDING;
2853
2854	spin_unlock_irqrestore(shost->host_lock, flags);
2855
2856	if (rport->roles & FC_PORT_ROLE_FCP_INITIATOR &&
2857	    shost->active_mode & MODE_TARGET)
2858		fc_tgt_it_nexus_destroy(shost, (unsigned long)rport);
2859
2860	scsi_target_block(&rport->dev);
2861
2862	/* see if we need to kill io faster than waiting for device loss */
2863	if ((rport->fast_io_fail_tmo != -1) &&
2864	    (rport->fast_io_fail_tmo < timeout))
2865		fc_queue_devloss_work(shost, &rport->fail_io_work,
2866					rport->fast_io_fail_tmo * HZ);
2867
2868	/* cap the length the devices can be blocked until they are deleted */
2869	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
2870}
2871EXPORT_SYMBOL(fc_remote_port_delete);
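
/*
 * Sketch of the expected LLDD pairing (not taken from any specific driver):
 *	connectivity lost:  fc_remote_port_delete(rport);
 *	port rediscovered:  rport = fc_remote_port_add(shost, channel, &ids);
 * If the add occurs within dev_loss_tmo, the blocked SCSI target is
 * unblocked and reused; otherwise the starget is torn down as described
 * above.
 */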
2872
2873/**
2874 * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
2875 * @rport:	The remote port that changed.
2876 * @roles:      New roles for this port.
2877 *
2878 * Description: The LLDD calls this routine to notify the transport that the
2879 * roles on a remote port may have changed. The largest effect of this is
2880 * if a port now becomes an FCP Target, it must be allocated a
2881 * scsi target id.  If the port is no longer an FCP target, any
2882 * scsi target id value assigned to it will persist in case the
2883 * role changes back to include FCP Target. No changes in the scsi
2884 * midlayer will be invoked if the role changes (in the expectation
2885 * that the role will be resumed. If it is not, normal error processing
2886 * will take place).
2887 *
2888 * Should not be called from interrupt context.
2889 *
2890 * Notes:
2891 *	This routine assumes no locks are held on entry.
2892 */
2893void
2894fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
2895{
2896	struct Scsi_Host *shost = rport_to_shost(rport);
2897	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2898	unsigned long flags;
2899	int create = 0;
2900	int ret;
2901
2902	spin_lock_irqsave(shost->host_lock, flags);
2903	if (roles & FC_PORT_ROLE_FCP_TARGET) {
2904		if (rport->scsi_target_id == -1) {
2905			rport->scsi_target_id = fc_host->next_target_id++;
2906			create = 1;
2907		} else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
2908			create = 1;
2909	} else if (shost->active_mode & MODE_TARGET) {
2910		ret = fc_tgt_it_nexus_create(shost, (unsigned long)rport,
2911					     (char *)&rport->node_name);
2912		if (ret)
2913			printk(KERN_ERR "FC Remore Port tgt nexus failed %d\n",
2914			       ret);
2915	}
2916
2917	rport->roles = roles;
2918
2919	spin_unlock_irqrestore(shost->host_lock, flags);
2920
2921	if (create) {
2922		/*
2923		 * There may have been a delete timer running on the
2924		 * port. Ensure that it is cancelled as we now know
2925		 * the port is an FCP Target.
2926		 * Note: we know the rport exists and is in an online
2927		 *  state as the LLDD would not have had an rport
2928		 *  reference to pass us.
2929		 *
2930		 * Take no action on the del_timer failure as the state
2931		 * machine state change will validate the
2932		 * transaction.
2933		 */
2934		if (!cancel_delayed_work(&rport->fail_io_work))
2935			fc_flush_devloss(shost);
2936		if (!cancel_delayed_work(&rport->dev_loss_work))
2937			fc_flush_devloss(shost);
2938
2939		spin_lock_irqsave(shost->host_lock, flags);
2940		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2941				  FC_RPORT_DEVLOSS_PENDING);
2942		spin_unlock_irqrestore(shost->host_lock, flags);
2943
2944		/* ensure any stgt delete functions are done */
2945		fc_flush_work(shost);
2946
2947		/* initiate a scan of the target */
2948		spin_lock_irqsave(shost->host_lock, flags);
2949		rport->flags |= FC_RPORT_SCAN_PENDING;
2950		scsi_queue_work(shost, &rport->scan_work);
2951		spin_unlock_irqrestore(shost->host_lock, flags);
2952		scsi_target_unblock(&rport->dev);
2953	}
2954}
2955EXPORT_SYMBOL(fc_remote_port_rolechg);
2956
2957/**
2958 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
2959 * @work:	rport target that failed to reappear in the allotted time.
2960 *
2961 * Description: A remote port being deleted is first blocked; if it fails
2962 *              to reappear within the allotted time, this handler is called.
2963 */
2964static void
2965fc_timeout_deleted_rport(struct work_struct *work)
2966{
2967	struct fc_rport *rport =
2968		container_of(work, struct fc_rport, dev_loss_work.work);
2969	struct Scsi_Host *shost = rport_to_shost(rport);
2970	struct fc_internal *i = to_fc_internal(shost->transportt);
2971	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2972	unsigned long flags;
2973
2974	spin_lock_irqsave(shost->host_lock, flags);
2975
2976	rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2977
2978	/*
2979	 * If the port is ONLINE, then it came back. If it was a SCSI
2980	 * target, validate it still is. If not, tear down the
2981	 * scsi_target on it.
2982	 */
2983	if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2984	    (rport->scsi_target_id != -1) &&
2985	    !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
2986		dev_printk(KERN_ERR, &rport->dev,
2987			"blocked FC remote port time out: no longer"
2988			" a FCP target, removing starget\n");
2989		spin_unlock_irqrestore(shost->host_lock, flags);
2990		scsi_target_unblock(&rport->dev);
2991		fc_queue_work(shost, &rport->stgt_delete_work);
2992		return;
2993	}
2994
2995	/* NOOP state - we're flushing workq's */
2996	if (rport->port_state != FC_PORTSTATE_BLOCKED) {
2997		spin_unlock_irqrestore(shost->host_lock, flags);
2998		dev_printk(KERN_ERR, &rport->dev,
2999			"blocked FC remote port time out: leaving"
3000			" rport%s alone\n",
3001			(rport->scsi_target_id != -1) ?  " and starget" : "");
3002		return;
3003	}
3004
3005	if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3006	    (rport->scsi_target_id == -1)) {
3007		list_del(&rport->peers);
3008		rport->port_state = FC_PORTSTATE_DELETED;
3009		dev_printk(KERN_ERR, &rport->dev,
3010			"blocked FC remote port time out: removing"
3011			" rport%s\n",
3012			(rport->scsi_target_id != -1) ?  " and starget" : "");
3013		fc_queue_work(shost, &rport->rport_delete_work);
3014		spin_unlock_irqrestore(shost->host_lock, flags);
3015		return;
3016	}
3017
3018	dev_printk(KERN_ERR, &rport->dev,
3019		"blocked FC remote port time out: removing target and "
3020		"saving binding\n");
3021
3022	list_move_tail(&rport->peers, &fc_host->rport_bindings);
3023
3024	/*
3025	 * Note: We do not remove or clear the hostdata area. This allows
3026	 *   host-specific target data to persist along with the
3027 *   scsi_target_id. It's up to the host to manage its hostdata area.
3028	 */
3029
3030	/*
3031	 * Reinitialize port attributes that may change if the port comes back.
3032	 */
3033	rport->maxframe_size = -1;
3034	rport->supported_classes = FC_COS_UNSPECIFIED;
3035	rport->roles = FC_PORT_ROLE_UNKNOWN;
3036	rport->port_state = FC_PORTSTATE_NOTPRESENT;
3037	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3038	rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3039
3040	/*
3041	 * Pre-emptively kill I/O rather than waiting for the work queue
3042	 * item to tear down the starget. (FCoE libFC folks prefer this
3043	 * and to have the rport's port_id still set when it's done).
3044	 */
3045	spin_unlock_irqrestore(shost->host_lock, flags);
3046	fc_terminate_rport_io(rport);
3047
3048	BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
3049
3050	/* remove the identifiers that aren't used in the consistent binding */
3051	switch (fc_host->tgtid_bind_type) {
3052	case FC_TGTID_BIND_BY_WWPN:
3053		rport->node_name = -1;
3054		rport->port_id = -1;
3055		break;
3056	case FC_TGTID_BIND_BY_WWNN:
3057		rport->port_name = -1;
3058		rport->port_id = -1;
3059		break;
3060	case FC_TGTID_BIND_BY_ID:
3061		rport->node_name = -1;
3062		rport->port_name = -1;
3063		break;
3064	case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
3065		break;
3066	}
3067
3068	/*
3069	 * As this only occurs if the remote port (scsi target)
3070	 * went away and didn't come back - we'll remove
3071	 * all attached scsi devices.
3072	 */
3073	fc_queue_work(shost, &rport->stgt_delete_work);
3074
3075	/*
3076	 * Notify the driver that the rport is now dead. The LLDD will
3077	 * also guarantee that any communication to the rport is terminated
3078	 *
3079	 * Note: we set the CALLBK_DONE flag above to correspond
3080	 */
3081	if (i->f->dev_loss_tmo_callbk)
3082		i->f->dev_loss_tmo_callbk(rport);
3083}
3084
3085
3086/**
3087 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3088 * @work:	rport to terminate io on.
3089 *
3090 * Notes: Only requests that the io be failed; it does not guarantee that
3091 *    all io is flushed prior to returning.
3092 */
3093static void
3094fc_timeout_fail_rport_io(struct work_struct *work)
3095{
3096	struct fc_rport *rport =
3097		container_of(work, struct fc_rport, fail_io_work.work);
3098
3099	if (rport->port_state != FC_PORTSTATE_BLOCKED)
3100		return;
3101
3102	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3103	fc_terminate_rport_io(rport);
3104}
3105
3106/**
3107 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
3108 * @work:	remote port to be scanned.
3109 */
3110static void
3111fc_scsi_scan_rport(struct work_struct *work)
3112{
3113	struct fc_rport *rport =
3114		container_of(work, struct fc_rport, scan_work);
3115	struct Scsi_Host *shost = rport_to_shost(rport);
3116	struct fc_internal *i = to_fc_internal(shost->transportt);
3117	unsigned long flags;
3118
3119	if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3120	    (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3121	    !(i->f->disable_target_scan)) {
3122		scsi_scan_target(&rport->dev, rport->channel,
3123			rport->scsi_target_id, SCAN_WILD_CARD, 1);
3124	}
3125
3126	spin_lock_irqsave(shost->host_lock, flags);
3127	rport->flags &= ~FC_RPORT_SCAN_PENDING;
3128	spin_unlock_irqrestore(shost->host_lock, flags);
3129}
3130
3131
3132/**
3133 * fc_vport_setup - allocates and creates a FC virtual port.
3134 * @shost:	scsi host the virtual port is connected to.
3135 * @channel:	Channel on shost port connected to.
3136 * @pdev:	parent device for vport
3137 * @ids:	The world wide names, FC4 port roles, etc for
3138 *              the virtual port.
3139 * @ret_vport:	The pointer to the created vport.
3140 *
3141 * Allocates and creates the vport structure, calls the parent host
3142 * to instantiate the vport, then completes with class and sysfs creation.
3143 *
3144 * Notes:
3145 *	This routine assumes no locks are held on entry.
3146 */
3147static int
3148fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3149	struct fc_vport_identifiers  *ids, struct fc_vport **ret_vport)
3150{
3151	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3152	struct fc_internal *fci = to_fc_internal(shost->transportt);
3153	struct fc_vport *vport;
3154	struct device *dev;
3155	unsigned long flags;
3156	size_t size;
3157	int error;
3158
3159	*ret_vport = NULL;
3160
3161	if ( ! fci->f->vport_create)
3162		return -ENOENT;
3163
3164	size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3165	vport = kzalloc(size, GFP_KERNEL);
3166	if (unlikely(!vport)) {
3167		printk(KERN_ERR "%s: allocation failure\n", __func__);
3168		return -ENOMEM;
3169	}
3170
3171	vport->vport_state = FC_VPORT_UNKNOWN;
3172	vport->vport_last_state = FC_VPORT_UNKNOWN;
3173	vport->node_name = ids->node_name;
3174	vport->port_name = ids->port_name;
3175	vport->roles = ids->roles;
3176	vport->vport_type = ids->vport_type;
3177	if (fci->f->dd_fcvport_size)
3178		vport->dd_data = &vport[1];
3179	vport->shost = shost;
3180	vport->channel = channel;
3181	vport->flags = FC_VPORT_CREATING;
3182	INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3183
3184	spin_lock_irqsave(shost->host_lock, flags);
3185
3186	if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3187		spin_unlock_irqrestore(shost->host_lock, flags);
3188		kfree(vport);
3189		return -ENOSPC;
3190	}
3191	fc_host->npiv_vports_inuse++;
3192	vport->number = fc_host->next_vport_number++;
3193	list_add_tail(&vport->peers, &fc_host->vports);
3194	get_device(&shost->shost_gendev);	/* for fc_host->vport list */
3195
3196	spin_unlock_irqrestore(shost->host_lock, flags);
3197
3198	dev = &vport->dev;
3199	device_initialize(dev);			/* takes self reference */
3200	dev->parent = get_device(pdev);		/* takes parent reference */
3201	dev->release = fc_vport_dev_release;
3202	dev_set_name(dev, "vport-%d:%d-%d",
3203		     shost->host_no, channel, vport->number);
3204	transport_setup_device(dev);
3205
3206	error = device_add(dev);
3207	if (error) {
3208		printk(KERN_ERR "FC Virtual Port device_add failed\n");
3209		goto delete_vport;
3210	}
3211	transport_add_device(dev);
3212	transport_configure_device(dev);
3213
3214	error = fci->f->vport_create(vport, ids->disable);
3215	if (error) {
3216		printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3217		goto delete_vport_all;
3218	}
3219
3220	/*
3221	 * if the parent isn't the physical adapter's Scsi_Host, ensure
3222	 * the Scsi_Host at least contains a symlink to the vport.
3223	 */
3224	if (pdev != &shost->shost_gendev) {
3225		error = sysfs_create_link(&shost->shost_gendev.kobj,
3226				 &dev->kobj, dev_name(dev));
3227		if (error)
3228			printk(KERN_ERR
3229				"%s: Cannot create vport symlinks for "
3230				"%s, err=%d\n",
3231				__func__, dev_name(dev), error);
3232	}
3233	spin_lock_irqsave(shost->host_lock, flags);
3234	vport->flags &= ~FC_VPORT_CREATING;
3235	spin_unlock_irqrestore(shost->host_lock, flags);
3236
3237	dev_printk(KERN_NOTICE, pdev,
3238			"%s created via shost%d channel %d\n", dev_name(dev),
3239			shost->host_no, channel);
3240
3241	*ret_vport = vport;
3242
3243	return 0;
3244
3245delete_vport_all:
3246	transport_remove_device(dev);
3247	device_del(dev);
3248delete_vport:
3249	transport_destroy_device(dev);
3250	spin_lock_irqsave(shost->host_lock, flags);
3251	list_del(&vport->peers);
3252	put_device(&shost->shost_gendev);	/* for fc_host->vport list */
3253	fc_host->npiv_vports_inuse--;
3254	spin_unlock_irqrestore(shost->host_lock, flags);
3255	put_device(dev->parent);
3256	kfree(vport);
3257
3258	return error;
3259}
3260
3261/**
3262 * fc_vport_create - Admin App or LLDD requests creation of a vport
3263 * @shost:	scsi host the virtual port is connected to.
3264 * @channel:	channel on shost port connected to.
3265 * @ids:	The world wide names, FC4 port roles, etc for
3266 *              the virtual port.
3267 *
3268 * Notes:
3269 *	This routine assumes no locks are held on entry.
3270 */
3271struct fc_vport *
3272fc_vport_create(struct Scsi_Host *shost, int channel,
3273	struct fc_vport_identifiers *ids)
3274{
3275	int stat;
3276	struct fc_vport *vport;
3277
3278	stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3279		 ids, &vport);
3280	return stat ? NULL : vport;
3281}
3282EXPORT_SYMBOL(fc_vport_create);
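
/*
 * Sketch of an in-kernel caller creating an NPIV vport directly rather
 * than via the vport_create sysfs file (identifier values hypothetical):
 *
 *	struct fc_vport_identifiers vid = {
 *		.vport_type = FC_PORTTYPE_NPIV,
 *		.roles      = FC_PORT_ROLE_FCP_INITIATOR,
 *		.port_name  = wwpn,
 *		.node_name  = wwnn,
 *		.disable    = false,
 *	};
 *	vport = fc_vport_create(shost, 0, &vid);
 */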
3283
3284/**
3285 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
3286 * @vport:	fc_vport to be terminated
3287 *
3288 * Calls the LLDD vport_delete() function, then deallocates and removes
3289 * the vport from the shost and object tree.
3290 *
3291 * Notes:
3292 *	This routine assumes no locks are held on entry.
3293 */
3294int
3295fc_vport_terminate(struct fc_vport *vport)
3296{
3297	struct Scsi_Host *shost = vport_to_shost(vport);
3298	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3299	struct fc_internal *i = to_fc_internal(shost->transportt);
3300	struct device *dev = &vport->dev;
3301	unsigned long flags;
3302	int stat;
3303
3304	spin_lock_irqsave(shost->host_lock, flags);
3305	if (vport->flags & FC_VPORT_CREATING) {
3306		spin_unlock_irqrestore(shost->host_lock, flags);
3307		return -EBUSY;
3308	}
3309	if (vport->flags & (FC_VPORT_DEL)) {
3310		spin_unlock_irqrestore(shost->host_lock, flags);
3311		return -EALREADY;
3312	}
3313	vport->flags |= FC_VPORT_DELETING;
3314	spin_unlock_irqrestore(shost->host_lock, flags);
3315
3316	if (i->f->vport_delete)
3317		stat = i->f->vport_delete(vport);
3318	else
3319		stat = -ENOENT;
3320
3321	spin_lock_irqsave(shost->host_lock, flags);
3322	vport->flags &= ~FC_VPORT_DELETING;
3323	if (!stat) {
3324		vport->flags |= FC_VPORT_DELETED;
3325		list_del(&vport->peers);
3326		fc_host->npiv_vports_inuse--;
3327		put_device(&shost->shost_gendev);  /* for fc_host->vport list */
3328	}
3329	spin_unlock_irqrestore(shost->host_lock, flags);
3330
3331	if (stat)
3332		return stat;
3333
3334	if (dev->parent != &shost->shost_gendev)
3335		sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
3336	transport_remove_device(dev);
3337	device_del(dev);
3338	transport_destroy_device(dev);
3339
3340	/*
3341	 * Removing our self-reference should mean our
3342	 * release function gets called, which will drop the remaining
3343	 * parent reference and free the data structure.
3344	 */
3345	put_device(dev);			/* for self-reference */
3346
3347	return 0; /* SUCCESS */
3348}
3349EXPORT_SYMBOL(fc_vport_terminate);
3350
3351/**
3352 * fc_vport_sched_delete - workq-based delete request for a vport
3353 * @work:	vport_delete_work of the vport to be deleted.
3354 */
3355static void
3356fc_vport_sched_delete(struct work_struct *work)
3357{
3358	struct fc_vport *vport =
3359		container_of(work, struct fc_vport, vport_delete_work);
3360	int stat;
3361
3362	stat = fc_vport_terminate(vport);
3363	if (stat)
3364		dev_printk(KERN_ERR, vport->dev.parent,
3365			"%s: %s, created via shost%d channel %d, could not "
3366			"be deleted - error %d\n", __func__,
3367			dev_name(&vport->dev), vport->shost->host_no,
3368			vport->channel, stat);
3369}
3370
3371
3372/*
3373 * BSG support
3374 */
3375
3376
3377/**
3378 * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
3379 * @job:	fc_bsg_job that is to be torn down
3380 */
3381static void
3382fc_destroy_bsgjob(struct fc_bsg_job *job)
3383{
3384	unsigned long flags;
3385
3386	spin_lock_irqsave(&job->job_lock, flags);
3387	if (job->ref_cnt) {
3388		spin_unlock_irqrestore(&job->job_lock, flags);
3389		return;
3390	}
3391	spin_unlock_irqrestore(&job->job_lock, flags);
3392
3393	put_device(job->dev);	/* release reference for the request */
3394
3395	kfree(job->request_payload.sg_list);
3396	kfree(job->reply_payload.sg_list);
3397	kfree(job);
3398}
3399
3400/**
3401 * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
3402 *                  completed
3403 * @job:	fc_bsg_job that is complete
3404 */
3405static void
3406fc_bsg_jobdone(struct fc_bsg_job *job)
3407{
3408	struct request *req = job->req;
3409	struct request *rsp = req->next_rq;
3410	int err;
3411
3412	err = job->req->errors = job->reply->result;
3413
3414	if (err < 0)
3415		/* we're only returning the result field in the reply */
3416		job->req->sense_len = sizeof(uint32_t);
3417	else
3418		job->req->sense_len = job->reply_len;
3419
3420	/* we assume all request payload was transferred, residual == 0 */
3421	req->resid_len = 0;
3422
3423	if (rsp) {
3424		WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3425
3426		/* set reply (bidi) residual */
3427		rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3428				      rsp->resid_len);
3429	}
3430	blk_complete_request(req);
3431}
3432
3433/**
3434 * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
3435 * @rq:        BSG request that holds the job to be destroyed
3436 */
3437static void fc_bsg_softirq_done(struct request *rq)
3438{
3439	struct fc_bsg_job *job = rq->special;
3440	unsigned long flags;
3441
3442	spin_lock_irqsave(&job->job_lock, flags);
3443	job->state_flags |= FC_RQST_STATE_DONE;
3444	job->ref_cnt--;
3445	spin_unlock_irqrestore(&job->job_lock, flags);
3446
3447	blk_end_request_all(rq, rq->errors);
3448	fc_destroy_bsgjob(job);
3449}
3450
3451/**
3452 * fc_bsg_job_timeout - handler for when a bsg request times out
3453 * @req:	request that timed out
3454 */
3455static enum blk_eh_timer_return
3456fc_bsg_job_timeout(struct request *req)
3457{
3458	struct fc_bsg_job *job = (void *) req->special;
3459	struct Scsi_Host *shost = job->shost;
3460	struct fc_internal *i = to_fc_internal(shost->transportt);
3461	unsigned long flags;
3462	int err = 0, done = 0;
3463
3464	if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
3465		return BLK_EH_RESET_TIMER;
3466
3467	spin_lock_irqsave(&job->job_lock, flags);
3468	if (job->state_flags & FC_RQST_STATE_DONE)
3469		done = 1;
3470	else
3471		job->ref_cnt++;
3472	spin_unlock_irqrestore(&job->job_lock, flags);
3473
3474	if (!done && i->f->bsg_timeout) {
3475		/* call LLDD to abort the i/o as it has timed out */
3476		err = i->f->bsg_timeout(job);
3477		if (err)
3478			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3479				"abort failed with status %d\n", err);
3480	}
3481
3482	/* the blk_end_sync_io() doesn't check the error */
3483	if (done)
3484		return BLK_EH_NOT_HANDLED;
3485	else
3486		return BLK_EH_HANDLED;
3487}
3488
3489static int
3490fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3491{
3492	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3493
3494	BUG_ON(!req->nr_phys_segments);
3495
3496	buf->sg_list = kzalloc(sz, GFP_KERNEL);
3497	if (!buf->sg_list)
3498		return -ENOMEM;
3499	sg_init_table(buf->sg_list, req->nr_phys_segments);
3500	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3501	buf->payload_len = blk_rq_bytes(req);
3502	return 0;
3503}
3504
3505
3506/**
3507 * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
3508 *                   bsg request
3509 * @shost:	SCSI Host corresponding to the bsg object
3510 * @rport:	(optional) FC Remote Port corresponding to the bsg object
3511 * @req:	BSG request that needs a job structure
3512 */
3513static int
3514fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3515	struct request *req)
3516{
3517	struct fc_internal *i = to_fc_internal(shost->transportt);
3518	struct request *rsp = req->next_rq;
3519	struct fc_bsg_job *job;
3520	int ret;
3521
3522	BUG_ON(req->special);
3523
3524	job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3525			GFP_KERNEL);
3526	if (!job)
3527		return -ENOMEM;
3528
3529	/*
3530	 * Note: this is a bit silly.
3531	 * The request gets formatted as a SGIO v4 ioctl request, which
3532	 * then gets reformatted as a blk request, which then gets
3533	 * reformatted as a fc bsg request. And on completion, we have
3534	 * to wrap return results such that SGIO v4 thinks it was a scsi
3535	 * status.  I hope this was all worth it.
3536	 */
3537
3538	req->special = job;
3539	job->shost = shost;
3540	job->rport = rport;
3541	job->req = req;
3542	if (i->f->dd_bsg_size)
3543		job->dd_data = (void *)&job[1];
3544	spin_lock_init(&job->job_lock);
3545	job->request = (struct fc_bsg_request *)req->cmd;
3546	job->request_len = req->cmd_len;
3547	job->reply = req->sense;
3548	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
3549						 * allocated */
3550	if (req->bio) {
3551		ret = fc_bsg_map_buffer(&job->request_payload, req);
3552		if (ret)
3553			goto failjob_rls_job;
3554	}
3555	if (rsp && rsp->bio) {
3556		ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3557		if (ret)
3558			goto failjob_rls_rqst_payload;
3559	}
3560	job->job_done = fc_bsg_jobdone;
3561	if (rport)
3562		job->dev = &rport->dev;
3563	else
3564		job->dev = &shost->shost_gendev;
3565	get_device(job->dev);		/* take a reference for the request */
3566
3567	job->ref_cnt = 1;
3568
3569	return 0;
3570
3571
3572failjob_rls_rqst_payload:
3573	kfree(job->request_payload.sg_list);
3574failjob_rls_job:
3575	kfree(job);
3576	return -ENOMEM;
3577}
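
/*
 * For reference: job->request, set up above from req->cmd, points at the
 * caller-supplied struct fc_bsg_request (scsi_bsg_fc.h).  Roughly
 * (abridged from that header, shown here only as a reminder):
 *
 *	struct fc_bsg_request {
 *		uint32_t msgcode;
 *		union {
 *			struct fc_bsg_host_add_rport	h_addrport;
 *			struct fc_bsg_host_del_rport	h_delrport;
 *			struct fc_bsg_host_els		h_els;
 *			struct fc_bsg_host_ct		h_ct;
 *			struct fc_bsg_host_vendor	h_vendor;
 *			struct fc_bsg_rport_els		r_els;
 *			struct fc_bsg_rport_ct		r_ct;
 *		} rqst_data;
 *	};
 *
 * The dispatch routines below check job->request_len against the size
 * of msgcode plus the size of the structure selected by msgcode.
 */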
3578
3579
3580enum fc_dispatch_result {
3581	FC_DISPATCH_BREAK,	/* on return, q is locked, break from q loop */
3582	FC_DISPATCH_LOCKED,	/* on return, q is locked, continue on */
3583	FC_DISPATCH_UNLOCKED,	/* on return, q is unlocked, continue on */
3584};
3585
3586
3587/**
3588 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
3589 * @q:		fc host request queue
3590 * @shost:	scsi host that the bsg request is destined for
3591 * @job:	bsg job to be processed
3592 */
3593static enum fc_dispatch_result
3594fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3595			 struct fc_bsg_job *job)
3596{
3597	struct fc_internal *i = to_fc_internal(shost->transportt);
3598	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
3599	int ret;
3600
3601	/* Validate the host command */
3602	switch (job->request->msgcode) {
3603	case FC_BSG_HST_ADD_RPORT:
3604		cmdlen += sizeof(struct fc_bsg_host_add_rport);
3605		break;
3606
3607	case FC_BSG_HST_DEL_RPORT:
3608		cmdlen += sizeof(struct fc_bsg_host_del_rport);
3609		break;
3610
3611	case FC_BSG_HST_ELS_NOLOGIN:
3612		cmdlen += sizeof(struct fc_bsg_host_els);
3613		/* there better be xmt and rcv payloads */
3614		if ((!job->request_payload.payload_len) ||
3615		    (!job->reply_payload.payload_len)) {
3616			ret = -EINVAL;
3617			goto fail_host_msg;
3618		}
3619		break;
3620
3621	case FC_BSG_HST_CT:
3622		cmdlen += sizeof(struct fc_bsg_host_ct);
3623		/* there better be xmt and rcv payloads */
3624		if ((!job->request_payload.payload_len) ||
3625		    (!job->reply_payload.payload_len)) {
3626			ret = -EINVAL;
3627			goto fail_host_msg;
3628		}
3629		break;
3630
3631	case FC_BSG_HST_VENDOR:
3632		cmdlen += sizeof(struct fc_bsg_host_vendor);
3633		if ((shost->hostt->vendor_id == 0L) ||
3634		    (job->request->rqst_data.h_vendor.vendor_id !=
3635			shost->hostt->vendor_id)) {
3636			ret = -ESRCH;
3637			goto fail_host_msg;
3638		}
3639		break;
3640
3641	default:
3642		ret = -EBADR;
3643		goto fail_host_msg;
3644	}
3645
3646	/* check if we really have all the request data needed */
3647	if (job->request_len < cmdlen) {
3648		ret = -ENOMSG;
3649		goto fail_host_msg;
3650	}
3651
3652	ret = i->f->bsg_request(job);
3653	if (!ret)
3654		return FC_DISPATCH_UNLOCKED;
3655
3656fail_host_msg:
3657	/* return the errno failure code as the only status */
3658	BUG_ON(job->reply_len < sizeof(uint32_t));
3659	job->reply->result = ret;
3660	job->reply_len = sizeof(uint32_t);
3661	fc_bsg_jobdone(job);
3662	return FC_DISPATCH_UNLOCKED;
3663}
3664
3665
3666/*
3667 * fc_bsg_goose_queue - restart rport queue in case it was stopped
3668 * @rport:	rport to be restarted
3669 */
3670static void
3671fc_bsg_goose_queue(struct fc_rport *rport)
3672{
3673	unsigned long flags;
3675
3676	if (!rport->rqst_q)
3677		return;
3678
3679	get_device(&rport->dev);
3680
3681	spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
3682	__blk_run_queue(rport->rqst_q);
3689	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
3690
3691	put_device(&rport->dev);
3692}
3693
3694
3695/**
3696 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3697 * @q:		rport request queue
3698 * @shost:	scsi host the rport is attached to
3699 * @rport:	rport that the request is destined for
3700 * @job:	bsg job to be processed
3701 */
3702static enum fc_dispatch_result
3703fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3704			 struct fc_rport *rport, struct fc_bsg_job *job)
3705{
3706	struct fc_internal *i = to_fc_internal(shost->transportt);
3707	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
3708	int ret;
3709
3710	/* Validate the rport command */
3711	switch (job->request->msgcode) {
3712	case FC_BSG_RPT_ELS:
3713		cmdlen += sizeof(struct fc_bsg_rport_els);
3714		goto check_bidi;
3715
3716	case FC_BSG_RPT_CT:
3717		cmdlen += sizeof(struct fc_bsg_rport_ct);
3718check_bidi:
3719		/* there better be xmt and rcv payloads */
3720		if ((!job->request_payload.payload_len) ||
3721		    (!job->reply_payload.payload_len)) {
3722			ret = -EINVAL;
3723			goto fail_rport_msg;
3724		}
3725		break;
3726	default:
3727		ret = -EBADR;
3728		goto fail_rport_msg;
3729	}
3730
3731	/* check if we really have all the request data needed */
3732	if (job->request_len < cmdlen) {
3733		ret = -ENOMSG;
3734		goto fail_rport_msg;
3735	}
3736
3737	ret = i->f->bsg_request(job);
3738	if (!ret)
3739		return FC_DISPATCH_UNLOCKED;
3740
3741fail_rport_msg:
3742	/* return the errno failure code as the only status */
3743	BUG_ON(job->reply_len < sizeof(uint32_t));
3744	job->reply->result = ret;
3745	job->reply_len = sizeof(uint32_t);
3746	fc_bsg_jobdone(job);
3747	return FC_DISPATCH_UNLOCKED;
3748}
3749
3750
3751/**
3752 * fc_bsg_request_handler - generic handler for bsg requests
3753 * @q:		request queue to manage
3754 * @shost:	Scsi_Host related to the bsg object
3755 * @rport:	FC remote port related to the bsg object (optional)
3756 * @dev:	device structure for bsg object
3757 */
3758static void
3759fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3760		       struct fc_rport *rport, struct device *dev)
3761{
3762	struct request *req;
3763	struct fc_bsg_job *job;
3764	enum fc_dispatch_result ret;
3765
3766	if (!get_device(dev))
3767		return;
3768
3769	while (!blk_queue_plugged(q)) {
3770		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
3771				break;
3772
3773		req = blk_fetch_request(q);
3774		if (!req)
3775			break;
3776
3777		if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3778			req->errors = -ENXIO;
3779			spin_unlock_irq(q->queue_lock);
3780			blk_end_request(req, -ENXIO, blk_rq_bytes(req));
3781			spin_lock_irq(q->queue_lock);
3782			continue;
3783		}
3784
3785		spin_unlock_irq(q->queue_lock);
3786
3787		ret = fc_req_to_bsgjob(shost, rport, req);
3788		if (ret) {
3789			req->errors = ret;
3790			blk_end_request(req, ret, blk_rq_bytes(req));
3791			spin_lock_irq(q->queue_lock);
3792			continue;
3793		}
3794
3795		job = req->special;
3796
3797		/* check if we have the msgcode value at least */
3798		if (job->request_len < sizeof(uint32_t)) {
3799			BUG_ON(job->reply_len < sizeof(uint32_t));
3800			job->reply->result = -ENOMSG;
3801			job->reply_len = sizeof(uint32_t);
3802			fc_bsg_jobdone(job);
3803			spin_lock_irq(q->queue_lock);
3804			continue;
3805		}
3806
3807		/* the dispatch routines will unlock the queue_lock */
3808		if (rport)
3809			ret = fc_bsg_rport_dispatch(q, shost, rport, job);
3810		else
3811			ret = fc_bsg_host_dispatch(q, shost, job);
3812
3813		/* did the dispatcher hit a state where it cannot process any more? */
3814		if (ret == FC_DISPATCH_BREAK)
3815			break;
3816
3817		/* did the dispatcher release the lock? */
3818		if (ret == FC_DISPATCH_UNLOCKED)
3819			spin_lock_irq(q->queue_lock);
3820	}
3821
3822	spin_unlock_irq(q->queue_lock);
3823	put_device(dev);
3824	spin_lock_irq(q->queue_lock);
3825}
3826
3827
3828/**
3829 * fc_bsg_host_handler - handler for bsg requests for a fc host
3830 * @q:		fc host request queue
3831 */
3832static void
3833fc_bsg_host_handler(struct request_queue *q)
3834{
3835	struct Scsi_Host *shost = q->queuedata;
3836
3837	fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
3838}
3839
3840
3841/**
3842 * fc_bsg_rport_handler - handler for bsg requests for a fc rport
3843 * @q:		rport request queue
3844 */
3845static void
3846fc_bsg_rport_handler(struct request_queue *q)
3847{
3848	struct fc_rport *rport = q->queuedata;
3849	struct Scsi_Host *shost = rport_to_shost(rport);
3850
3851	fc_bsg_request_handler(q, shost, rport, &rport->dev);
3852}
3853
3854
3855/**
3856 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
3857 * @shost:	shost for fc_host
3858 * @fc_host:	fc_host that the bsg structures are being added to
3859 */
3860static int
3861fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
3862{
3863	struct device *dev = &shost->shost_gendev;
3864	struct fc_internal *i = to_fc_internal(shost->transportt);
3865	struct request_queue *q;
3866	int err;
3867	char bsg_name[20];
3868
3869	fc_host->rqst_q = NULL;
3870
3871	if (!i->f->bsg_request)
3872		return -ENOTSUPP;
3873
3874	snprintf(bsg_name, sizeof(bsg_name),
3875		 "fc_host%d", shost->host_no);
3876
3877	q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
3878	if (!q) {
3879		printk(KERN_ERR "fc_host%d: bsg interface failed to "
3880				"initialize - no request queue\n",
3881				 shost->host_no);
3882		return -ENOMEM;
3883	}
3884
3885	q->queuedata = shost;
3886	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3887	blk_queue_softirq_done(q, fc_bsg_softirq_done);
3888	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3889	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3890
3891	err = bsg_register_queue(q, dev, bsg_name, NULL);
3892	if (err) {
3893		printk(KERN_ERR "fc_host%d: bsg interface failed to "
3894				"initialize - register queue\n",
3895				shost->host_no);
3896		blk_cleanup_queue(q);
3897		return err;
3898	}
3899
3900	fc_host->rqst_q = q;
3901	return 0;
3902}
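
/*
 * Note (assumes a standard udev setup): the queue registered above is
 * exposed to user space as /dev/bsg/fc_host<N>, matching bsg_name, in
 * addition to the usual /sys/class/fc_host/host<N> attributes.
 */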
3903
3904
3905/**
3906 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
3907 * @shost:	shost that rport is attached to
3908 * @rport:	rport that the bsg hooks are being attached to
3909 */
3910static int
3911fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
3912{
3913	struct device *dev = &rport->dev;
3914	struct fc_internal *i = to_fc_internal(shost->transportt);
3915	struct request_queue *q;
3916	int err;
3917
3918	rport->rqst_q = NULL;
3919
3920	if (!i->f->bsg_request)
3921		return -ENOTSUPP;
3922
3923	q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
3924	if (!q) {
3925		printk(KERN_ERR "%s: bsg interface failed to "
3926				"initialize - no request queue\n",
3927				 dev->kobj.name);
3928		return -ENOMEM;
3929	}
3930
3931	q->queuedata = rport;
3932	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3933	blk_queue_softirq_done(q, fc_bsg_softirq_done);
3934	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3935	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
3936
3937	err = bsg_register_queue(q, dev, NULL, NULL);
3938	if (err) {
3939		printk(KERN_ERR "%s: bsg interface failed to "
3940				"initialize - register queue\n",
3941				 dev->kobj.name);
3942		blk_cleanup_queue(q);
3943		return err;
3944	}
3945
3946	rport->rqst_q = q;
3947	return 0;
3948}
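
/*
 * Note (assumes a standard udev setup): since no explicit name is
 * passed to bsg_register_queue(), the rport queue registered above is
 * exposed to user space under the rport's device name, i.e.
 * /dev/bsg/rport-<host>:<channel>-<id>.
 */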
3949
3950
3951/**
3952 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
3953 * @q:	the request_queue that is to be torn down.
3954 */
3955static void
3956fc_bsg_remove(struct request_queue *q)
3957{
3958	if (q) {
3959		bsg_unregister_queue(q);
3960		blk_cleanup_queue(q);
3961	}
3962}
3963
3964
3965/* Original Author:  Martin Hicks */
3966MODULE_AUTHOR("James Smart");
3967MODULE_DESCRIPTION("FC Transport Attributes");
3968MODULE_LICENSE("GPL");
3969
3970module_init(fc_transport_init);
3971module_exit(fc_transport_exit);
3972