tcm_qla2xxx.c revision f4c24db1b7ad0ce84409e15744d26c6f86a96840
1/*******************************************************************************
2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
4 *
5 * (c) Copyright 2010-2013 Datera, Inc.
6 *
7 * Author: Nicholas A. Bellinger <nab@daterainc.com>
8 *
9 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
10 * the TCM_FC / Open-FCoE.org fabric module.
11 *
12 * Copyright (c) 2010 Cisco Systems, Inc
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22 * GNU General Public License for more details.
23 ****************************************************************************/
24
25
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <generated/utsrelease.h>
29#include <linux/utsname.h>
30#include <linux/init.h>
31#include <linux/list.h>
32#include <linux/slab.h>
33#include <linux/kthread.h>
34#include <linux/types.h>
35#include <linux/string.h>
36#include <linux/configfs.h>
37#include <linux/ctype.h>
38#include <asm/unaligned.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
43#include <target/target_core_base.h>
44#include <target/target_core_fabric.h>
45#include <target/target_core_fabric_configfs.h>
46#include <target/target_core_configfs.h>
47#include <target/configfs_macros.h>
48
49#include "qla_def.h"
50#include "qla_target.h"
51#include "tcm_qla2xxx.h"
52
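/*
 * Workqueues for deferring work out of hardware-interrupt/hardware_lock
 * context; tcm_qla2xxx_free_wq is used below for command release and
 * FCP WRITE/DIF completion handling.
 */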
53struct workqueue_struct *tcm_qla2xxx_free_wq;
54struct workqueue_struct *tcm_qla2xxx_cmd_wq;
55
56/*
57 * Parse WWN.
58 * If strict, we require lower-case hex and colon separators to be sure
59 * the name is the same as what would be generated by tcm_qla2xxx_format_wwn()
60 * so the name and wwn are mapped one-to-one.
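 * A strict name looks like "50:06:01:60:ba:20:1d:e4" (illustrative value).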
61 */
62static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
63{
64	const char *cp;
65	char c;
66	u32 nibble;
67	u32 byte = 0;
68	u32 pos = 0;
69	u32 err;
70
71	*wwn = 0;
72	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
73		c = *cp;
74		if (c == '\n' && cp[1] == '\0')
75			continue;
76		if (strict && pos++ == 2 && byte++ < 7) {
77			pos = 0;
78			if (c == ':')
79				continue;
80			err = 1;
81			goto fail;
82		}
83		if (c == '\0') {
84			err = 2;
85			if (strict && byte != 8)
86				goto fail;
87			return cp - name;
88		}
89		err = 3;
90		if (isdigit(c))
91			nibble = c - '0';
92		else if (isxdigit(c) && (islower(c) || !strict))
93			nibble = tolower(c) - 'a' + 10;
94		else
95			goto fail;
96		*wwn = (*wwn << 4) | nibble;
97	}
98	err = 4;
99fail:
100	pr_debug("err %u len %zu pos %u byte %u\n",
101			err, cp - name, pos, byte);
102	return -1;
103}
104
105static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
106{
107	u8 b[8];
108
109	put_unaligned_be64(wwn, b);
110	return snprintf(buf, len,
111		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
112		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
113}
114
115static char *tcm_qla2xxx_get_fabric_name(void)
116{
117	return "qla2xxx";
118}
119
120/*
121 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
122 */
123static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
124{
125	unsigned int i, j;
126	u8 wwn[8];
127
128	memset(wwn, 0, sizeof(wwn));
129
130	/* Validate and store the new name */
131	for (i = 0, j = 0; i < 16; i++) {
132		int value;
133
134		value = hex_to_bin(*ns++);
135		if (value >= 0)
136			j = (j << 4) | value;
137		else
138			return -EINVAL;
139
140		if (i % 2) {
141			wwn[i/2] = j & 0xff;
142			j = 0;
143		}
144	}
145
146	*nm = wwn_to_u64(wwn);
147	return 0;
148}
149
150/*
151 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
152 * store_fc_host_vport_create()
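 * The expected input is "<16-hex-digit WWPN>:<16-hex-digit WWNN>", e.g.
 * "2100001b329b01a2:2000001b329b01a2" (illustrative values).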
153 */
154static int tcm_qla2xxx_npiv_parse_wwn(
155	const char *name,
156	size_t count,
157	u64 *wwpn,
158	u64 *wwnn)
159{
160	unsigned int cnt = count;
161	int rc;
162
163	*wwpn = 0;
164	*wwnn = 0;
165
166	/* count may include a LF at end of string */
167	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
168		cnt--;
169
170	/* validate we have the 16+1+16 characters for WWPN:WWNN */
171	if ((cnt != (16+1+16)) || (name[16] != ':'))
172		return -EINVAL;
173
174	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
175	if (rc != 0)
176		return rc;
177
178	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
179	if (rc != 0)
180		return rc;
181
182	return 0;
183}
184
185static char *tcm_qla2xxx_npiv_get_fabric_name(void)
186{
187	return "qla2xxx_npiv";
188}
189
190static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
191{
192	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
193				struct tcm_qla2xxx_tpg, se_tpg);
194	struct tcm_qla2xxx_lport *lport = tpg->lport;
195	u8 proto_id;
196
197	switch (lport->lport_proto_id) {
198	case SCSI_PROTOCOL_FCP:
199	default:
200		proto_id = fc_get_fabric_proto_ident(se_tpg);
201		break;
202	}
203
204	return proto_id;
205}
206
207static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
208{
209	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
210				struct tcm_qla2xxx_tpg, se_tpg);
211	struct tcm_qla2xxx_lport *lport = tpg->lport;
212
213	return lport->lport_naa_name;
214}
215
216static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
217{
218	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
219				struct tcm_qla2xxx_tpg, se_tpg);
220	return tpg->lport_tpgt;
221}
222
223static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
224{
225	return 1;
226}
227
228static u32 tcm_qla2xxx_get_pr_transport_id(
229	struct se_portal_group *se_tpg,
230	struct se_node_acl *se_nacl,
231	struct t10_pr_registration *pr_reg,
232	int *format_code,
233	unsigned char *buf)
234{
235	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
236				struct tcm_qla2xxx_tpg, se_tpg);
237	struct tcm_qla2xxx_lport *lport = tpg->lport;
238	int ret = 0;
239
240	switch (lport->lport_proto_id) {
241	case SCSI_PROTOCOL_FCP:
242	default:
243		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
244					format_code, buf);
245		break;
246	}
247
248	return ret;
249}
250
251static u32 tcm_qla2xxx_get_pr_transport_id_len(
252	struct se_portal_group *se_tpg,
253	struct se_node_acl *se_nacl,
254	struct t10_pr_registration *pr_reg,
255	int *format_code)
256{
257	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
258				struct tcm_qla2xxx_tpg, se_tpg);
259	struct tcm_qla2xxx_lport *lport = tpg->lport;
260	int ret = 0;
261
262	switch (lport->lport_proto_id) {
263	case SCSI_PROTOCOL_FCP:
264	default:
265		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
266					format_code);
267		break;
268	}
269
270	return ret;
271}
272
273static char *tcm_qla2xxx_parse_pr_out_transport_id(
274	struct se_portal_group *se_tpg,
275	const char *buf,
276	u32 *out_tid_len,
277	char **port_nexus_ptr)
278{
279	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
280				struct tcm_qla2xxx_tpg, se_tpg);
281	struct tcm_qla2xxx_lport *lport = tpg->lport;
282	char *tid = NULL;
283
284	switch (lport->lport_proto_id) {
285	case SCSI_PROTOCOL_FCP:
286	default:
287		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
288					port_nexus_ptr);
289		break;
290	}
291
292	return tid;
293}
294
295static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
296{
297	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
298				struct tcm_qla2xxx_tpg, se_tpg);
299
300	return tpg->tpg_attrib.generate_node_acls;
301}
302
303static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
304{
305	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
306				struct tcm_qla2xxx_tpg, se_tpg);
307
308	return tpg->tpg_attrib.cache_dynamic_acls;
309}
310
311static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
312{
313	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
314				struct tcm_qla2xxx_tpg, se_tpg);
315
316	return tpg->tpg_attrib.demo_mode_write_protect;
317}
318
319static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
320{
321	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
322				struct tcm_qla2xxx_tpg, se_tpg);
323
324	return tpg->tpg_attrib.prod_mode_write_protect;
325}
326
327static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
328{
329	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
330				struct tcm_qla2xxx_tpg, se_tpg);
331
332	return tpg->tpg_attrib.demo_mode_login_only;
333}
334
335static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
336	struct se_portal_group *se_tpg)
337{
338	struct tcm_qla2xxx_nacl *nacl;
339
340	nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
341	if (!nacl) {
342		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
343		return NULL;
344	}
345
346	return &nacl->se_node_acl;
347}
348
349static void tcm_qla2xxx_release_fabric_acl(
350	struct se_portal_group *se_tpg,
351	struct se_node_acl *se_nacl)
352{
353	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
354			struct tcm_qla2xxx_nacl, se_node_acl);
355	kfree(nacl);
356}
357
358static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
359{
360	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
361				struct tcm_qla2xxx_tpg, se_tpg);
362
363	return tpg->lport_tpgt;
364}
365
366static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
367{
368	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
369			struct qla_tgt_mgmt_cmd, free_work);
370
371	transport_generic_free_cmd(&mcmd->se_cmd, 0);
372}
373
374/*
375 * Called from qla_target_template->free_mcmd(), and will call
376 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
377 * release callback.  qla_hw_data->hardware_lock is expected to be held
378 */
379static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
380{
381	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
382	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
383}
384
385static void tcm_qla2xxx_complete_free(struct work_struct *work)
386{
387	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
388
389	transport_generic_free_cmd(&cmd->se_cmd, 0);
390}
391
392/*
393 * Called from qla_target_template->free_cmd(), and will call
394 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
395 * release callback.  qla_hw_data->hardware_lock is expected to be held
396 */
397static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
398{
399	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
400	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
401}
402
403/*
404 * Called from struct target_core_fabric_ops->check_stop_free() context
405 */
406static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
407{
408	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
409}
410
411/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
412 * fabric descriptor. @se_cmd: the command to release.
413 */
414static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
415{
416	struct qla_tgt_cmd *cmd;
417
418	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
419		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
420				struct qla_tgt_mgmt_cmd, se_cmd);
421		qlt_free_mcmd(mcmd);
422		return;
423	}
424
425	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
426	qlt_free_cmd(cmd);
427}
428
429static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
430{
431	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
432	struct scsi_qla_host *vha;
433	unsigned long flags;
434
435	BUG_ON(!sess);
436	vha = sess->vha;
437
438	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
439	target_sess_cmd_list_set_waiting(se_sess);
440	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
441
442	return 1;
443}
444
445static void tcm_qla2xxx_close_session(struct se_session *se_sess)
446{
447	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
448	struct scsi_qla_host *vha;
449	unsigned long flags;
450
451	BUG_ON(!sess);
452	vha = sess->vha;
453
454	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
455	qlt_unreg_sess(sess);
456	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
457}
458
459static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
460{
461	return 0;
462}
463
464static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
465{
466	struct qla_tgt_cmd *cmd = container_of(se_cmd,
467				struct qla_tgt_cmd, se_cmd);
468
469	cmd->bufflen = se_cmd->data_length;
470	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
471
472	cmd->sg_cnt = se_cmd->t_data_nents;
473	cmd->sg = se_cmd->t_data_sg;
474
475	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
476	cmd->prot_sg = se_cmd->t_prot_sg;
477	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
478	se_cmd->pi_err = 0;
479
480	/*
481	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
482	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
483	 */
484	return qlt_rdy_to_xfer(cmd);
485}
486
487static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
488{
489	unsigned long flags;
490	/*
491	 * Check for WRITE_PENDING status to determine if we need to wait for
492	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
493	 */
494	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
495	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
496	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
497		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
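		/* timeout value is in jiffies */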
498		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
499						3000);
500		return 0;
501	}
502	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
503
504	return 0;
505}
506
507static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
508{
509	return;
510}
511
512static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
513{
514	struct qla_tgt_cmd *cmd = container_of(se_cmd,
515				struct qla_tgt_cmd, se_cmd);
516
517	return cmd->tag;
518}
519
520static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
521{
522	return 0;
523}
524
525/*
526 * Called from process context in qla_target.c:qlt_do_work() code
527 */
528static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
529	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
530	int data_dir, int bidi)
531{
532	struct se_cmd *se_cmd = &cmd->se_cmd;
533	struct se_session *se_sess;
534	struct qla_tgt_sess *sess;
535	int flags = TARGET_SCF_ACK_KREF;
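	/* With TARGET_SCF_ACK_KREF the target core takes an extra se_cmd
	 * reference that is dropped in tcm_qla2xxx_check_stop_free(). */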
536
537	if (bidi)
538		flags |= TARGET_SCF_BIDI_OP;
539
540	sess = cmd->sess;
541	if (!sess) {
542		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
543		return -EINVAL;
544	}
545
546	se_sess = sess->se_sess;
547	if (!se_sess) {
548		pr_err("Unable to locate active struct se_session\n");
549		return -EINVAL;
550	}
551
552	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
553				cmd->unpacked_lun, data_length, fcp_task_attr,
554				data_dir, flags);
555}
556
557static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
558{
559	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
560
561	/*
562	 * Ensure that the complete FCP WRITE payload has been received.
563	 * Otherwise return an exception via CHECK_CONDITION status.
564	 */
565	if (!cmd->write_data_transferred) {
566		/*
567		 * Check if se_cmd has already been aborted via LUN_RESET, and
568		 * is waiting upon completion in tcm_qla2xxx_write_pending_status()
569		 */
570		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
571			complete(&cmd->se_cmd.t_transport_stop_comp);
572			return;
573		}
574
575		if (cmd->se_cmd.pi_err)
576			transport_generic_request_failure(&cmd->se_cmd,
577				cmd->se_cmd.pi_err);
578		else
579			transport_generic_request_failure(&cmd->se_cmd,
580				TCM_CHECK_CONDITION_ABORT_CMD);
581
582		return;
583	}
584
585	return target_execute_cmd(&cmd->se_cmd);
586}
587
588/*
589 * Called from qla_target.c:qlt_do_ctio_completion()
590 */
591static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
592{
593	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
594	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
595}
596
597static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
598{
599	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
600
601	/* Take an extra kref to prevent the cmd from being freed too early;
602	 * we need to wait for the SCSI status/CHECK CONDITION response
603	 * generated by transport_generic_request_failure() to finish.
604	 */
605	kref_get(&cmd->se_cmd.cmd_kref);
606	transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
607}
608
609/*
610 * Called from qla_target.c:qlt_do_ctio_completion()
611 */
612static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
613{
614	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
615	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
616}
617
618/*
619 * Called from qla_target.c:qlt_issue_task_mgmt()
620 */
621static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
622	uint8_t tmr_func, uint32_t tag)
623{
624	struct qla_tgt_sess *sess = mcmd->sess;
625	struct se_cmd *se_cmd = &mcmd->se_cmd;
626
627	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
628			tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
629}
630
631static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
632{
633	struct qla_tgt_cmd *cmd = container_of(se_cmd,
634				struct qla_tgt_cmd, se_cmd);
635
636	cmd->bufflen = se_cmd->data_length;
637	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
638	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
639
640	cmd->sg_cnt = se_cmd->t_data_nents;
641	cmd->sg = se_cmd->t_data_sg;
642	cmd->offset = 0;
643
644	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
645	cmd->prot_sg = se_cmd->t_prot_sg;
646	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
647	se_cmd->pi_err = 0;
648
649	/*
650	 * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
651	 */
652	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
653				se_cmd->scsi_status);
654}
655
656static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
657{
658	struct qla_tgt_cmd *cmd = container_of(se_cmd,
659				struct qla_tgt_cmd, se_cmd);
660	int xmit_type = QLA_TGT_XMIT_STATUS;
661
662	cmd->bufflen = se_cmd->data_length;
663	cmd->sg = NULL;
664	cmd->sg_cnt = 0;
665	cmd->offset = 0;
666	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
667	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
668
669	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
670		/*
671		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
672		 * for the qlt_xmit_response() LLD code
673		 */
674		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
675			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
676			se_cmd->residual_count = 0;
677		}
678		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
679		se_cmd->residual_count += se_cmd->data_length;
680
681		cmd->bufflen = 0;
682	}
683	/*
684	 * Now queue the status response to the qla2xxx LLD code and response ring
685	 */
686	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
687}
688
689static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
690{
691	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
692	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
693				struct qla_tgt_mgmt_cmd, se_cmd);
694
695	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
696			mcmd, se_tmr->function, se_tmr->response);
697	/*
698	 * Do translation between TCM TM response codes and
699	 * QLA2xxx FC TM response codes.
700	 */
701	switch (se_tmr->response) {
702	case TMR_FUNCTION_COMPLETE:
703		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
704		break;
705	case TMR_TASK_DOES_NOT_EXIST:
706		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
707		break;
708	case TMR_FUNCTION_REJECTED:
709		mcmd->fc_tm_rsp = FC_TM_REJECT;
710		break;
711	case TMR_LUN_DOES_NOT_EXIST:
712	default:
713		mcmd->fc_tm_rsp = FC_TM_FAILED;
714		break;
715	}
716	/*
717	 * Queue the TM response to QLA2xxx LLD to build a
718	 * CTIO response packet.
719	 */
720	qlt_xmit_tm_rsp(mcmd);
721}
722
723static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
724{
725	struct qla_tgt_cmd *cmd = container_of(se_cmd,
726				struct qla_tgt_cmd, se_cmd);
727	struct scsi_qla_host *vha = cmd->vha;
728	struct qla_hw_data *ha = vha->hw;
729
730	if (!cmd->sg_mapped)
731		return;
732
733	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
734	cmd->sg_mapped = 0;
735}
736
737/* Local pointer to allocated TCM configfs fabric module */
738struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
739struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
740
741static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
742			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
743/*
744 * Expected to be called with struct qla_hw_data->hardware_lock held
745 */
746static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
747{
748	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
749	struct se_portal_group *se_tpg = se_nacl->se_tpg;
750	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
751	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
752				struct tcm_qla2xxx_lport, lport_wwn);
753	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
754				struct tcm_qla2xxx_nacl, se_node_acl);
755	void *node;
756
757	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
758
759	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
760	if (WARN_ON(node && (node != se_nacl))) {
761		/*
762		 * The nacl no longer matches what we think it should be.
763		 * Most likely a new dynamic acl has been added while
764		 * someone dropped the hardware lock.  It clearly is a
765		 * bug elsewhere, but this bit can't make things worse.
766		 */
767		btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
768			       node, GFP_ATOMIC);
769	}
770
771	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
772	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
773	/*
774	 * Now clear the se_nacl and session pointers from our HW lport lookup
775	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
776	 *
777	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
778	 * target_wait_for_sess_cmds() before the session waits for outstanding
779	 * I/O to complete, to avoid a race between session shutdown execution
780	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
781	 */
782	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
783}
784
785static void tcm_qla2xxx_release_session(struct kref *kref)
786{
787	struct se_session *se_sess = container_of(kref,
788			struct se_session, sess_kref);
789
790	qlt_unreg_sess(se_sess->fabric_sess_ptr);
791}
792
793static void tcm_qla2xxx_put_session(struct se_session *se_sess)
794{
795	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
796	struct qla_hw_data *ha = sess->vha->hw;
797	unsigned long flags;
798
799	spin_lock_irqsave(&ha->hardware_lock, flags);
800	kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
801	spin_unlock_irqrestore(&ha->hardware_lock, flags);
802}
803
804static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
805{
806	if (!sess)
807		return;
808
809	assert_spin_locked(&sess->vha->hw->hardware_lock);
810	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
811}
812
813static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
814{
815	assert_spin_locked(&sess->vha->hw->hardware_lock);
816	target_sess_cmd_list_set_waiting(sess->se_sess);
817}
818
819static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
820	struct se_portal_group *se_tpg,
821	struct config_group *group,
822	const char *name)
823{
824	struct se_node_acl *se_nacl, *se_nacl_new;
825	struct tcm_qla2xxx_nacl *nacl;
826	u64 wwnn;
827	u32 qla2xxx_nexus_depth;
828
829	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
830		return ERR_PTR(-EINVAL);
831
832	se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
833	if (!se_nacl_new)
834		return ERR_PTR(-ENOMEM);
835/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
836	qla2xxx_nexus_depth = 1;
837
838	/*
839	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
840	 * when converting a NodeACL from demo mode -> explict
841	 * when converting a NodeACL from demo mode -> explicit
842	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
843				name, qla2xxx_nexus_depth);
844	if (IS_ERR(se_nacl)) {
845		tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
846		return se_nacl;
847	}
848	/*
849	 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
850	 */
851	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
852	nacl->nport_wwnn = wwnn;
853	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
854
855	return se_nacl;
856}
857
858static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
859{
860	struct se_portal_group *se_tpg = se_acl->se_tpg;
861	struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
862				struct tcm_qla2xxx_nacl, se_node_acl);
863
864	core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
865	kfree(nacl);
866}
867
868/* Start items for tcm_qla2xxx_tpg_attrib_cit */
869
870#define DEF_QLA_TPG_ATTRIB(name)					\
871									\
872static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
873	struct se_portal_group *se_tpg,					\
874	char *page)							\
875{									\
876	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
877			struct tcm_qla2xxx_tpg, se_tpg);		\
878									\
879	return sprintf(page, "%u\n", tpg->tpg_attrib.name);	\
880}									\
881									\
882static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
883	struct se_portal_group *se_tpg,					\
884	const char *page,						\
885	size_t count)							\
886{									\
887	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
888			struct tcm_qla2xxx_tpg, se_tpg);		\
889	unsigned long val;						\
890	int ret;							\
891									\
892	ret = kstrtoul(page, 0, &val);					\
893	if (ret < 0) {							\
894		pr_err("kstrtoul() failed with"				\
895				" ret: %d\n", ret);			\
896		return -EINVAL;						\
897	}								\
898	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
899									\
900	return (!ret) ? count : -EINVAL;				\
901}
902
903#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
904									\
905static int tcm_qla2xxx_set_attrib_##_name(				\
906	struct tcm_qla2xxx_tpg *tpg,					\
907	unsigned long val)						\
908{									\
909	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
910									\
911	if ((val != 0) && (val != 1)) {					\
912		pr_err("Illegal boolean value %lu\n", val);		\
913		return -EINVAL;						\
914	}								\
915									\
916	a->_name = val;							\
917	return 0;							\
918}
919
920#define QLA_TPG_ATTR(_name, _mode) \
921	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
922
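/*
 * Together, DEF_QLA_TPG_ATTR_BOOL(x), DEF_QLA_TPG_ATTRIB(x) and
 * QLA_TPG_ATTR(x, mode) generate the show/store handlers and the configfs
 * attribute exposed under the TPG's attrib/ group for each attribute
 * defined below.
 */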
923/*
924 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
925 */
926DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
927DEF_QLA_TPG_ATTRIB(generate_node_acls);
928QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
929
930/*
931 * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
932 */
933DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
934DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
935QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
936
937/*
938 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
939 */
940DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
941DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
942QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
943
944/*
945 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
946 */
947DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
948DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
949QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
950
951/*
952 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
953 */
954DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
955DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
956QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);
957
958static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
959	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
960	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
961	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
962	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
963	&tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
964	NULL,
965};
966
967/* End items for tcm_qla2xxx_tpg_attrib_cit */
968
969static ssize_t tcm_qla2xxx_tpg_show_enable(
970	struct se_portal_group *se_tpg,
971	char *page)
972{
973	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
974			struct tcm_qla2xxx_tpg, se_tpg);
975
976	return snprintf(page, PAGE_SIZE, "%d\n",
977			atomic_read(&tpg->lport_tpg_enabled));
978}
979
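/*
 * TPG enable/disable is performed from a workqueue so that
 * configfs_depend_item()/configfs_undepend_item() run outside the configfs
 * ->store() callback itself; tcm_qla2xxx_tpg_store_enable() waits on
 * tpg_base_comp for the result.
 */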
980static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
981{
982	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
983				struct tcm_qla2xxx_tpg, tpg_base_work);
984	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
985	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
986
987	if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
988				  &se_tpg->tpg_group.cg_item)) {
989		atomic_set(&base_tpg->lport_tpg_enabled, 1);
990		qlt_enable_vha(base_vha);
991	}
992	complete(&base_tpg->tpg_base_comp);
993}
994
995static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
996{
997	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
998				struct tcm_qla2xxx_tpg, tpg_base_work);
999	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
1000	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
1001
1002	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
1003		atomic_set(&base_tpg->lport_tpg_enabled, 0);
1004		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1005				       &se_tpg->tpg_group.cg_item);
1006	}
1007	complete(&base_tpg->tpg_base_comp);
1008}
1009
1010static ssize_t tcm_qla2xxx_tpg_store_enable(
1011	struct se_portal_group *se_tpg,
1012	const char *page,
1013	size_t count)
1014{
1015	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1016			struct tcm_qla2xxx_tpg, se_tpg);
1017	unsigned long op;
1018	int rc;
1019
1020	rc = kstrtoul(page, 0, &op);
1021	if (rc < 0) {
1022		pr_err("kstrtoul() returned %d\n", rc);
1023		return -EINVAL;
1024	}
1025	if ((op != 1) && (op != 0)) {
1026		pr_err("Illegal value for tpg_enable: %lu\n", op);
1027		return -EINVAL;
1028	}
1029	if (op) {
1030		if (atomic_read(&tpg->lport_tpg_enabled))
1031			return -EEXIST;
1032
1033		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
1034	} else {
1035		if (!atomic_read(&tpg->lport_tpg_enabled))
1036			return count;
1037
1038		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
1039	}
1040	init_completion(&tpg->tpg_base_comp);
1041	schedule_work(&tpg->tpg_base_work);
1042	wait_for_completion(&tpg->tpg_base_comp);
1043
1044	if (op) {
1045		if (!atomic_read(&tpg->lport_tpg_enabled))
1046			return -ENODEV;
1047	} else {
1048		if (atomic_read(&tpg->lport_tpg_enabled))
1049			return -EPERM;
1050	}
1051	return count;
1052}
1053
1054TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
1055
1056static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
1057	&tcm_qla2xxx_tpg_enable.attr,
1058	NULL,
1059};
1060
1061static struct se_portal_group *tcm_qla2xxx_make_tpg(
1062	struct se_wwn *wwn,
1063	struct config_group *group,
1064	const char *name)
1065{
1066	struct tcm_qla2xxx_lport *lport = container_of(wwn,
1067			struct tcm_qla2xxx_lport, lport_wwn);
1068	struct tcm_qla2xxx_tpg *tpg;
1069	unsigned long tpgt;
1070	int ret;
1071
1072	if (strstr(name, "tpgt_") != name)
1073		return ERR_PTR(-EINVAL);
1074	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1075		return ERR_PTR(-EINVAL);
1076
1077	if (tpgt != 1) {
1078		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
1079		return ERR_PTR(-ENOSYS);
1080	}
1081
1082	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1083	if (!tpg) {
1084		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1085		return ERR_PTR(-ENOMEM);
1086	}
1087	tpg->lport = lport;
1088	tpg->lport_tpgt = tpgt;
1089	/*
1090	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1091	 * NodeACLs
1092	 */
1093	tpg->tpg_attrib.generate_node_acls = 1;
1094	tpg->tpg_attrib.demo_mode_write_protect = 1;
1095	tpg->tpg_attrib.cache_dynamic_acls = 1;
1096	tpg->tpg_attrib.demo_mode_login_only = 1;
1097
1098	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1099				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1100	if (ret < 0) {
1101		kfree(tpg);
1102		return NULL;
1103	}
1104
1105	lport->tpg_1 = tpg;
1106
1107	return &tpg->se_tpg;
1108}
1109
1110static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
1111{
1112	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1113			struct tcm_qla2xxx_tpg, se_tpg);
1114	struct tcm_qla2xxx_lport *lport = tpg->lport;
1115	struct scsi_qla_host *vha = lport->qla_vha;
1116	/*
1117	 * Call into qla_target.c LLD logic to shut down the active
1118	 * FC Nexuses and disable target mode operation for this qla_hw_data
1119	 */
1120	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
1121		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
1122
1123	core_tpg_deregister(se_tpg);
1124	/*
1125	 * Clear local TPG=1 pointer for non NPIV mode.
1126	 */
1127	lport->tpg_1 = NULL;
1128	kfree(tpg);
1129}
1130
1131static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
1132	struct se_portal_group *se_tpg,
1133	char *page)
1134{
1135	return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
1136}
1137
1138static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
1139	struct se_portal_group *se_tpg,
1140	const char *page,
1141	size_t count)
1142{
1143	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
1144	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
1145			struct tcm_qla2xxx_lport, lport_wwn);
1146	struct scsi_qla_host *vha = lport->qla_vha;
1147	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1148			struct tcm_qla2xxx_tpg, se_tpg);
1149	unsigned long op;
1150	int rc;
1151
1152	rc = kstrtoul(page, 0, &op);
1153	if (rc < 0) {
1154		pr_err("kstrtoul() returned %d\n", rc);
1155		return -EINVAL;
1156	}
1157	if ((op != 1) && (op != 0)) {
1158		pr_err("Illegal value for tpg_enable: %lu\n", op);
1159		return -EINVAL;
1160	}
1161	if (op) {
1162		if (atomic_read(&tpg->lport_tpg_enabled))
1163			return -EEXIST;
1164
1165		atomic_set(&tpg->lport_tpg_enabled, 1);
1166		qlt_enable_vha(vha);
1167	} else {
1168		if (!atomic_read(&tpg->lport_tpg_enabled))
1169			return count;
1170
1171		atomic_set(&tpg->lport_tpg_enabled, 0);
1172		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
1173	}
1174
1175	return count;
1176}
1177
1178TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
1179
1180static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
1181	&tcm_qla2xxx_npiv_tpg_enable.attr,
1182	NULL,
1183};
1184
1185static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1186	struct se_wwn *wwn,
1187	struct config_group *group,
1188	const char *name)
1189{
1190	struct tcm_qla2xxx_lport *lport = container_of(wwn,
1191			struct tcm_qla2xxx_lport, lport_wwn);
1192	struct tcm_qla2xxx_tpg *tpg;
1193	unsigned long tpgt;
1194	int ret;
1195
1196	if (strstr(name, "tpgt_") != name)
1197		return ERR_PTR(-EINVAL);
1198	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1199		return ERR_PTR(-EINVAL);
1200
1201	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1202	if (!tpg) {
1203		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1204		return ERR_PTR(-ENOMEM);
1205	}
1206	tpg->lport = lport;
1207	tpg->lport_tpgt = tpgt;
1208
1209	/*
1210	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1211	 * NodeACLs
1212	 */
1213	tpg->tpg_attrib.generate_node_acls = 1;
1214	tpg->tpg_attrib.demo_mode_write_protect = 1;
1215	tpg->tpg_attrib.cache_dynamic_acls = 1;
1216	tpg->tpg_attrib.demo_mode_login_only = 1;
1217
1218	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
1219				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1220	if (ret < 0) {
1221		kfree(tpg);
1222		return NULL;
1223	}
1224	lport->tpg_1 = tpg;
1225	return &tpg->se_tpg;
1226}
1227
1228/*
1229 * Expected to be called with struct qla_hw_data->hardware_lock held
1230 */
1231static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1232	scsi_qla_host_t *vha,
1233	const uint8_t *s_id)
1234{
1235	struct tcm_qla2xxx_lport *lport;
1236	struct se_node_acl *se_nacl;
1237	struct tcm_qla2xxx_nacl *nacl;
1238	u32 key;
1239
1240	lport = vha->vha_tgt.target_lport_ptr;
1241	if (!lport) {
1242		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1243		dump_stack();
1244		return NULL;
1245	}
1246
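	/* Pack the 3-byte S_ID (domain, area, al_pa) into a 24-bit lookup key */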
1247	key = (((unsigned long)s_id[0] << 16) |
1248	       ((unsigned long)s_id[1] << 8) |
1249	       (unsigned long)s_id[2]);
1250	pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1251
1252	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
1253	if (!se_nacl) {
1254		pr_debug("Unable to locate s_id: 0x%06x\n", key);
1255		return NULL;
1256	}
1257	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
1258	    se_nacl, se_nacl->initiatorname);
1259
1260	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1261	if (!nacl->qla_tgt_sess) {
1262		pr_err("Unable to locate struct qla_tgt_sess\n");
1263		return NULL;
1264	}
1265
1266	return nacl->qla_tgt_sess;
1267}
1268
1269/*
1270 * Expected to be called with struct qla_hw_data->hardware_lock held
1271 */
1272static void tcm_qla2xxx_set_sess_by_s_id(
1273	struct tcm_qla2xxx_lport *lport,
1274	struct se_node_acl *new_se_nacl,
1275	struct tcm_qla2xxx_nacl *nacl,
1276	struct se_session *se_sess,
1277	struct qla_tgt_sess *qla_tgt_sess,
1278	uint8_t *s_id)
1279{
1280	u32 key;
1281	void *slot;
1282	int rc;
1283
1284	key = (((unsigned long)s_id[0] << 16) |
1285	       ((unsigned long)s_id[1] << 8) |
1286	       (unsigned long)s_id[2]);
1287	pr_debug("set_sess_by_s_id: %06x\n", key);
1288
1289	slot = btree_lookup32(&lport->lport_fcport_map, key);
1290	if (!slot) {
1291		if (new_se_nacl) {
1292			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
1293			nacl->nport_id = key;
1294			rc = btree_insert32(&lport->lport_fcport_map, key,
1295					new_se_nacl, GFP_ATOMIC);
1296			if (rc)
1297				pr_err("Unable to insert s_id into fcport_map: %06x\n",
1298				       (int)key);
1299		} else {
1300			pr_debug("Wiping nonexistent fc_port entry\n");
1301		}
1302
1303		qla_tgt_sess->se_sess = se_sess;
1304		nacl->qla_tgt_sess = qla_tgt_sess;
1305		return;
1306	}
1307
1308	if (nacl->qla_tgt_sess) {
1309		if (new_se_nacl == NULL) {
1310			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
1311			btree_remove32(&lport->lport_fcport_map, key);
1312			nacl->qla_tgt_sess = NULL;
1313			return;
1314		}
1315		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
1316		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1317		qla_tgt_sess->se_sess = se_sess;
1318		nacl->qla_tgt_sess = qla_tgt_sess;
1319		return;
1320	}
1321
1322	if (new_se_nacl == NULL) {
1323		pr_debug("Clearing existing fc_port entry\n");
1324		btree_remove32(&lport->lport_fcport_map, key);
1325		return;
1326	}
1327
1328	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
1329	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1330	qla_tgt_sess->se_sess = se_sess;
1331	nacl->qla_tgt_sess = qla_tgt_sess;
1332
1333	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
1334	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1335}
1336
1337/*
1338 * Expected to be called with struct qla_hw_data->hardware_lock held
1339 */
1340static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1341	scsi_qla_host_t *vha,
1342	const uint16_t loop_id)
1343{
1344	struct tcm_qla2xxx_lport *lport;
1345	struct se_node_acl *se_nacl;
1346	struct tcm_qla2xxx_nacl *nacl;
1347	struct tcm_qla2xxx_fc_loopid *fc_loopid;
1348
1349	lport = vha->vha_tgt.target_lport_ptr;
1350	if (!lport) {
1351		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1352		dump_stack();
1353		return NULL;
1354	}
1355
1356	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1357
1358	fc_loopid = lport->lport_loopid_map + loop_id;
1359	se_nacl = fc_loopid->se_nacl;
1360	if (!se_nacl) {
1361		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
1362		    loop_id);
1363		return NULL;
1364	}
1365
1366	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1367
1368	if (!nacl->qla_tgt_sess) {
1369		pr_err("Unable to locate struct qla_tgt_sess\n");
1370		return NULL;
1371	}
1372
1373	return nacl->qla_tgt_sess;
1374}
1375
1376/*
1377 * Expected to be called with struct qla_hw_data->hardware_lock held
1378 */
1379static void tcm_qla2xxx_set_sess_by_loop_id(
1380	struct tcm_qla2xxx_lport *lport,
1381	struct se_node_acl *new_se_nacl,
1382	struct tcm_qla2xxx_nacl *nacl,
1383	struct se_session *se_sess,
1384	struct qla_tgt_sess *qla_tgt_sess,
1385	uint16_t loop_id)
1386{
1387	struct se_node_acl *saved_nacl;
1388	struct tcm_qla2xxx_fc_loopid *fc_loopid;
1389
1390	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1391
1392	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
1393			lport->lport_loopid_map)[loop_id];
1394
1395	saved_nacl = fc_loopid->se_nacl;
1396	if (!saved_nacl) {
1397		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
1398		fc_loopid->se_nacl = new_se_nacl;
1399		if (qla_tgt_sess->se_sess != se_sess)
1400			qla_tgt_sess->se_sess = se_sess;
1401		if (nacl->qla_tgt_sess != qla_tgt_sess)
1402			nacl->qla_tgt_sess = qla_tgt_sess;
1403		return;
1404	}
1405
1406	if (nacl->qla_tgt_sess) {
1407		if (new_se_nacl == NULL) {
1408			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1409			fc_loopid->se_nacl = NULL;
1410			nacl->qla_tgt_sess = NULL;
1411			return;
1412		}
1413
1414		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1415		fc_loopid->se_nacl = new_se_nacl;
1416		if (qla_tgt_sess->se_sess != se_sess)
1417			qla_tgt_sess->se_sess = se_sess;
1418		if (nacl->qla_tgt_sess != qla_tgt_sess)
1419			nacl->qla_tgt_sess = qla_tgt_sess;
1420		return;
1421	}
1422
1423	if (new_se_nacl == NULL) {
1424		pr_debug("Clearing fc_loopid->se_nacl\n");
1425		fc_loopid->se_nacl = NULL;
1426		return;
1427	}
1428
1429	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
1430	fc_loopid->se_nacl = new_se_nacl;
1431	if (qla_tgt_sess->se_sess != se_sess)
1432		qla_tgt_sess->se_sess = se_sess;
1433	if (nacl->qla_tgt_sess != qla_tgt_sess)
1434		nacl->qla_tgt_sess = qla_tgt_sess;
1435
1436	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
1437	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1438}
1439
1440/*
1441 * Should always be called with qla_hw_data->hardware_lock held.
1442 */
1443static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1444		struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
1445{
1446	struct se_session *se_sess = sess->se_sess;
1447	unsigned char be_sid[3];
1448
1449	be_sid[0] = sess->s_id.b.domain;
1450	be_sid[1] = sess->s_id.b.area;
1451	be_sid[2] = sess->s_id.b.al_pa;
1452
1453	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1454				sess, be_sid);
1455	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1456				sess, sess->loop_id);
1457}
1458
1459static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1460{
1461	struct qla_tgt *tgt = sess->tgt;
1462	struct qla_hw_data *ha = tgt->ha;
1463	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1464	struct se_session *se_sess;
1465	struct se_node_acl *se_nacl;
1466	struct tcm_qla2xxx_lport *lport;
1467	struct tcm_qla2xxx_nacl *nacl;
1468
1469	BUG_ON(in_interrupt());
1470
1471	se_sess = sess->se_sess;
1472	if (!se_sess) {
1473		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
1474		dump_stack();
1475		return;
1476	}
1477	se_nacl = se_sess->se_node_acl;
1478	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1479
1480	lport = vha->vha_tgt.target_lport_ptr;
1481	if (!lport) {
1482		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1483		dump_stack();
1484		return;
1485	}
1486	target_wait_for_sess_cmds(se_sess);
1487
1488	transport_deregister_session_configfs(sess->se_sess);
1489	transport_deregister_session(sess->se_sess);
1490}
1491
1492/*
1493 * Called via qlt_create_sess():ha->tgt.tgt_ops->check_initiator_node_acl()
1494 * to locate struct se_node_acl
1495 */
1496static int tcm_qla2xxx_check_initiator_node_acl(
1497	scsi_qla_host_t *vha,
1498	unsigned char *fc_wwpn,
1499	void *qla_tgt_sess,
1500	uint8_t *s_id,
1501	uint16_t loop_id)
1502{
1503	struct qla_hw_data *ha = vha->hw;
1504	struct tcm_qla2xxx_lport *lport;
1505	struct tcm_qla2xxx_tpg *tpg;
1506	struct tcm_qla2xxx_nacl *nacl;
1507	struct se_portal_group *se_tpg;
1508	struct se_node_acl *se_nacl;
1509	struct se_session *se_sess;
1510	struct qla_tgt_sess *sess = qla_tgt_sess;
1511	unsigned char port_name[36];
1512	unsigned long flags;
1513	int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
1514		       TCM_QLA2XXX_DEFAULT_TAGS;
1515
1516	lport = vha->vha_tgt.target_lport_ptr;
1517	if (!lport) {
1518		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1519		dump_stack();
1520		return -EINVAL;
1521	}
1522	/*
1523	 * Locate the TPG=1 reference..
1524	 */
1525	tpg = lport->tpg_1;
1526	if (!tpg) {
1527		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
1528		return -EINVAL;
1529	}
1530	se_tpg = &tpg->se_tpg;
1531
1532	se_sess = transport_init_session_tags(num_tags,
1533					      sizeof(struct qla_tgt_cmd),
1534					      TARGET_PROT_NORMAL);
1535	if (IS_ERR(se_sess)) {
1536		pr_err("Unable to initialize struct se_session\n");
1537		return PTR_ERR(se_sess);
1538	}
1539	/*
1540	 * Format the FCP Initiator port_name into colon-separated values to
1541	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
1542	 */
1543	memset(&port_name, 0, 36);
1544	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1545		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
1546		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
1547	/*
1548	 * Locate our struct se_node_acl either from an explicit NodeACL created
1549	 * via ConfigFS, or via running in TPG demo mode.
1550	 */
1551	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
1552					port_name);
1553	if (!se_sess->se_node_acl) {
1554		transport_free_session(se_sess);
1555		return -EINVAL;
1556	}
1557	se_nacl = se_sess->se_node_acl;
1558	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1559	/*
1560	 * And now setup the new se_nacl and session pointers into our HW lport
1561	 * mappings for fabric S_ID and LOOP_ID.
1562	 */
1563	spin_lock_irqsave(&ha->hardware_lock, flags);
1564	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1565			qla_tgt_sess, s_id);
1566	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1567			qla_tgt_sess, loop_id);
1568	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1569	/*
1570	 * Finally register the new FC Nexus with TCM
1571	 */
1572	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1573
1574	return 0;
1575}
1576
1577static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1578				    uint16_t loop_id, bool conf_compl_supported)
1579{
1580	struct qla_tgt *tgt = sess->tgt;
1581	struct qla_hw_data *ha = tgt->ha;
1582	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1583	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
1584	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
1585	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
1586			struct tcm_qla2xxx_nacl, se_node_acl);
1587	u32 key;
1588
1589
1590	if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
1591		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
1592		    sess, sess->port_name,
1593		    sess->loop_id, loop_id, sess->s_id.b.domain,
1594		    sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
1595		    s_id.b.area, s_id.b.al_pa);
1596
1597	if (sess->loop_id != loop_id) {
1598		/*
1599		 * Because we can shuffle loop IDs around and we
1600		 * update different sessions non-atomically, we might
1601		 * have overwritten this session's old loop ID
1602		 * already, and we might end up overwriting some other
1603		 * session that will be updated later.  So we have to
1604		 * be extra careful and we can't warn about those things...
1605		 */
1606		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
1607			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
1608
1609		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
1610
1611		sess->loop_id = loop_id;
1612	}
1613
1614	if (sess->s_id.b24 != s_id.b24) {
1615		key = (((u32) sess->s_id.b.domain << 16) |
1616		       ((u32) sess->s_id.b.area   <<  8) |
1617		       ((u32) sess->s_id.b.al_pa));
1618
1619		if (btree_lookup32(&lport->lport_fcport_map, key))
1620			WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
1621			     "Found wrong se_nacl when updating s_id %x:%x:%x\n",
1622			     sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
1623		else
1624			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
1625			     sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
1626
1627		key = (((u32) s_id.b.domain << 16) |
1628		       ((u32) s_id.b.area   <<  8) |
1629		       ((u32) s_id.b.al_pa));
1630
1631		if (btree_lookup32(&lport->lport_fcport_map, key)) {
1632			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
1633			     s_id.b.domain, s_id.b.area, s_id.b.al_pa);
1634			btree_update32(&lport->lport_fcport_map, key, se_nacl);
1635		} else {
1636			btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
1637		}
1638
1639		sess->s_id = s_id;
1640		nacl->nport_id = key;
1641	}
1642
1643	sess->conf_compl_supported = conf_compl_supported;
1644}
1645
1646/*
1647 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1648 */
1649static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1650	.handle_cmd		= tcm_qla2xxx_handle_cmd,
1651	.handle_data		= tcm_qla2xxx_handle_data,
1652	.handle_dif_err		= tcm_qla2xxx_handle_dif_err,
1653	.handle_tmr		= tcm_qla2xxx_handle_tmr,
1654	.free_cmd		= tcm_qla2xxx_free_cmd,
1655	.free_mcmd		= tcm_qla2xxx_free_mcmd,
1656	.free_session		= tcm_qla2xxx_free_session,
1657	.update_sess		= tcm_qla2xxx_update_sess,
1658	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
1659	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
1660	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
1661	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
1662	.put_sess		= tcm_qla2xxx_put_sess,
1663	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
1664};
1665
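/*
 * Per-lport session lookup state: lport_fcport_map is a 32-bit btree keyed
 * by the 24-bit FC port_id (S_ID), and lport_loopid_map is a flat array of
 * 65536 entries indexed by loop_id; both resolve to se_node_acl pointers.
 */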
1666static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1667{
1668	int rc;
1669
1670	rc = btree_init32(&lport->lport_fcport_map);
1671	if (rc) {
1672		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
1673		return rc;
1674	}
1675
1676	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
1677				65536);
1678	if (!lport->lport_loopid_map) {
1679		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1680		    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1681		btree_destroy32(&lport->lport_fcport_map);
1682		return -ENOMEM;
1683	}
1684	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1685	       * 65536);
1686	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1687	       sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1688	return 0;
1689}
1690
1691static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
1692					 void *target_lport_ptr,
1693					 u64 npiv_wwpn, u64 npiv_wwnn)
1694{
1695	struct qla_hw_data *ha = vha->hw;
1696	struct tcm_qla2xxx_lport *lport =
1697			(struct tcm_qla2xxx_lport *)target_lport_ptr;
1698	/*
1699	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
1700	 */
1701	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
1702	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
1703	lport->qla_vha = vha;
1704
1705	return 0;
1706}
1707
1708static struct se_wwn *tcm_qla2xxx_make_lport(
1709	struct target_fabric_configfs *tf,
1710	struct config_group *group,
1711	const char *name)
1712{
1713	struct tcm_qla2xxx_lport *lport;
1714	u64 wwpn;
1715	int ret = -ENODEV;
1716
1717	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
1718		return ERR_PTR(-EINVAL);
1719
1720	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1721	if (!lport) {
1722		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
1723		return ERR_PTR(-ENOMEM);
1724	}
1725	lport->lport_wwpn = wwpn;
1726	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
1727				wwpn);
1728	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
1729
1730	ret = tcm_qla2xxx_init_lport(lport);
1731	if (ret != 0)
1732		goto out;
1733
1734	ret = qlt_lport_register(lport, wwpn, 0, 0,
1735				 tcm_qla2xxx_lport_register_cb);
1736	if (ret != 0)
1737		goto out_lport;
1738
1739	return &lport->lport_wwn;
1740out_lport:
1741	vfree(lport->lport_loopid_map);
1742	btree_destroy32(&lport->lport_fcport_map);
1743out:
1744	kfree(lport);
1745	return ERR_PTR(ret);
1746}
1747
1748static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
1749{
1750	struct tcm_qla2xxx_lport *lport = container_of(wwn,
1751			struct tcm_qla2xxx_lport, lport_wwn);
1752	struct scsi_qla_host *vha = lport->qla_vha;
1753	struct se_node_acl *node;
1754	u32 key = 0;
1755
1756	/*
1757	 * Call into qla_target.c LLD logic to complete the
1758	 * shutdown of struct qla_tgt after the call to
1759	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
1760	 */
1761	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
1762		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
1763
1764	qlt_lport_deregister(vha);
1765
1766	vfree(lport->lport_loopid_map);
1767	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
1768		btree_remove32(&lport->lport_fcport_map, key);
1769	btree_destroy32(&lport->lport_fcport_map);
1770	kfree(lport);
1771}
1772
1773static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
1774					      void *target_lport_ptr,
1775					      u64 npiv_wwpn, u64 npiv_wwnn)
1776{
1777	struct fc_vport *vport;
1778	struct Scsi_Host *sh = base_vha->host;
1779	struct scsi_qla_host *npiv_vha;
1780	struct tcm_qla2xxx_lport *lport =
1781			(struct tcm_qla2xxx_lport *)target_lport_ptr;
1782	struct tcm_qla2xxx_lport *base_lport =
1783			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
1784	struct tcm_qla2xxx_tpg *base_tpg;
1785	struct fc_vport_identifiers vport_id;
1786
1787	if (!qla_tgt_mode_enabled(base_vha)) {
1788		pr_err("qla2xxx base_vha not enabled for target mode\n");
1789		return -EPERM;
1790	}
1791
1792	if (!base_lport || !base_lport->tpg_1 ||
1793	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
1794		pr_err("qla2xxx base_lport or tpg_1 not available\n");
1795		return -EPERM;
1796	}
1797	base_tpg = base_lport->tpg_1;
1798
1799	memset(&vport_id, 0, sizeof(vport_id));
1800	vport_id.port_name = npiv_wwpn;
1801	vport_id.node_name = npiv_wwnn;
1802	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
1803	vport_id.vport_type = FC_PORTTYPE_NPIV;
1804	vport_id.disable = false;
1805
1806	vport = fc_vport_create(sh, 0, &vport_id);
1807	if (!vport) {
1808		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
1809		return -ENODEV;
1810	}
1811	/*
1812	 * Setup local pointer to NPIV vhba + target_lport_ptr
1813	 */
1814	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
1815	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
1816	lport->qla_vha = npiv_vha;
1817	scsi_host_get(npiv_vha->host);
1818	return 0;
1819}
1820
1821
1822static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
1823	struct target_fabric_configfs *tf,
1824	struct config_group *group,
1825	const char *name)
1826{
1827	struct tcm_qla2xxx_lport *lport;
1828	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
1829	char *p, tmp[128];
1830	int ret;
1831
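	/*
	 * Expected name: "<xx:...:xx physical WWPN>@<16-hex NPIV WWPN>:<16-hex NPIV WWNN>"
	 */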
1832	snprintf(tmp, 128, "%s", name);
1833
1834	p = strchr(tmp, '@');
1835	if (!p) {
1836		pr_err("Unable to locate NPIV '@' separator\n");
1837		return ERR_PTR(-EINVAL);
1838	}
1839	*p++ = '\0';
1840
1841	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
1842		return ERR_PTR(-EINVAL);
1843
1844	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
1845				       &npiv_wwpn, &npiv_wwnn) < 0)
1846		return ERR_PTR(-EINVAL);
1847
1848	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1849	if (!lport) {
1850		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
1851		return ERR_PTR(-ENOMEM);
1852	}
1853	lport->lport_npiv_wwpn = npiv_wwpn;
1854	lport->lport_npiv_wwnn = npiv_wwnn;
1855	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
1856
1857	ret = tcm_qla2xxx_init_lport(lport);
1858	if (ret != 0)
1859		goto out;
1860
1861	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
1862				 tcm_qla2xxx_lport_register_npiv_cb);
1863	if (ret != 0)
1864		goto out_lport;
1865
1866	return &lport->lport_wwn;
1867out_lport:
1868	vfree(lport->lport_loopid_map);
1869	btree_destroy32(&lport->lport_fcport_map);
1870out:
1871	kfree(lport);
1872	return ERR_PTR(ret);
1873}
1874
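/*
 * Teardown for an NPIV lport: drop the Scsi_Host reference taken on the
 * NPIV vha in the register callback above, ask the FC transport to tear
 * down the vport via fc_vport_terminate(), release the base HBA host
 * reference held for this lport, and free the lport itself.
 */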
1875static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
1876{
1877	struct tcm_qla2xxx_lport *lport = container_of(wwn,
1878			struct tcm_qla2xxx_lport, lport_wwn);
1879	struct scsi_qla_host *npiv_vha = lport->qla_vha;
1880	struct qla_hw_data *ha = npiv_vha->hw;
1881	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1882
1883	scsi_host_put(npiv_vha->host);
1884	/*
1885	 * Notify the FC transport that we want to release the vha->fc_vport
1886	 */
1887	fc_vport_terminate(npiv_vha->fc_vport);
1888	scsi_host_put(base_vha->host);
1889	kfree(lport);
1890}
1891
1892
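/*
 * Read-only "version" attribute, exposed through tcm_qla2xxx_wwn_attrs,
 * which is hooked into tfc_wwn_cit.ct_attrs for both fabrics below.
 */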
1893static ssize_t tcm_qla2xxx_wwn_show_attr_version(
1894	struct target_fabric_configfs *tf,
1895	char *page)
1896{
1897	return sprintf(page,
1898	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1899	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1900	    utsname()->machine);
1901}
1902
1903TF_WWN_ATTR_RO(tcm_qla2xxx, version);
1904
1905static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1906	&tcm_qla2xxx_wwn_version.attr,
1907	NULL,
1908};
1909
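/*
 * Fabric API callback table for the physical-port "qla2xxx" fabric.  It is
 * copied into fabric->tf_ops in tcm_qla2xxx_register_configfs() below and
 * is how the generic target core (target_core_fabric_configfs.c and
 * friends) calls back into this module for configfs, session and I/O
 * handling.
 */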
1910static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1911	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
1912	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
1913	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
1914	.tpg_get_tag			= tcm_qla2xxx_get_tag,
1915	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
1916	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
1917	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
1918	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
1919	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
1920	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
1921	.tpg_check_demo_mode_write_protect =
1922					tcm_qla2xxx_check_demo_write_protect,
1923	.tpg_check_prod_mode_write_protect =
1924					tcm_qla2xxx_check_prod_write_protect,
1925	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
1926	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
1927	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
1928	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
1929	.check_stop_free		= tcm_qla2xxx_check_stop_free,
1930	.release_cmd			= tcm_qla2xxx_release_cmd,
1931	.put_session			= tcm_qla2xxx_put_session,
1932	.shutdown_session		= tcm_qla2xxx_shutdown_session,
1933	.close_session			= tcm_qla2xxx_close_session,
1934	.sess_get_index			= tcm_qla2xxx_sess_get_index,
1935	.sess_get_initiator_sid		= NULL,
1936	.write_pending			= tcm_qla2xxx_write_pending,
1937	.write_pending_status		= tcm_qla2xxx_write_pending_status,
1938	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
1939	.get_task_tag			= tcm_qla2xxx_get_task_tag,
1940	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
1941	.queue_data_in			= tcm_qla2xxx_queue_data_in,
1942	.queue_status			= tcm_qla2xxx_queue_status,
1943	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
1944	.aborted_task			= tcm_qla2xxx_aborted_task,
1945	/*
1946	 * Setup function pointers for generic logic in
1947	 * target_core_fabric_configfs.c
1948	 */
1949	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
1950	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
1951	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
1952	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
1953	.fabric_post_link		= NULL,
1954	.fabric_pre_unlink		= NULL,
1955	.fabric_make_np			= NULL,
1956	.fabric_drop_np			= NULL,
1957	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
1958	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
1959};
1960
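/*
 * Callback table for the "qla2xxx_npiv" fabric.  Mostly identical to
 * tcm_qla2xxx_ops above; the differences are the fabric name, the
 * NPIV-specific make/drop lport and make_tpg handlers, and demo-mode
 * write protect being mapped to tcm_qla2xxx_check_demo_mode.
 */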
1961static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1962	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
1963	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
1964	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
1965	.tpg_get_tag			= tcm_qla2xxx_get_tag,
1966	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
1967	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
1968	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,
1969	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id,
1970	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
1971	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
1972	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
1973	.tpg_check_prod_mode_write_protect =
1974	    tcm_qla2xxx_check_prod_write_protect,
1975	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
1976	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,
1977	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,
1978	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
1979	.check_stop_free		= tcm_qla2xxx_check_stop_free,
1980	.release_cmd			= tcm_qla2xxx_release_cmd,
1981	.put_session			= tcm_qla2xxx_put_session,
1982	.shutdown_session		= tcm_qla2xxx_shutdown_session,
1983	.close_session			= tcm_qla2xxx_close_session,
1984	.sess_get_index			= tcm_qla2xxx_sess_get_index,
1985	.sess_get_initiator_sid		= NULL,
1986	.write_pending			= tcm_qla2xxx_write_pending,
1987	.write_pending_status		= tcm_qla2xxx_write_pending_status,
1988	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
1989	.get_task_tag			= tcm_qla2xxx_get_task_tag,
1990	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
1991	.queue_data_in			= tcm_qla2xxx_queue_data_in,
1992	.queue_status			= tcm_qla2xxx_queue_status,
1993	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
1994	.aborted_task			= tcm_qla2xxx_aborted_task,
1995	/*
1996	 * Setup function pointers for generic logic in
1997	 * target_core_fabric_configfs.c
1998	 */
1999	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
2000	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
2001	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
2002	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
2003	.fabric_post_link		= NULL,
2004	.fabric_pre_unlink		= NULL,
2005	.fabric_make_np			= NULL,
2006	.fabric_drop_np			= NULL,
2007	.fabric_make_nodeacl		= tcm_qla2xxx_make_nodeacl,
2008	.fabric_drop_nodeacl		= tcm_qla2xxx_drop_nodeacl,
2009};
2010
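/*
 * Called from module init: registers the "qla2xxx" and "qla2xxx_npiv"
 * fabrics with the target core and allocates the two module-level
 * workqueues declared at the top of this file.  Once registration
 * succeeds, the fabrics appear under configfs roughly as (illustrative
 * layout, assuming the usual /sys/kernel/config mount point):
 *
 *	/sys/kernel/config/target/qla2xxx/<wwpn>/tpgt_<tag>/
 *	/sys/kernel/config/target/qla2xxx_npiv/<wwpn>@<npiv wwns>/tpgt_<tag>/
 *
 * Failures unwind in reverse order through the out_* labels below.
 */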
2011static int tcm_qla2xxx_register_configfs(void)
2012{
2013	struct target_fabric_configfs *fabric, *npiv_fabric;
2014	int ret;
2015
2016	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
2017	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
2018	    utsname()->machine);
2019	/*
2020	 * Register the top level struct config_item_type with TCM core
2021	 */
2022	fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
2023	if (IS_ERR(fabric)) {
2024		pr_err("target_fabric_configfs_init() failed\n");
2025		return PTR_ERR(fabric);
2026	}
2027	/*
2028	 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
2029	 */
2030	fabric->tf_ops = tcm_qla2xxx_ops;
2031	/*
2032	 * Setup default attribute lists for various fabric->tf_cit_tmpl
2033	 */
2034	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
2035	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
2036	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
2037						tcm_qla2xxx_tpg_attrib_attrs;
2038	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2039	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2040	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2041	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2042	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2043	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2044	/*
2045	 * Register the fabric for use within TCM
2046	 */
2047	ret = target_fabric_configfs_register(fabric);
2048	if (ret < 0) {
2049		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
2050		return ret;
2051	}
2052	/*
2053	 * Setup our local pointer to *fabric
2054	 */
2055	tcm_qla2xxx_fabric_configfs = fabric;
2056	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
2057
2058	/*
2059	 * Register the top level struct config_item_type for NPIV with TCM core
2060	 */
2061	npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
2062	if (IS_ERR(npiv_fabric)) {
2063		pr_err("target_fabric_configfs_init() failed\n");
2064		ret = PTR_ERR(npiv_fabric);
2065		goto out_fabric;
2066	}
2067	/*
2068	 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
2069	 */
2070	npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
2071	/*
2072	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
2073	 */
2074	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
2075	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
2076	    tcm_qla2xxx_npiv_tpg_attrs;
2077	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2078	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2079	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2080	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2081	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2082	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2083	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2084	/*
2085	 * Register the npiv_fabric for use within TCM
2086	 */
2087	ret = target_fabric_configfs_register(npiv_fabric);
2088	if (ret < 0) {
2089		pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX NPIV\n");
2090		goto out_fabric;
2091	}
2092	/*
2093	 * Setup our local pointer to *npiv_fabric
2094	 */
2095	tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
2096	pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
2097
2098	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
2099						WQ_MEM_RECLAIM, 0);
2100	if (!tcm_qla2xxx_free_wq) {
2101		ret = -ENOMEM;
2102		goto out_fabric_npiv;
2103	}
2104
2105	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
2106	if (!tcm_qla2xxx_cmd_wq) {
2107		ret = -ENOMEM;
2108		goto out_free_wq;
2109	}
2110
2111	return 0;
2112
2113out_free_wq:
2114	destroy_workqueue(tcm_qla2xxx_free_wq);
2115out_fabric_npiv:
2116	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
2117out_fabric:
2118	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
2119	return ret;
2120}
2121
2122static void tcm_qla2xxx_deregister_configfs(void)
2123{
2124	destroy_workqueue(tcm_qla2xxx_cmd_wq);
2125	destroy_workqueue(tcm_qla2xxx_free_wq);
2126
2127	target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
2128	tcm_qla2xxx_fabric_configfs = NULL;
2129	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
2130
2131	target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
2132	tcm_qla2xxx_npiv_fabric_configfs = NULL;
2133	pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
2134}
2135
2136static int __init tcm_qla2xxx_init(void)
2137{
2138	int ret;
2139
2140	ret = tcm_qla2xxx_register_configfs();
2141	if (ret < 0)
2142		return ret;
2143
2144	return 0;
2145}
2146
2147static void __exit tcm_qla2xxx_exit(void)
2148{
2149	tcm_qla2xxx_deregister_configfs();
2150}
2151
2152MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
2153MODULE_LICENSE("GPL");
2154module_init(tcm_qla2xxx_init);
2155module_exit(tcm_qla2xxx_exit);
2156